Dataset schema:
  repo_name          string (length 6-130)
  hexsha             sequence
  file_path          sequence
  code               sequence
  apis               sequence
  possible_versions  list
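In each record below, hexsha, file_path, code, and apis are parallel per-file lists, and possible_versions holds one library-to-version mapping per file. As a minimal sketch of iterating over such records, assuming they are serialized as JSON Lines with exactly these field names (the file name rows.jsonl is hypothetical):

    import json

    # Sketch only: assumes one JSON object per line with the schema above.
    with open("rows.jsonl") as fh:  # hypothetical file name
        for line in fh:
            row = json.loads(line)
            # hexsha, file_path, code, and apis are parallel lists: one entry per file.
            for sha, path, source, apis in zip(
                row["hexsha"], row["file_path"], row["code"], row["apis"]
            ):
                print(f'{row["repo_name"]}:{path}@{sha[:8]} -> {len(apis)} APIs')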
IBM/urcanet
[ "ce3f41eba23c24506ea2cf9e77cd3898a4eafbaf" ]
[ "orca/modules/bert.py" ]
[ "import torch\nimport torch.nn as nn\n\nfrom pytorch_pretrained_bert.modeling import BertEncoder, BertPooler, BertLayerNorm, BertPreTrainedModel\n\nclass BertEmbeddingsModified(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\n \"\"\"\n def __init__(self, config):\n super(BertEmbeddingsModified, self).__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n self.history_embeddings = nn.Embedding(4, config.hidden_size, padding_idx=0)\n self.turn_embeddings = nn.Embedding(8, config.hidden_size, padding_idx=0)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, input_ids, token_type_ids=None, history_encoding=None, turn_encoding=None, scenario_encoding=None):\n seq_length = input_ids.size(1)\n position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n if history_encoding is None:\n history_encoding = torch.zeros_like(input_ids)\n if turn_encoding is None:\n turn_encoding = torch.zeros_like(input_ids)\n if scenario_encoding is None:\n scenario_encoding = torch.zeros_like(input_ids)\n\n words_embeddings = self.word_embeddings(input_ids)\n position_embeddings = self.position_embeddings(position_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n history_embeddings = self.history_embeddings(history_encoding)\n scenario_embeddings = self.history_embeddings(scenario_encoding)\n turn_embeddings = self.turn_embeddings(turn_encoding)\n\n embeddings = words_embeddings + position_embeddings + token_type_embeddings + history_embeddings + turn_embeddings + scenario_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\nclass BertModelModified(BertPreTrainedModel):\n def __init__(self, config):\n super(BertModelModified, self).__init__(config)\n self.embeddings = BertEmbeddingsModified(config)\n self.encoder = BertEncoder(config)\n self.pooler = BertPooler(config)\n self.apply(self.init_bert_weights) \n self.embeddings.history_embeddings.weight[0].data.zero_() # self.embeddings.history_embeddings.padding_idx\n self.embeddings.turn_embeddings.weight[0].data.zero_() # self.embeddings.turn_embeddings.padding_idx\n\n def forward(self, input_ids, token_type_ids=None, history_encoding=None, turn_encoding=None, scenario_encoding=None, attention_mask=None, output_all_encoded_layers=True):\n if attention_mask is None:\n attention_mask = torch.ones_like(input_ids)\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n if history_encoding is None:\n history_encoding = torch.zeros_like(input_ids)\n if turn_encoding is None:\n turn_encoding = torch.zeros_like(input_ids)\n if scenario_encoding is None:\n scenario_encoding = torch.zeros_like(input_ids)\n \n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n\n extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility\n 
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n\n embedding_output = self.embeddings(input_ids, token_type_ids, history_encoding, turn_encoding, scenario_encoding)\n encoded_layers = self.encoder(embedding_output,\n extended_attention_mask,\n output_all_encoded_layers=output_all_encoded_layers)\n sequence_output = encoded_layers[-1]\n pooled_output = self.pooler(sequence_output)\n if not output_all_encoded_layers:\n encoded_layers = encoded_layers[-1]\n return encoded_layers, pooled_output" ]
[ [ "torch.nn.Dropout", "torch.zeros_like", "torch.nn.Embedding", "torch.arange", "torch.ones_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
One-sixth/check_cuda_numerical_stability
[ "0229632f81e8558436132eb0a9d8e8c2b332cb81" ]
[ "_check_cuda_numerical_stability.py" ]
[ "'''\n用于检测cuda运算错误\n'''\n\nimport torch\nimport torch.nn as nn\nfrom torch.backends import cudnn\nimport argparse\nimport time\nimport math\n\n\ndef ConvBnAct(in_ch, out_ch, ker_sz, stride, pad, act=nn.Identity(), group=1, dilation=1):\n return nn.Sequential(nn.Conv2d(in_ch, out_ch, ker_sz, stride, pad, groups=group, bias=False, dilation=dilation),\n nn.BatchNorm2d(out_ch, eps=1e-8, momentum=0.9),\n act)\n\n\ndef DeConvBnAct(in_ch, out_ch, ker_sz, stride, pad, act=nn.Identity(), group=1, dilation=1):\n return nn.Sequential(nn.ConvTranspose2d(in_ch, out_ch, ker_sz, stride, pad, groups=group, bias=False, dilation=dilation),\n nn.BatchNorm2d(out_ch, eps=1e-8, momentum=0.9),\n act)\n\n\nclass RevSequential(nn.ModuleList):\n '''\n 功能大部分与ModuleList重叠\n '''\n def __init__(self, modules=None):\n super().__init__(modules)\n\n def append(self, module):\n assert hasattr(module, 'invert') and callable(module.invert)\n super().append(module)\n\n def extend(self, modules):\n for m in modules:\n self.append(m)\n\n def forward(self, x1, x2):\n y1, y2 = x1, x2\n for m in self:\n y1, y2 = m(y1, y2)\n return y1, y2\n\n def invert(self, y1, y2):\n x1, x2 = y1, y2\n for m in list(self)[::-1]:\n x1, x2 = m.invert(x1, x2)\n return x1, x2\n\n\nclass RevGroupBlock(RevSequential):\n '''\n 当前只支持输入通道等于输出通道,并且不允许下采样\n '''\n def __init__(self, in_ch, out_ch, stride, act, block_type, blocks, **kwargs):\n assert in_ch == out_ch\n assert stride == 1\n mods = []\n for _ in range(blocks):\n mods.append(block_type(in_ch=in_ch, out_ch=out_ch, stride=1, act=act, **kwargs))\n # self.extend(mods)\n super().__init__(mods)\n\n\nclass RevBlockC(nn.Module):\n def __init__(self, in_ch, out_ch, stride, act, **kwargs):\n super().__init__()\n inter_ch = in_ch // 2\n self.conv1 = ConvBnAct(in_ch, inter_ch, ker_sz=3, stride=1, pad=1, act=act)\n self.conv2 = ConvBnAct(inter_ch, inter_ch, ker_sz=5, stride=1, pad=2, act=act, group=inter_ch)\n self.conv3 = ConvBnAct(in_ch, in_ch, ker_sz=1, stride=1, pad=0, act=nn.Identity())\n\n def func(self, x):\n y1 = self.conv1(x)\n y2 = self.conv2(y1)\n y = torch.cat([y1, y2], dim=1)\n y = self.conv3(y)\n return y\n\n def forward(self, x1, x2):\n y = x1 + self.func(x2)\n return x2, y\n\n def invert(self, y1, y2):\n x2, y = y1, y2\n x1 = y - self.func(x2)\n return x1, x2\n\n\nif __name__ == '__main__':\n cudnn.benchmark = False\n cudnn.deterministic = True\n torch.set_grad_enabled(False)\n # Close tf32 features. Fix low numerical accuracy on rtx30xx gpu.\n try:\n torch.backends.cuda.matmul.allow_tf32 = False\n torch.backends.cudnn.allow_tf32 = False\n except AttributeError as e:\n print('Info. This pytorch version is not support with tf32.')\n\n parse = argparse.ArgumentParser(description='Used to detect CUDA numerical stability problems.')\n parse.add_argument('-i', type=int, help='card id. Which cuda card do you want to test. default: 0', default=0)\n parse.add_argument('-t', type=int, help='minute. Test duration. When the setting is less than or equal to 0, it will not stop automatically. defaule: 30', default=30)\n parse.add_argument('-bs', type=int, help='Test batch size when testing. 
defaule: 20', default=20)\n parse = parse.parse_args()\n\n duration = parse.t * 60\n if duration <= 0:\n duration = math.inf\n\n card_id = parse.i\n if card_id == -1:\n # 使用cpu测试理论上是永远不会报错的\n device = torch.device('cpu')\n else:\n device = torch.device(f'cuda:{card_id}')\n\n batch_size = parse.bs\n assert batch_size > 0\n\n start_time = time.time()\n test_count = 0\n\n act = nn.ELU()\n rvb = RevGroupBlock(128, 128, 1, act, RevBlockC, 32).to(device)\n rvb.eval()\n\n is_no_error = True\n\n print('CUDA numerical stability test begin.')\n while is_no_error:\n cur_time = time.time()\n if cur_time - start_time > duration:\n break\n test_count += 1\n\n if test_count % 50 == 0:\n # 每50次迭代后,刷新一次网络权重\n rvb = RevGroupBlock(128, 128, 1, act, RevBlockC, 32).to(device)\n rvb.eval()\n\n a1 = torch.randn(batch_size, 128, 128, 128, device=device)\n b1, b2 = rvb(a1, a1)\n o_a1, o_a2 = rvb.invert(b1, b2)\n max_diff_1 = torch.abs(o_a1 - o_a2).max()\n max_diff_2 = torch.abs(a1 - o_a1).max()\n\n line = f'elapsed/total: {int(cur_time-start_time)}/{duration} card_id: {card_id} count: {test_count} max_diff_1: {max_diff_1:.8f} max_diff_2: {max_diff_2:.8f}'\n print(line)\n if max_diff_1 > 1e-3 or max_diff_2 > 1e-3:\n print(f'A large numerical error was found!')\n is_no_error = False\n\n if is_no_error:\n print(f'Test passed. Card ID: {card_id}')\n else:\n print(f'Test failed. Card ID: {card_id}')\n" ]
[ [ "torch.abs", "torch.nn.ConvTranspose2d", "torch.cat", "torch.randn", "torch.nn.ELU", "torch.nn.Conv2d", "torch.nn.Identity", "torch.set_grad_enabled", "torch.nn.BatchNorm2d", "torch.device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mrzhuzhe/mmdetection
[ "c04ca2c2a65500bc248a5d2ab6ace5b15f00064d", "c04ca2c2a65500bc248a5d2ab6ace5b15f00064d", "c04ca2c2a65500bc248a5d2ab6ace5b15f00064d" ]
[ "mmdet/models/losses/ae_loss.py", "mmdet/models/necks/bfp.py", "mmdet/models/utils/ckpt_convert.py" ]
[ "# Copyright (c) OpenMMLab. All rights reserved.\r\nimport mmcv\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nfrom ..builder import LOSSES\r\n\r\n\r\[email protected](derivate=True, coderize=True)\r\ndef ae_loss_per_image(tl_preds, br_preds, match):\r\n \"\"\"Associative Embedding Loss in one image.\r\n\r\n Associative Embedding Loss including two parts: pull loss and push loss.\r\n Pull loss makes embedding vectors from same object closer to each other.\r\n Push loss distinguish embedding vector from different objects, and makes\r\n the gap between them is large enough.\r\n\r\n During computing, usually there are 3 cases:\r\n - no object in image: both pull loss and push loss will be 0.\r\n - one object in image: push loss will be 0 and pull loss is computed\r\n by the two corner of the only object.\r\n - more than one objects in image: pull loss is computed by corner pairs\r\n from each object, push loss is computed by each object with all\r\n other objects. We use confusion matrix with 0 in diagonal to\r\n compute the push loss.\r\n\r\n Args:\r\n tl_preds (tensor): Embedding feature map of left-top corner.\r\n br_preds (tensor): Embedding feature map of bottim-right corner.\r\n match (list): Downsampled coordinates pair of each ground truth box.\r\n \"\"\"\r\n\r\n tl_list, br_list, me_list = [], [], []\r\n if len(match) == 0: # no object in image\r\n pull_loss = tl_preds.sum() * 0.\r\n push_loss = tl_preds.sum() * 0.\r\n else:\r\n for m in match:\r\n [tl_y, tl_x], [br_y, br_x] = m\r\n tl_e = tl_preds[:, tl_y, tl_x].view(-1, 1)\r\n br_e = br_preds[:, br_y, br_x].view(-1, 1)\r\n tl_list.append(tl_e)\r\n br_list.append(br_e)\r\n me_list.append((tl_e + br_e) / 2.0)\r\n\r\n tl_list = torch.cat(tl_list)\r\n br_list = torch.cat(br_list)\r\n me_list = torch.cat(me_list)\r\n\r\n assert tl_list.size() == br_list.size()\r\n\r\n # N is object number in image, M is dimension of embedding vector\r\n N, M = tl_list.size()\r\n\r\n pull_loss = (tl_list - me_list).pow(2) + (br_list - me_list).pow(2)\r\n pull_loss = pull_loss.sum() / N\r\n\r\n margin = 1 # exp setting of CornerNet, details in section 3.3 of paper\r\n\r\n # confusion matrix of push loss\r\n conf_mat = me_list.expand((N, N, M)).permute(1, 0, 2) - me_list\r\n conf_weight = 1 - torch.eye(N).type_as(me_list)\r\n conf_mat = conf_weight * (margin - conf_mat.sum(-1).abs())\r\n\r\n if N > 1: # more than one object in current image\r\n push_loss = F.relu(conf_mat).sum() / (N * (N - 1))\r\n else:\r\n push_loss = tl_preds.sum() * 0.\r\n\r\n return pull_loss, push_loss\r\n\r\n\r\[email protected]_module()\r\nclass AssociativeEmbeddingLoss(nn.Module):\r\n \"\"\"Associative Embedding Loss.\r\n\r\n More details can be found in\r\n `Associative Embedding <https://arxiv.org/abs/1611.05424>`_ and\r\n `CornerNet <https://arxiv.org/abs/1808.01244>`_ .\r\n Code is modified from `kp_utils.py <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L180>`_ # noqa: E501\r\n\r\n Args:\r\n pull_weight (float): Loss weight for corners from same object.\r\n push_weight (float): Loss weight for corners from different object.\r\n \"\"\"\r\n\r\n def __init__(self, pull_weight=0.25, push_weight=0.25):\r\n super(AssociativeEmbeddingLoss, self).__init__()\r\n self.pull_weight = pull_weight\r\n self.push_weight = push_weight\r\n\r\n def forward(self, pred, target, match):\r\n \"\"\"Forward function.\"\"\"\r\n batch = pred.size(0)\r\n pull_all, push_all = 0.0, 0.0\r\n for i in range(batch):\r\n pull, push = 
ae_loss_per_image(pred[i], target[i], match[i])\r\n\r\n pull_all += self.pull_weight * pull\r\n push_all += self.push_weight * push\r\n\r\n return pull_all, push_all\r\n", "# Copyright (c) OpenMMLab. All rights reserved.\r\nimport torch.nn.functional as F\r\nfrom mmcv.cnn import ConvModule\r\nfrom mmcv.cnn.bricks import NonLocal2d\r\nfrom mmcv.runner import BaseModule\r\n\r\nfrom ..builder import NECKS\r\n\r\n\r\[email protected]_module()\r\nclass BFP(BaseModule):\r\n \"\"\"BFP (Balanced Feature Pyramids)\r\n\r\n BFP takes multi-level features as inputs and gather them into a single one,\r\n then refine the gathered feature and scatter the refined results to\r\n multi-level features. This module is used in Libra R-CNN (CVPR 2019), see\r\n the paper `Libra R-CNN: Towards Balanced Learning for Object Detection\r\n <https://arxiv.org/abs/1904.02701>`_ for details.\r\n\r\n Args:\r\n in_channels (int): Number of input channels (feature maps of all levels\r\n should have the same channels).\r\n num_levels (int): Number of input feature levels.\r\n conv_cfg (dict): The config dict for convolution layers.\r\n norm_cfg (dict): The config dict for normalization layers.\r\n refine_level (int): Index of integration and refine level of BSF in\r\n multi-level features from bottom to top.\r\n refine_type (str): Type of the refine op, currently support\r\n [None, 'conv', 'non_local'].\r\n init_cfg (dict or list[dict], optional): Initialization config dict.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n in_channels,\r\n num_levels,\r\n refine_level=2,\r\n refine_type=None,\r\n conv_cfg=None,\r\n norm_cfg=None,\r\n init_cfg=dict(\r\n type='Xavier', layer='Conv2d', distribution='uniform')):\r\n super(BFP, self).__init__(init_cfg)\r\n assert refine_type in [None, 'conv', 'non_local']\r\n\r\n self.in_channels = in_channels\r\n self.num_levels = num_levels\r\n self.conv_cfg = conv_cfg\r\n self.norm_cfg = norm_cfg\r\n\r\n self.refine_level = refine_level\r\n self.refine_type = refine_type\r\n assert 0 <= self.refine_level < self.num_levels\r\n\r\n if self.refine_type == 'conv':\r\n self.refine = ConvModule(\r\n self.in_channels,\r\n self.in_channels,\r\n 3,\r\n padding=1,\r\n conv_cfg=self.conv_cfg,\r\n norm_cfg=self.norm_cfg)\r\n elif self.refine_type == 'non_local':\r\n self.refine = NonLocal2d(\r\n self.in_channels,\r\n reduction=1,\r\n use_scale=False,\r\n conv_cfg=self.conv_cfg,\r\n norm_cfg=self.norm_cfg)\r\n\r\n def forward(self, inputs):\r\n \"\"\"Forward function.\"\"\"\r\n assert len(inputs) == self.num_levels\r\n\r\n # step 1: gather multi-level features by resize and average\r\n feats = []\r\n gather_size = inputs[self.refine_level].size()[2:]\r\n for i in range(self.num_levels):\r\n if i < self.refine_level:\r\n gathered = F.adaptive_max_pool2d(\r\n inputs[i], output_size=gather_size)\r\n else:\r\n gathered = F.interpolate(\r\n inputs[i], size=gather_size, mode='nearest')\r\n feats.append(gathered)\r\n\r\n bsf = sum(feats) / len(feats)\r\n\r\n # step 2: refine gathered features\r\n if self.refine_type is not None:\r\n bsf = self.refine(bsf)\r\n\r\n # step 3: scatter refined features to multi-levels by a residual path\r\n outs = []\r\n for i in range(self.num_levels):\r\n out_size = inputs[i].size()[2:]\r\n if i < self.refine_level:\r\n residual = F.interpolate(bsf, size=out_size, mode='nearest')\r\n else:\r\n residual = F.adaptive_max_pool2d(bsf, output_size=out_size)\r\n outs.append(residual + inputs[i])\r\n\r\n return tuple(outs)\r\n", "# Copyright (c) OpenMMLab. 
All rights reserved.\r\n\r\n# This script consists of several convert functions which\r\n# can modify the weights of model in original repo to be\r\n# pre-trained weights.\r\n\r\nfrom collections import OrderedDict\r\n\r\nimport torch\r\n\r\n\r\ndef pvt_convert(ckpt):\r\n new_ckpt = OrderedDict()\r\n # Process the concat between q linear weights and kv linear weights\r\n use_abs_pos_embed = False\r\n use_conv_ffn = False\r\n for k in ckpt.keys():\r\n if k.startswith('pos_embed'):\r\n use_abs_pos_embed = True\r\n if k.find('dwconv') >= 0:\r\n use_conv_ffn = True\r\n for k, v in ckpt.items():\r\n if k.startswith('head'):\r\n continue\r\n if k.startswith('norm.'):\r\n continue\r\n if k.startswith('cls_token'):\r\n continue\r\n if k.startswith('pos_embed'):\r\n stage_i = int(k.replace('pos_embed', ''))\r\n new_k = k.replace(f'pos_embed{stage_i}',\r\n f'layers.{stage_i - 1}.1.0.pos_embed')\r\n if stage_i == 4 and v.size(1) == 50: # 1 (cls token) + 7 * 7\r\n new_v = v[:, 1:, :] # remove cls token\r\n else:\r\n new_v = v\r\n elif k.startswith('patch_embed'):\r\n stage_i = int(k.split('.')[0].replace('patch_embed', ''))\r\n new_k = k.replace(f'patch_embed{stage_i}',\r\n f'layers.{stage_i - 1}.0')\r\n new_v = v\r\n if 'proj.' in new_k:\r\n new_k = new_k.replace('proj.', 'projection.')\r\n elif k.startswith('block'):\r\n stage_i = int(k.split('.')[0].replace('block', ''))\r\n layer_i = int(k.split('.')[1])\r\n new_layer_i = layer_i + use_abs_pos_embed\r\n new_k = k.replace(f'block{stage_i}.{layer_i}',\r\n f'layers.{stage_i - 1}.1.{new_layer_i}')\r\n new_v = v\r\n if 'attn.q.' in new_k:\r\n sub_item_k = k.replace('q.', 'kv.')\r\n new_k = new_k.replace('q.', 'attn.in_proj_')\r\n new_v = torch.cat([v, ckpt[sub_item_k]], dim=0)\r\n elif 'attn.kv.' in new_k:\r\n continue\r\n elif 'attn.proj.' in new_k:\r\n new_k = new_k.replace('proj.', 'attn.out_proj.')\r\n elif 'attn.sr.' in new_k:\r\n new_k = new_k.replace('sr.', 'sr.')\r\n elif 'mlp.' in new_k:\r\n string = f'{new_k}-'\r\n new_k = new_k.replace('mlp.', 'ffn.layers.')\r\n if 'fc1.weight' in new_k or 'fc2.weight' in new_k:\r\n new_v = v.reshape((*v.shape, 1, 1))\r\n new_k = new_k.replace('fc1.', '0.')\r\n new_k = new_k.replace('dwconv.dwconv.', '1.')\r\n if use_conv_ffn:\r\n new_k = new_k.replace('fc2.', '4.')\r\n else:\r\n new_k = new_k.replace('fc2.', '3.')\r\n string += f'{new_k} {v.shape}-{new_v.shape}'\r\n elif k.startswith('norm'):\r\n stage_i = int(k[4])\r\n new_k = k.replace(f'norm{stage_i}', f'layers.{stage_i - 1}.2')\r\n new_v = v\r\n else:\r\n new_k = k\r\n new_v = v\r\n new_ckpt[new_k] = new_v\r\n\r\n return new_ckpt\r\n\r\n\r\ndef swin_converter(ckpt):\r\n\r\n new_ckpt = OrderedDict()\r\n\r\n def correct_unfold_reduction_order(x):\r\n out_channel, in_channel = x.shape\r\n x = x.reshape(out_channel, 4, in_channel // 4)\r\n x = x[:, [0, 2, 1, 3], :].transpose(1,\r\n 2).reshape(out_channel, in_channel)\r\n return x\r\n\r\n def correct_unfold_norm_order(x):\r\n in_channel = x.shape[0]\r\n x = x.reshape(4, in_channel // 4)\r\n x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)\r\n return x\r\n\r\n for k, v in ckpt.items():\r\n if k.startswith('head'):\r\n continue\r\n elif k.startswith('layers'):\r\n new_v = v\r\n if 'attn.' in k:\r\n new_k = k.replace('attn.', 'attn.w_msa.')\r\n elif 'mlp.' in k:\r\n if 'mlp.fc1.' in k:\r\n new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.')\r\n elif 'mlp.fc2.' 
in k:\r\n new_k = k.replace('mlp.fc2.', 'ffn.layers.1.')\r\n else:\r\n new_k = k.replace('mlp.', 'ffn.')\r\n elif 'downsample' in k:\r\n new_k = k\r\n if 'reduction.' in k:\r\n new_v = correct_unfold_reduction_order(v)\r\n elif 'norm.' in k:\r\n new_v = correct_unfold_norm_order(v)\r\n else:\r\n new_k = k\r\n new_k = new_k.replace('layers', 'stages', 1)\r\n elif k.startswith('patch_embed'):\r\n new_v = v\r\n if 'proj' in k:\r\n new_k = k.replace('proj', 'projection')\r\n else:\r\n new_k = k\r\n else:\r\n new_v = v\r\n new_k = k\r\n\r\n new_ckpt['backbone.' + new_k] = new_v\r\n\r\n return new_ckpt\r\n" ]
[ [ "torch.nn.functional.relu", "torch.eye", "torch.cat" ], [ "torch.nn.functional.adaptive_max_pool2d", "torch.nn.functional.interpolate" ], [ "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
PhilippRue/ase-notebook
[ "46205d7e1b0e5a48a3ca61d84d4ee877eea19e62" ]
[ "ase_notebook/atoms_convert.py" ]
[ "\"\"\"Module for serializing ``ase.Atoms``.\"\"\"\n# TODO very recent versions of ase.Atoms have `todict` and `fromdict` methods, ands\n# see: https://gitlab.com/ase/ase/atoms.py and\n# https://gitlab.com/ase/ase/blob/master/ase/io/jsonio.py\nimport datetime\nimport json\n\nimport ase\nfrom ase.constraints import dict2constraint\nimport numpy as np\n\n\nclass ASEEncoder(json.JSONEncoder):\n \"\"\"JSON Encoder for ase.Atoms serialization.\"\"\"\n\n def default(self, obj):\n \"\"\"Parse object.\"\"\"\n if hasattr(obj, \"todict\"):\n d = obj.todict()\n\n if not isinstance(d, dict):\n raise RuntimeError(\n f\"todict() of {obj} returned object of type {type(d)} \"\n \"but should have returned dict\"\n )\n if hasattr(obj, \"ase_objtype\"):\n d[\"__ase_objtype__\"] = obj.ase_objtype\n\n return d\n if isinstance(obj, np.ndarray):\n flatobj = obj.ravel()\n if np.iscomplexobj(obj):\n flatobj.dtype = obj.real.dtype\n return {\"__ndarray__\": (obj.shape, str(obj.dtype), flatobj.tolist())}\n if isinstance(obj, np.integer):\n return int(obj)\n if isinstance(obj, np.bool_):\n return bool(obj)\n if isinstance(obj, datetime.datetime):\n return {\"__datetime__\": obj.isoformat()}\n if isinstance(obj, complex):\n return {\"__complex__\": (obj.real, obj.imag)}\n\n return json.JSONEncoder.default(self, obj)\n\n\ndef create_ndarray(shape, dtype, data):\n \"\"\"Create ndarray from shape, dtype and flattened data.\"\"\"\n array = np.empty(shape, dtype=dtype)\n flatbuf = array.ravel()\n if np.iscomplexobj(array):\n flatbuf.dtype = array.real.dtype\n flatbuf[:] = data\n return array\n\n\ndef try_int(obj):\n \"\"\"Try conversion of object to int.\"\"\"\n try:\n return int(obj)\n except ValueError:\n return obj\n\n\ndef numpyfy(obj):\n \"\"\"Convert an object to numpy array(s) recursively.\"\"\"\n if isinstance(obj, dict):\n if \"__complex_ndarray__\" in obj:\n r, i = (np.array(x) for x in obj[\"__complex_ndarray__\"])\n return r + i * 1j\n return {try_int(key): numpyfy(value) for key, value in obj.items()}\n if isinstance(obj, list) and len(obj) > 0:\n try:\n a = np.array(obj)\n except ValueError:\n pass\n else:\n if a.dtype in [bool, int, float] or str(a.dtype).startswith(\"<U\"):\n return a\n obj = [numpyfy(value) for value in obj]\n return obj\n\n\ndef ase_decoder_hook(dct):\n \"\"\"JSON decoder hook for ase.Atoms de-serialization.\"\"\"\n if \"__datetime__\" in dct:\n return datetime.datetime.strptime(dct[\"__datetime__\"], \"%Y-%m-%dT%H:%M:%S.%f\")\n if \"__complex__\" in dct:\n return complex(*dct[\"__complex__\"])\n\n if \"__ndarray__\" in dct:\n return create_ndarray(*dct[\"__ndarray__\"])\n\n # No longer used (only here for backwards compatibility):\n if \"__complex_ndarray__\" in dct:\n r, i = (np.array(x) for x in dct[\"__complex_ndarray__\"])\n return r + i * 1j\n\n if \"__ase_objtype__\" in dct:\n objtype = dct.pop(\"__ase_objtype__\")\n dct = numpyfy(dct)\n\n if objtype == \"cell\":\n from ase.cell import Cell\n\n pbc = dct.pop(\"pbc\", None)\n obj = Cell(**dct)\n if pbc is not None:\n obj._pbc = pbc\n else:\n raise RuntimeError(\n \"Do not know how to decode object type {} \"\n \"into an actual object\".format(objtype)\n )\n\n assert obj.ase_objtype == objtype\n return obj\n\n return dct\n\n\ndef serialize_atoms(atoms: ase.Atoms, description: str = \"\") -> str:\n \"\"\"Serialize an ase.Atoms instance to a dictionary.\"\"\"\n dct = {\n \"description\": description,\n \"cell\": atoms.cell,\n \"arrays\": atoms.arrays,\n \"info\": atoms.info,\n \"constraints\": atoms.constraints,\n \"celldisp\": 
atoms.get_celldisp(),\n \"calculator\": atoms.calc,\n }\n return ASEEncoder().encode(dct)\n\n\ndef deserialize_atoms(json_string: str) -> ase.Atoms:\n \"\"\"Deserialize a JSON string to an ase.Atoms instance.\"\"\"\n data = json.JSONDecoder(object_hook=ase_decoder_hook).decode(json_string)\n atoms = ase.Atoms()\n atoms.cell = data[\"cell\"]\n atoms.arrays = numpyfy(data[\"arrays\"])\n atoms.info = data[\"info\"]\n atoms.constraints = [dict2constraint(d) for d in data[\"constraints\"]]\n atoms.set_celldisp(data[\"celldisp\"])\n # TODO ase.calculators.calculator.Calculator has a todict method,\n # but not clear how to convert it back\n\n return atoms\n\n\ndef convert_to_atoms(obj):\n \"\"\"Attempt to convert an object to an ase.Atoms object.\"\"\"\n if isinstance(obj, ase.Atoms):\n return obj\n\n if isinstance(obj, str):\n return deserialize_atoms(obj)\n\n if isinstance(obj, dict):\n return deserialize_atoms(json.loads(obj))\n\n if hasattr(obj, \"lattice\") and hasattr(obj, \"sites\"):\n # we assume the obj is a pymatgen Structure\n\n # from pymatgen.io.ase adaptor\n if not obj.is_ordered:\n raise ValueError(\"ASE Atoms only supports ordered Pymatgen structures\")\n symbols = [str(site.specie.symbol) for site in obj]\n positions = [site.coords for site in obj]\n cell = obj.lattice.matrix\n # TODO test if slab, then use pbc = [True, True, False]\n atoms = ase.Atoms(symbols=symbols, positions=positions, pbc=True, cell=cell)\n\n # additionally, propagate site properties\n for key, array in obj.site_properties.items():\n if key not in atoms.arrays:\n atoms.set_array(key, np.array(array))\n # TODO propagate partial occupancies, and other properties\n\n return atoms\n\n raise TypeError(f\"Cannot convert object of type {obj.__class__.__name__}\")\n" ]
[ [ "numpy.array", "numpy.empty", "numpy.iscomplexobj" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ChengShusss/spt
[ "f68e6b317ae64237aec5d0a058064804a28996d1" ]
[ "src/_test/testSimply.py" ]
[ "#!/usr/local/bin/python3\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\n# Setup, and create the data to plot\ny = np.random.rand(100000)\ny[50000:] *= 2\ny[np.geomspace(10, 50000, 400).astype(int)] = -1\nmpl.rcParams['path.simplify'] = True\n\nmpl.rcParams['path.simplify_threshold'] = 0.0\nplt.plot(y)\nplt.show()\n\nmpl.rcParams['path.simplify_threshold'] = 1.0\nplt.plot(y)\nplt.show()" ]
[ [ "matplotlib.pyplot.plot", "matplotlib.pyplot.show", "numpy.random.rand", "numpy.geomspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
upc-arco/PCA-DIRIE-Pruning
[ "697ce664786e9791c7cf8911e642b3e6e70d7b98" ]
[ "transformer/ffn_layer.py" ]
[ "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Implementation of fully connected network.\"\"\"\n\nimport tensorflow as tf\nfrom pruning import custom_layers\n\nclass FeedForwardNetwork(tf.keras.layers.Layer):\n \"\"\"Fully connected feedforward network.\"\"\"\n\n def __init__(self, hidden_size, filter_size, relu_dropout):\n \"\"\"Initialize FeedForwardNetwork.\n\n Args:\n hidden_size: int, output dim of hidden layer.\n filter_size: int, filter size for the inner (first) dense layer.\n relu_dropout: float, dropout rate for training.\n \"\"\"\n super(FeedForwardNetwork, self).__init__()\n self.hidden_size = hidden_size\n self.filter_size = filter_size\n self.relu_dropout = relu_dropout\n\n def build(self, input_shape):\n self.filter_dense_layer = custom_layers.MyDenseLayer(\n self.filter_size,\n use_bias=True,\n activation=tf.nn.relu,\n name=\"filter_layer\")\n self.output_dense_layer = custom_layers.MyDenseLayer(\n self.hidden_size, use_bias=True, use_mask=True, name=\"output_layer\")\n super(FeedForwardNetwork, self).build(input_shape)\n\n def get_config(self):\n return {\n \"hidden_size\": self.hidden_size,\n \"filter_size\": self.filter_size,\n \"relu_dropout\": self.relu_dropout,\n }\n\n def call(self, x, training):\n \"\"\"Return outputs of the feedforward network.\n\n Args:\n x: tensor with shape [batch_size, length, hidden_size]\n training: boolean, whether in training mode or not.\n\n Returns:\n Output of the feedforward network.\n tensor with shape [batch_size, length, hidden_size]\n \"\"\"\n # Retrieve dynamically known shapes\n output = self.filter_dense_layer(x)\n if training:\n output = tf.nn.dropout(output, rate=self.relu_dropout)\n output = self.output_dense_layer(output)\n\n return output\n" ]
[ [ "tensorflow.nn.dropout" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
batucimenn/eyeTrackingUsingOpenCV
[ "4f6c3249cb52f04208405a9ac48ccf41d0d38364" ]
[ "eyeTrackingUsingOpenCV.py" ]
[ "import cv2\r\nimport numpy as np\r\nimport dlib\r\nimport time\r\nimport matplotlib.pyplot as plt\r\nfrom math import hypot,ceil\r\n\r\n#cap = cv2.VideoCapture(\"projectvideo.mp4\")\r\ncap = cv2.VideoCapture(0)\r\nliste=[]\r\ndetector = dlib.get_frontal_face_detector()\r\npredictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\r\n\r\ndef midpoint(p1 ,p2):\r\n return int((p1.x + p2.x)/2), int((p1.y + p2.y)/2)\r\n\r\nfont = cv2.FONT_HERSHEY_PLAIN\r\n\r\ndef get_blinking_ratio(eye_points, facial_landmarks):\r\n left_point = (facial_landmarks.part(eye_points[0]).x, facial_landmarks.part(eye_points[0]).y)\r\n right_point = (facial_landmarks.part(eye_points[3]).x, facial_landmarks.part(eye_points[3]).y)\r\n center_top = midpoint(facial_landmarks.part(eye_points[1]), facial_landmarks.part(eye_points[2]))\r\n center_bottom = midpoint(facial_landmarks.part(eye_points[5]), facial_landmarks.part(eye_points[4]))\r\n #hor_line = cv2.line(frame, left_point, right_point, (0, 255, 0), 2)\r\n #ver_line = cv2.line(frame, center_top, center_bottom, (0, 255, 0), 2)\r\n hor_line_lenght = hypot((left_point[0] - right_point[0]), (left_point[1] - right_point[1]))\r\n ver_line_lenght = hypot((center_top[0] - center_bottom[0]), (center_top[1] - center_bottom[1]))\r\n ratio = hor_line_lenght / ver_line_lenght\r\n return ratio\r\n\r\ndef get_gaze_ratio(eye_points, facial_landmarks):\r\n left_eye_region = np.array([(facial_landmarks.part(eye_points[0]).x, facial_landmarks.part(eye_points[0]).y),\r\n (facial_landmarks.part(eye_points[1]).x, facial_landmarks.part(eye_points[1]).y),\r\n (facial_landmarks.part(eye_points[2]).x, facial_landmarks.part(eye_points[2]).y),\r\n (facial_landmarks.part(eye_points[3]).x, facial_landmarks.part(eye_points[3]).y),\r\n (facial_landmarks.part(eye_points[4]).x, facial_landmarks.part(eye_points[4]).y),\r\n (facial_landmarks.part(eye_points[5]).x, facial_landmarks.part(eye_points[5]).y)], np.int32)\r\n # cv2.polylines(frame, [left_eye_region], True, (0, 0, 255), 2)\r\n height, width, _ = frame.shape\r\n mask = np.zeros((height, width), np.uint8)\r\n cv2.polylines(mask, [left_eye_region], True, 255, 2)\r\n cv2.fillPoly(mask, [left_eye_region], 255)\r\n eye = cv2.bitwise_and(gray, gray, mask=mask)\r\n min_x = np.min(left_eye_region[:, 0])\r\n max_x = np.max(left_eye_region[:, 0])\r\n min_y = np.min(left_eye_region[:, 1])\r\n max_y = np.max(left_eye_region[:, 1])\r\n gray_eye = eye[min_y: max_y, min_x: max_x ] \r\n _, threshold_eye = cv2.threshold(gray_eye, 70, 255, cv2.THRESH_BINARY)\r\n height, width = threshold_eye.shape\r\n left_side_threshold = threshold_eye[0: height, 0: int(width / 2)]\r\n left_side_white = cv2.countNonZero(left_side_threshold)\r\n right_side_threshold = threshold_eye[0: height, int(width / 2): width]\r\n right_side_white = cv2.countNonZero(right_side_threshold)\r\n if left_side_white == 0:\r\n gaze_ratio = 1\r\n elif right_side_white == 0:\r\n gaze_ratio = 5\r\n else:\r\n gaze_ratio = left_side_white / right_side_white\r\n return gaze_ratio\r\n \r\nstart_time = time.time()\r\nwhile True:\r\n _, frame = cap.read()\r\n new_frame = np.zeros((500, 500, 3), np.uint8)\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n faces = detector(gray)\r\n for face in faces:\r\n #x, y = face.left(), face.top()\r\n #x1, y1 = face.right(), face.bottom()\r\n #cv2.rectangle(frame, (x, y), (x1, y1), (0, 255, 0), 2)\r\n landmarks = predictor(gray, face)\r\n # Detect blinking\r\n left_eye_ratio = get_blinking_ratio([36, 37, 38, 39, 40, 41], landmarks)\r\n 
right_eye_ratio = get_blinking_ratio([42, 43, 44, 45, 46, 47], landmarks)\r\n blinking_ratio = (left_eye_ratio + right_eye_ratio) / 2\r\n #if blinking_ratio > 5.7:\r\n #cv2.putText(frame, \"GOZLER KAPALI\", (50, 150), font, 2, (0, 0, 255),3)\r\n # Gaze detection\r\n gaze_ratio_left_eye = get_gaze_ratio([36, 37, 38, 39, 40, 41], landmarks)\r\n gaze_ratio_right_eye = get_gaze_ratio([42, 43, 44, 45, 46, 47], landmarks)\r\n gaze_ratio = (gaze_ratio_right_eye + gaze_ratio_left_eye) / 2\r\n if blinking_ratio > 5.7:\r\n cv2.putText(frame, \"GOZLER KAPALI\", (50, 150), font, 2, (0, 0, 255),3)\r\n liste.append(0) \r\n elif gaze_ratio <= 1:\r\n cv2.putText(frame, \"SAG\", (50, 100), font, 2, (0, 0, 255), 3)\r\n new_frame[:] = (0, 0, 255)\r\n liste.append(1) \r\n elif 1 < gaze_ratio < 1.5:\r\n cv2.putText(frame, \"ORTA\", (50, 100), font, 2, (0, 0, 255), 3)\r\n liste.append(2) \r\n else:\r\n new_frame[:] = (255, 0, 0)\r\n cv2.putText(frame, \"SOL\", (50, 100), font, 2, (0, 0, 255), 3)\r\n liste.append(1) \r\n #if len(liste)%60==0 and len(liste)>0:\r\n if sum(liste)/len(liste)<=1:\r\n cv2.putText(frame, \"DIKKATSIZ\", (1000,50), font, 2,(0,0,255),3) \r\n elif 1<sum(liste)/len(liste)<=1.5:\r\n cv2.putText(frame, \"ORTA DIKKATLI\", (1000,50), font, 2,(0,0,255),3) \r\n else:\r\n cv2.putText(frame, \"DIKKATLI\", (1000,50), font, 2,(0,0,255),3)\r\n \r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n cv2.imshow(\"Frame\", frame)\r\n cv2.imshow(\"New frame\", new_frame)\r\n\r\n #key = cv2.waitKey(1) \r\n #if key == 27:\r\n # break\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\nstop_time=time.time()\r\ncap.release()\r\ncv2.destroyAllWindows()\r\nprint(stop_time-start_time)\r\n\r\n\r\n\r\nmy_time = stop_time-start_time\r\nmy_frame = ceil(len(liste)/my_time)\r\ntoplam = 0\r\ntoplam_r = 0\r\nsayac = 0\r\nort = []\r\nekstra = len(liste)%my_frame\r\n\r\nfor i in liste:\r\n toplam += i\r\n sayac += 1\r\n if sayac%my_frame == 0:\r\n ort.append(toplam/my_frame)\r\n toplam = 0\r\n\r\nliste.reverse()\r\nprint(liste[1])\r\nif ekstra != 0:\r\n for i in range(ekstra):\r\n toplam_r += liste[i]\r\n \r\n ort.append(toplam_r/ekstra)\r\n\r\nprint(ceil(my_time),ort)\r\n#plot(x, y, color='green', marker='o', linestyle='dashed', linewidth=2, markersize=12)\r\n#plt.plot(range(1,len(ort)+1), ort, color=\"blue\")\r\nplt.scatter(range(1,len(ort)+1), ort, color=\"blue\")\r\nplt.xlabel(\"Zaman [s]\")\r\nplt.ylabel(\"Dikkat Değer Puanı [0-Min 2-Max]]\")\r\nplt.title(\"Zaman - Dikkat Değer Puanı Grafiği\")\r\nplt.grid()\r\nplt.ylim([0, 2])\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n" ]
[ [ "matplotlib.pyplot.title", "numpy.min", "matplotlib.pyplot.ylim", "numpy.max", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jtegedor/poliastro
[ "48c854a4ad5500116f7c09ca171f77a60e2de04b" ]
[ "src/poliastro/twobody/propagation.py" ]
[ "\"\"\"The following script holds the different high level functions for the\ndifferent propagators available at poliastro:\n\n+-------------+------------+-----------------+-----------------+\n| Propagator | Elliptical | Parabolic | Hyperbolic |\n+-------------+------------+-----------------+-----------------+\n| farnocchia | ✓ | ✓ | ✓ |\n+-------------+------------+-----------------+-----------------+\n| vallado | ✓ | ✓ | ✓ |\n+-------------+------------+-----------------+-----------------+\n| mikkola | ✓ | ✓ | ✓ |\n+-------------+------------+-----------------+-----------------+\n| markley | ✓ | x | x |\n+-------------+------------+-----------------+-----------------+\n| pimienta | ✓ | ✓ | x |\n+-------------+------------+-----------------+-----------------+\n| gooding | ✓ | x | x |\n+-------------+------------+-----------------+-----------------+\n| danby | ✓ | ✓ | ✓ |\n+-------------+------------+-----------------+-----------------+\n| cowell | ✓ | ✓ | ✓ |\n+-------------+------------+-----------------+-----------------+\n\n\"\"\"\nimport numpy as np\nfrom astropy import units as u\nfrom astropy.coordinates import CartesianDifferential, CartesianRepresentation\nfrom scipy.integrate import DOP853, solve_ivp\n\nfrom poliastro.core.propagation import (\n danby as danby_fast,\n farnocchia as farnocchia_fast,\n func_twobody,\n gooding as gooding_fast,\n markley as markley_fast,\n mikkola as mikkola_fast,\n pimienta as pimienta_fast,\n vallado as vallado_fast,\n)\n\n\ndef cowell(k, r, v, tofs, rtol=1e-11, *, events=None, f=func_twobody):\n \"\"\"Propagates orbit using Cowell's formulation.\n\n Parameters\n ----------\n k : ~astropy.units.Quantity\n Standard gravitational parameter of the attractor.\n r : ~astropy.units.Quantity\n Position vector.\n v : ~astropy.units.Quantity\n Velocity vector.\n tofs : ~astropy.units.Quantity\n Array of times to propagate.\n rtol : float, optional\n Maximum relative error permitted, default to 1e-10.\n events : function(t, u(t)), optional\n Passed to `solve_ivp`: Integration stops when this function\n returns <= 0., assuming you set events.terminal=True\n f : function(t0, u, k), optional\n Objective function, default to Keplerian-only forces.\n\n Returns\n -------\n rr : ~astropy.units.Quantity\n Propagated position vectors.\n vv : ~astropy.units.Quantity\n Propagated velocity vectors.\n\n Raises\n ------\n RuntimeError\n If the algorithm didn't converge.\n\n Note\n -----\n This method uses the `solve_ivp` method from `scipy.integrate` using the\n Dormand & Prince integration method of order 8(5,3) (DOP853).\n If multiple tofs are provided, the method propagates to the maximum value\n (unless a terminal event is defined) and calculates the other values via dense output.\n\n \"\"\"\n k = k.to_value(u.km ** 3 / u.s ** 2)\n x, y, z = r.to_value(u.km)\n vx, vy, vz = v.to_value(u.km / u.s)\n tofs = tofs.to_value(u.s)\n\n u0 = np.array([x, y, z, vx, vy, vz])\n\n result = solve_ivp(\n f,\n (0, max(tofs)),\n u0,\n args=(k,),\n rtol=rtol,\n atol=1e-12,\n method=DOP853,\n dense_output=True,\n events=events,\n )\n if not result.success:\n raise RuntimeError(\"Integration failed\")\n\n if events is not None:\n # Collect only the terminal events\n terminal_events = [event for event in events if event.terminal]\n\n # If there are no terminal events, then the last time of integration is the\n # greatest one from the original array of propagation times\n if not terminal_events:\n last_t = max(tofs)\n else:\n # Filter the event which triggered first\n last_t = 
min([event.last_t for event in terminal_events]).to_value(u.s)\n tofs = [tof for tof in tofs if tof < last_t] + [last_t]\n\n rrs = []\n vvs = []\n for i in range(len(tofs)):\n t = tofs[i]\n y = result.sol(t)\n rrs.append(y[:3])\n vvs.append(y[3:])\n\n return rrs * u.km, vvs * u.km / u.s\n\n\ndef farnocchia(k, r, v, tofs, **kwargs):\n \"\"\"Propagates orbit.\n\n Parameters\n ----------\n k : ~astropy.units.Quantity\n Standard gravitational parameter of the attractor.\n r : ~astropy.units.Quantity\n Position vector.\n v : ~astropy.units.Quantity\n Velocity vector.\n tofs : ~astropy.units.Quantity\n Array of times to propagate.\n\n Returns\n -------\n rr : ~astropy.units.Quantity\n Propagated position vectors.\n vv : ~astropy.units.Quantity\n Propagated velocity vectors.\n\n \"\"\"\n k = k.to_value(u.km ** 3 / u.s ** 2)\n r0 = r.to_value(u.km)\n v0 = v.to_value(u.km / u.s)\n tofs = tofs.to_value(u.s)\n\n results = np.array([farnocchia_fast(k, r0, v0, tof) for tof in tofs])\n return (\n results[:, 0] << u.km,\n results[:, 1] << u.km / u.s,\n )\n\n\ndef vallado(k, r, v, tofs, numiter=350, **kwargs):\n \"\"\"Propagates Keplerian orbit.\n\n Parameters\n ----------\n k : ~astropy.units.Quantity\n Standard gravitational parameter of the attractor.\n r : ~astropy.units.Quantity\n Position vector.\n v : ~astropy.units.Quantity\n Velocity vector.\n tofs : ~astropy.units.Quantity\n Array of times to propagate.\n numiter : int, optional\n Maximum number of iterations, default to 35.\n\n Returns\n -------\n rr : ~astropy.units.Quantity\n Propagated position vectors.\n vv : ~astropy.units.Quantity\n Propagated velocity vectors.\n\n Raises\n ------\n RuntimeError\n If the algorithm didn't converge.\n\n Note\n -----\n This algorithm is based on Vallado implementation, and does basic Newton\n iteration on the Kepler equation written using universal variables. Battin\n claims his algorithm uses the same amount of memory but is between 40 %\n and 85 % faster.\n\n \"\"\"\n k = k.to_value(u.km ** 3 / u.s ** 2)\n r0 = r.to_value(u.km)\n v0 = v.to_value(u.km / u.s)\n tofs = tofs.to_value(u.s)\n\n results = np.array([_kepler(k, r0, v0, tof, numiter=numiter) for tof in tofs])\n return (\n results[:, 0] << u.km,\n results[:, 1] << u.km / u.s,\n )\n\n\ndef _kepler(k, r0, v0, tof, *, numiter):\n # Compute Lagrange coefficients\n f, g, fdot, gdot = vallado_fast(k, r0, v0, tof, numiter)\n\n assert np.abs(f * gdot - fdot * g - 1) < 1e-5 # Fixed tolerance\n\n # Return position and velocity vectors\n r = f * r0 + g * v0\n v = fdot * r0 + gdot * v0\n\n return r, v\n\n\ndef mikkola(k, r, v, tofs, rtol=None):\n \"\"\"Solves Kepler Equation by a cubic approximation. 
This method is valid\n no mater the orbit's nature.\n\n Parameters\n ----------\n k : ~astropy.units.Quantity\n Standard gravitational parameter of the attractor.\n r : ~astropy.units.Quantity\n Position vector.\n v : ~astropy.units.Quantity\n Velocity vector.\n tofs : ~astropy.units.Quantity\n Array of times to propagate.\n rtol: float\n This method does not require of tolerance since it is non iterative.\n\n Returns\n -------\n rr : ~astropy.units.Quantity\n Propagated position vectors.\n vv : ~astropy.units.Quantity\n\n Note\n ----\n This method was derived by Seppo Mikola in his paper *A Cubic Approximation\n For Kepler's Equation* with DOI: https://doi.org/10.1007/BF01235850\n\n \"\"\"\n\n k = k.to_value(u.m ** 3 / u.s ** 2)\n r0 = r.to_value(u.m)\n v0 = v.to_value(u.m / u.s)\n tofs = tofs.to_value(u.s)\n\n results = np.array([mikkola_fast(k, r0, v0, tof) for tof in tofs])\n return (\n results[:, 0] << u.m,\n results[:, 1] << u.m / u.s,\n )\n\n\ndef markley(k, r, v, tofs, rtol=None):\n \"\"\"Elliptical Kepler Equation solver based on a fifth-order\n refinement of the solution of a cubic equation.\n\n Parameters\n ----------\n k : ~astropy.units.Quantity\n Standard gravitational parameter of the attractor.\n r : ~astropy.units.Quantity\n Position vector.\n v : ~astropy.units.Quantity\n Velocity vector.\n tofs : ~astropy.units.Quantity\n Array of times to propagate.\n rtol: float\n This method does not require of tolerance since it is non iterative.\n\n Returns\n -------\n rr : ~astropy.units.Quantity\n Propagated position vectors.\n vv : ~astropy.units.Quantity\n Propagated velocity vectors.\n\n Note\n ----\n This method was originally presented by Markley in his paper *Kepler Equation Solver*\n with DOI: https://doi.org/10.1007/BF00691917\n\n \"\"\"\n\n k = k.to_value(u.m ** 3 / u.s ** 2)\n r0 = r.to_value(u.m)\n v0 = v.to_value(u.m / u.s)\n tofs = tofs.to_value(u.s)\n\n results = np.array([markley_fast(k, r0, v0, tof) for tof in tofs])\n return (\n results[:, 0] << u.m,\n results[:, 1] << u.m / u.s,\n )\n\n\ndef pimienta(k, r, v, tofs, rtol=None):\n \"\"\"Kepler solver for both elliptic and parabolic orbits based on a 15th\n order polynomial with accuracies around 10e-5 for elliptic case and 10e-13\n in the hyperbolic regime.\n\n Parameters\n ----------\n k : ~astropy.units.Quantity\n Standard gravitational parameter of the attractor.\n r : ~astropy.units.Quantity\n Position vector.\n v : ~astropy.units.Quantity\n Velocity vector.\n tofs : ~astropy.units.Quantity\n Array of times to propagate.\n rtol: float\n This method does not require of tolerance since it is non iterative.\n\n Returns\n -------\n rr : ~astropy.units.Quantity\n Propagated position vectors.\n vv : ~astropy.units.Quantity\n Propagated velocity vectors.\n\n Note\n ----\n This algorithm was developed by Pimienta-Peñalver and John L. Crassidis in\n their paper *Accurate Kepler Equation solver without trascendental function\n evaluations*. Original paper is on Buffalo's UBIR repository: http://hdl.handle.net/10477/50522\n\n \"\"\"\n\n k = k.to_value(u.m ** 3 / u.s ** 2)\n r0 = r.to_value(u.m)\n v0 = v.to_value(u.m / u.s)\n tofs = tofs.to_value(u.s)\n\n results = np.array([pimienta_fast(k, r0, v0, tof) for tof in tofs])\n return (\n results[:, 0] << u.m,\n results[:, 1] << u.m / u.s,\n )\n\n\ndef gooding(k, r, v, tofs, numiter=150, rtol=1e-8):\n \"\"\"Solves the Elliptic Kepler Equation with a cubic convergence and\n accuracy better than 10e-12 rad is normally achieved. 
It is not valid for\n eccentricities equal or greater than 1.0.\n\n Parameters\n ----------\n k : ~astropy.units.Quantity\n Standard gravitational parameter of the attractor.\n r : ~astropy.units.Quantity\n Position vector.\n v : ~astropy.units.Quantity\n Velocity vector.\n tofs : ~astropy.units.Quantity\n Array of times to propagate.\n rtol: float\n This method does not require of tolerance since it is non iterative.\n\n Returns\n -------\n rr : ~astropy.units.Quantity\n Propagated position vectors.\n vv : ~astropy.units.Quantity\n\n Note\n ----\n This method was developed by Gooding and Odell in their paper *The\n hyperbolic Kepler equation (and the elliptic equation revisited)* with\n DOI: https://doi.org/10.1007/BF01235540\n\n \"\"\"\n\n k = k.to_value(u.m ** 3 / u.s ** 2)\n r0 = r.to_value(u.m)\n v0 = v.to_value(u.m / u.s)\n tofs = tofs.to_value(u.s)\n\n results = np.array(\n [gooding_fast(k, r0, v0, tof, numiter=numiter, rtol=rtol) for tof in tofs]\n )\n return (\n results[:, 0] << u.m,\n results[:, 1] << u.m / u.s,\n )\n\n\ndef danby(k, r, v, tofs, rtol=1e-8):\n \"\"\"Kepler solver for both elliptic and parabolic orbits based on Danby's\n algorithm.\n\n Parameters\n ----------\n k : ~astropy.units.Quantity\n Standard gravitational parameter of the attractor.\n r : ~astropy.units.Quantity\n Position vector.\n v : ~astropy.units.Quantity\n Velocity vector.\n tofs : ~astropy.units.Quantity\n Array of times to propagate.\n rtol: float\n Relative error for accuracy of the method.\n\n Returns\n -------\n rr : ~astropy.units.Quantity\n Propagated position vectors.\n vv : ~astropy.units.Quantity\n Propagated velocity vectors.\n\n Note\n ----\n This algorithm was developed by Danby in his paper *The solution of Kepler\n Equation* with DOI: https://doi.org/10.1007/BF01686811\n\n \"\"\"\n\n k = k.to_value(u.m ** 3 / u.s ** 2)\n r0 = r.to_value(u.m)\n v0 = v.to_value(u.m / u.s)\n tofs = tofs.to_value(u.s)\n\n results = np.array([danby_fast(k, r0, v0, tof) for tof in tofs])\n return (\n results[:, 0] << u.m,\n results[:, 1] << u.m / u.s,\n )\n\n\ndef propagate(orbit, time_of_flight, *, method=farnocchia, rtol=1e-10, **kwargs):\n \"\"\"Propagate an orbit some time and return the result.\n\n Parameters\n ----------\n orbit : ~poliastro.twobody.Orbit\n Orbit object to propagate.\n time_of_flight : ~astropy.time.TimeDelta\n Time of propagation.\n method : callable, optional\n Propagation method, default to farnocchia.\n rtol : float, optional\n Relative tolerance, default to 1e-10.\n\n Returns\n -------\n astropy.coordinates.CartesianRepresentation\n Propagation coordinates.\n \"\"\"\n\n # Check if propagator fulfills orbit requirements\n if orbit.ecc < 1.0 and method not in ELLIPTIC_PROPAGATORS:\n raise ValueError(\n \"Can not use an parabolic/hyperbolic propagator for elliptical/circular orbits.\"\n )\n elif orbit.ecc == 1.0 and method not in PARABOLIC_PROPAGATORS:\n raise ValueError(\n \"Can not use an elliptic/hyperbolic propagator for parabolic orbits.\"\n )\n elif orbit.ecc > 1.0 and method not in HYPERBOLIC_PROPAGATORS:\n raise ValueError(\n \"Can not use an elliptic/parabolic propagator for hyperbolic orbits.\"\n )\n\n rr, vv = method(\n orbit.attractor.k,\n orbit.r,\n orbit.v,\n time_of_flight.reshape(-1).to(u.s),\n rtol=rtol,\n **kwargs\n )\n\n cartesian = CartesianRepresentation(\n rr, differentials=CartesianDifferential(vv, xyz_axis=1), xyz_axis=1\n )\n\n return cartesian\n\n\nELLIPTIC_PROPAGATORS = [\n farnocchia,\n vallado,\n mikkola,\n markley,\n pimienta,\n gooding,\n danby,\n 
cowell,\n]\nPARABOLIC_PROPAGATORS = [farnocchia, vallado, mikkola, pimienta, gooding, cowell]\nHYPERBOLIC_PROPAGATORS = [\n farnocchia,\n vallado,\n mikkola,\n pimienta,\n gooding,\n danby,\n cowell,\n]\nALL_PROPAGATORS = list(\n set(ELLIPTIC_PROPAGATORS + PARABOLIC_PROPAGATORS + HYPERBOLIC_PROPAGATORS)\n)\n" ]
[ [ "numpy.array", "numpy.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
p517332051/face_benchmark
[ "c76c2b2142ecf65b7bace4b007a33fa4e795d2d0", "c76c2b2142ecf65b7bace4b007a33fa4e795d2d0" ]
[ "maskrcnn_benchmark/data/datasets/FaceDataset.py", "tools/train_face_netDivFC.py" ]
[ "import os\nimport numpy as np\nimport torch\nimport torch.utils.data as data\nimport random\nimport tqdm\nfrom PIL import Image\nclass FaceDataset(data.Dataset):\n def __init__(self, data_dir, ann_file, transforms=None, augmenter=None,im_info=[112,96]):\n assert transforms is not None\n\n self.root = data_dir\n self.file_list = ann_file\n self.augmenter = augmenter\n self.transform = transforms\n self.im_info = im_info\n image_list = []\n label_list = []\n with open(ann_file) as f:\n img_label_list = f.read().splitlines()\n\n self.image_label_list = []\n for info in img_label_list:\n image_path, label_name = info.split(' ')\n self.image_label_list.append([image_path, int(label_name)])\n image_list.append(image_path)\n label_list.append(int(label_name))\n\n self.image_list = image_list\n self.label_list = label_list\n self.class_nums = len(set(self.label_list))\n # self.class_nums = max(self.label_list)\n print(\"dataset size: \", len(self.image_list), '/', self.class_nums)\n\n def __getitem__(self, index):\n img_path = self.image_list[index]\n label = self.label_list[index]\n p = random.random()\n img = Image.open(os.path.join(self.root, img_path)).convert('RGB')\n if self.augmenter is not None and p<=0.2:\n img_array = np.asarray(img)\n img_array = self.augmenter.augment_image(img_array)\n img = Image.fromarray(img_array.astype('uint8')).convert('RGB')\n img = self.transform(img)\n\n return img, label, index\n\n def __len__(self):\n return len(self.image_list)\n\n def get_img_info(self, index):\n return {\"height\": self.im_info[0], \"width\": self.im_info[1]}\n# def FaceDataset():\n# return FR_train_data\n\n\n\nclass TripletFaceDataset(data.Dataset):\n\n def __init__(self, data_dir, ann_file, n_triplets, transforms=None, augmenter=None,im_info=[112,96]):\n\n assert transforms is not None\n self.root = data_dir\n self.file_list = ann_file\n self.augmenter = augmenter\n self.transform = transforms\n self.im_info = im_info\n image_list = []\n label_list = []\n with open(self.file_list) as f:\n img_label_list = f.read().splitlines()\n self.image_label_list = []\n for info in img_label_list:\n image_path, label_name = info.split(' ')\n self.image_label_list.append([image_path, int(label_name)])\n image_list.append(image_path)\n label_list.append(int(label_name))\n\n self.image_list = image_list\n self.label_list = label_list\n self.class_nums = len(set(self.label_list))\n # self.class_nums = max(self.label_list)\n print(\"dataset size: \", len(self.image_list), '/', self.class_nums)\n\n self.n_triplets = n_triplets\n\n print('Generating {} triplets'.format(self.n_triplets))\n self.training_triplets = self.generate_triplets(self.image_list, self.label_list, self.n_triplets,self.class_nums)\n\n @staticmethod\n def generate_triplets(imgs, labels, num_triplets, n_classes):\n def create_indices(imgs, labels):\n inds = dict()\n for idx, img_path in enumerate(imgs):\n label = labels[idx]\n if label not in inds:\n inds[label] = []\n inds[label].append(img_path)\n return inds\n\n triplets = []\n # Indices = array of labels and each label is an array of indices\n indices = create_indices(imgs, labels)\n\n for x in range(num_triplets):\n c1 = np.random.randint(0, n_classes-1)\n c2 = np.random.randint(0, n_classes-1)\n while len(indices[c1]) < 2:\n c1 = np.random.randint(0, n_classes-1)\n\n while c1 == c2:\n c2 = np.random.randint(0, n_classes-1)\n if len(indices[c1]) == 2: # hack to speed up process\n n1, n2 = 0, 1\n else:\n n1 = np.random.randint(0, len(indices[c1]) - 1)\n n2 = np.random.randint(0, 
len(indices[c1]) - 1)\n while n1 == n2:\n n2 = np.random.randint(0, len(indices[c1]) - 1)\n if len(indices[c2]) ==1:\n n3 = 0\n else:\n n3 = np.random.randint(0, len(indices[c2]) - 1)\n\n triplets.append([indices[c1][n1], indices[c1][n2], indices[c2][n3],c1,c2])\n return triplets\n def loader(self,img_path):\n p = random.random()\n img = Image.open(os.path.join(self.root, img_path)).convert('RGB')\n if self.augmenter is not None and p<=0.2:\n img_array = np.asarray(img)\n img_array = self.augmenter.augment_image(img_array)\n img = Image.fromarray(img_array.astype('uint8')).convert('RGB')\n return img\n def __getitem__(self, index):\n '''\n Args:\n index: Index of the triplet or the matches - not of a single image\n Returns:\n '''\n def transform(img_path):\n \"\"\"Convert image into numpy array and apply transformation\n Doing this so that it is consistent with all other datasets\n to return a PIL Image.\n \"\"\"\n\n img = self.loader(img_path)\n return self.transform(img)\n\n # Get the index of each image in the triplet\n a, p, n,c1,c2 = self.training_triplets[index]\n\n # transform images if required\n img_a, img_p, img_n = transform(a), transform(p), transform(n)\n return img_a, img_p, img_n,c1,c2\n\n def __len__(self):\n return len(self.training_triplets)\n def get_img_info(self, index):\n return {\"height\": self.im_info[0], \"width\": self.im_info[1]}", "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nr\"\"\"\nBasic training script for PyTorch\n\"\"\"\n\n# Set up custom environment before nearly anything else is imported\n# NOTE: this should be the first import (no not reorder)\nimport os,sys\nsys.path.insert(0,'/data/hongwei/face_benchmark')\nfrom maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip\n#172.24.42.80\nimport argparse\n\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"4,5,6,7\"\nimport torch\nimport torchvision.transforms as T\nfrom maskrcnn_benchmark.config import face_cfg as cfg\nfrom maskrcnn_benchmark.data import make_face_data_loader\nfrom maskrcnn_benchmark.solver import make_lr_scheduler\nfrom maskrcnn_benchmark.solver import make_optimizer\nfrom maskrcnn_benchmark.engine.inference import inference\n\nfrom maskrcnn_benchmark.engine import do_face_train_dist_DIV_FC,do_face_train_dist\n\nfrom maskrcnn_benchmark.modeling.face_reg import build_dist_face_trainer\nfrom maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer\nfrom maskrcnn_benchmark.utils.collect_env import collect_env_info\nfrom maskrcnn_benchmark.utils.comm import synchronize, \\\n get_rank\nfrom maskrcnn_benchmark.utils.imports import import_file\nfrom maskrcnn_benchmark.utils.logger import setup_logger\nfrom maskrcnn_benchmark.utils.miscellaneous import mkdir\nfrom maskrcnn_benchmark.modeling.face_reg import FaceDistributedDataParallel\nfrom maskrcnn_benchmark.modeling.face_reg import face_trainer\n\ntry:\n from apex import amp\nexcept ImportError:\n raise ImportError('Use APEX for multi-precision via apex.amp')\n\n\n\n\ndef train(cfg, local_rank, distributed):\n model,head = build_dist_face_trainer(cfg,local_rank)\n device = torch.device(cfg.MODEL.DEVICE)\n model.to(device)\n if cfg.MODEL.USE_SYNCBN:\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n if True:\n model = FaceDistributedDataParallel(\n model, device_ids=local_rank, output_device=local_rank,\n # this should be removed if we update BatchNorm stats\n broadcast_buffers=False,chunk_sizes=None, #[32,56,56,56]\n )\n head_local_rank=None\n if len(local_rank)==1:\n 
head_local_rank = local_rank\n head = FaceDistributedDataParallel(\n head, device_ids=head_local_rank, output_device=head_local_rank,\n # this should be removed if we update BatchNorm stats\n broadcast_buffers=False,\n )\n model = torch.nn.Sequential(*[model, head])\n optimizer = make_optimizer(cfg, model)\n scheduler = make_lr_scheduler(cfg, optimizer)\n # head_optimizer = make_optimizer(cfg, head)\n # head_scheduler = make_lr_scheduler(cfg, head_optimizer)\n # Initialize mixed-precision training\n use_mixed_precision = cfg.DTYPE == \"float16\"\n amp_opt_level = 'O1' if use_mixed_precision else 'O0'\n model, optimizer = amp.initialize(model, optimizer, opt_level=amp_opt_level)\n # head, head_optimizer = amp.initialize(head, head_optimizer, opt_level=amp_opt_level)\n\n\n arguments = {}\n arguments[\"iteration\"] = 0\n output_dir = cfg.OUTPUT_DIR\n save_to_disk = get_rank() == 0\n checkpointer = DetectronCheckpointer(\n cfg, model, optimizer, scheduler, output_dir, save_to_disk\n )\n # head_checkpointer = DetectronCheckpointer(\n # cfg, head, head_optimizer, head_scheduler, output_dir, save_to_disk\n # )\n extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT)\n arguments.update(extra_checkpoint_data)\n\n\n #### init transforms #####\n transforms = T.Compose(\n [\n T.RandomCrop( (cfg.INPUT.SIZE_TRAIN[0], cfg.INPUT.SIZE_TRAIN[1]) ),\n T.RandomHorizontalFlip(),\n T.ToTensor(),\n T.Normalize(mean=cfg.INPUT.RGB_MEAN, std=cfg.INPUT.RGB_STD),\n ]\n )\n data_loader = make_face_data_loader(\n cfg,\n is_train=True,\n is_distributed=distributed,\n start_iter=arguments[\"iteration\"],\n transforms=transforms,\n )\n test_period = cfg.SOLVER.TEST_PERIOD\n checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD\n divs_nums = cfg.SOLVER.DIVS_NUMS_PER_BATCH\n do_face_train_dist_DIV_FC(\n cfg,\n model,#[model,head],\n data_loader,\n None,\n optimizer,#[optimizer,head_optimizer],\n scheduler,#[scheduler,head_scheduler],\n checkpointer,#[checkpointer,head_checkpointer],\n device,\n checkpoint_period,\n test_period,\n arguments,\n divs_nums,\n )\n return model\n\n\ndef run_test(cfg, model, distributed):\n if distributed:\n model = model.module\n torch.cuda.empty_cache() # TODO check if it helps\n iou_types = (\"bbox\",)\n if cfg.MODEL.MASK_ON:\n iou_types = iou_types + (\"segm\",)\n if cfg.MODEL.KEYPOINT_ON:\n iou_types = iou_types + (\"keypoints\",)\n output_folders = [None] * len(cfg.DATASETS.TEST)\n dataset_names = cfg.DATASETS.TEST\n if cfg.OUTPUT_DIR:\n for idx, dataset_name in enumerate(dataset_names):\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\", dataset_name)\n mkdir(output_folder)\n output_folders[idx] = output_folder\n data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)\n for output_folder, dataset_name, data_loader_val in zip(output_folders, dataset_names, data_loaders_val):\n inference(\n model,\n data_loader_val,\n dataset_name=dataset_name,\n iou_types=iou_types,\n box_only=False if cfg.MODEL.FCOS_ON or cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,\n device=cfg.MODEL.DEVICE,\n expected_results=cfg.TEST.EXPECTED_RESULTS,\n expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,\n output_folder=output_folder,\n )\n synchronize()\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"PyTorch Object Detection Training\")\n parser.add_argument(\n \"--config-file\",\n default=\"\",\n metavar=\"FILE\",\n help=\"path to config file\",\n type=str,\n )\n parser.add_argument(\"--local_rank\", type=int, default=0)\n 
parser.add_argument(\"--ngpu_shared_fc\", type=list, default=1)\n parser.add_argument(\n \"--skip-test\",\n dest=\"skip_test\",\n help=\"Do not test the final model\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"opts\",\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER,\n )\n\n args = parser.parse_args()\n\n num_gpus = int(os.environ[\"WORLD_SIZE\"]) if \"WORLD_SIZE\" in os.environ else 1\n args.distributed = num_gpus > 1\n size = int(os.environ[\"WORLD_SIZE\"]) if \"WORLD_SIZE\" in os.environ else 1\n \"MASTER_ADDR\"\n \"MASTER_PORT\"\n \"RANK\"\n \"WORLD_SIZE\"\n if True:\n torch.cuda.set_device(args.local_rank)\n torch.distributed.init_process_group(\n backend=\"nccl\", init_method=\"env://\",#rank=args.local_rank,world_size=size\n )\n synchronize()\n\n\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n output_dir = cfg.OUTPUT_DIR\n if output_dir:\n mkdir(output_dir)\n\n logger = setup_logger(\"maskrcnn_benchmark\", output_dir, get_rank())\n logger.info(\"Using {} GPUs\".format(num_gpus))\n logger.info(args)\n\n logger.info(\"Collecting env info (might take some time)\")\n logger.info(\"\\n\" + collect_env_info())\n\n logger.info(\"Loaded configuration file {}\".format(args.config_file))\n with open(args.config_file, \"r\") as cf:\n config_str = \"\\n\" + cf.read()\n logger.info(config_str)\n logger.info(\"Running with config:\\n{}\".format(cfg))\n proc_gpus = [int(i) for i in args.ngpu_shared_fc]\n model = train(cfg,proc_gpus, args.distributed)\n\n if not args.skip_test:\n run_test(cfg, model, args.distributed)\nif __name__ == \"__main__\":\n main()\n\n##--nnodes=2 --node_rank=0 --master_addr=\"192.168.1.1\"\n## 杀掉所有python进程 ps aux|grep python|grep -v grep|grep -v usr|cut -c 9-15|xargs kill -9\n# python tools/Muti_GPUS_Train.py --ngpus_per_node=8 --npgpu_per_proc=1 tools/train_face_netDivFC.py --skip-test --config-file configs/face_reg/face_net_msra_celeb.yaml DATALOADER.NUM_WORKERS 16 OUTPUT_DIR" ]
[ [ "numpy.asarray", "numpy.random.randint" ], [ "torch.nn.Sequential", "torch.distributed.init_process_group", "torch.cuda.set_device", "torch.cuda.empty_cache", "torch.nn.SyncBatchNorm.convert_sync_batchnorm", "torch.device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
peternara/DeepHash
[ "c6f4c6733f619718d437bf39ef9fb6854476f20c" ]
[ "DeepHash/model/dtq/util.py" ]
[ "import numpy as np\nimport math\nfrom distance.npversion import distance\n\nclass Dataset(object):\n def __init__(self, dataset, output_dim, code_dim):\n self._dataset = dataset\n self.n_samples = dataset.n_samples\n self._train = dataset.train\n self._output = np.zeros((self.n_samples, output_dim), dtype=np.float32)\n # code_dim = K 수를 의미 = config.subspace * config.subcenter\n self._codes = np.zeros((self.n_samples, code_dim), dtype=np.float32)\n self._triplets = np.array([])\n self._trip_index_in_epoch = 0\n self._index_in_epoch = 0\n self._epochs_complete = 0\n self._perm = np.arange(self.n_samples)\n np.random.shuffle(self._perm)\n return\n\n def update_triplets(self, margin, n_part=10, dist_type='euclidean2', select_strategy='margin'):\n \"\"\"\n :param select_strategy: hard, all, margin\n :param dist_type: distance type, e.g. euclidean2, cosine\n :param margin: triplet margin parameter\n :n_part: number of part to split data\n \"\"\"\n n_samples = self.n_samples\n np.random.shuffle(self._perm)\n embedding = self._output[self._perm[:n_samples]]\n labels = self._dataset.get_labels()[self._perm[:n_samples]]\n n_samples_per_part = int(math.ceil(n_samples / n_part))\n triplets = []\n for i in range(n_part):\n start = n_samples_per_part * i\n end = min(n_samples_per_part * (i+1), n_samples)\n dist = distance(embedding[start:end], pair=True, dist_type=dist_type)\n for idx_anchor in range(0, end - start):\n label_anchor = np.copy(labels[idx_anchor+start, :])\n label_anchor[label_anchor==0] = -1\n all_pos = np.where(np.any(labels[start:end] == label_anchor, axis=1))[0]\n all_neg = np.array(list(set(range(end-start)) - set(all_pos)))\n\n if select_strategy == 'hard':\n idx_pos = all_pos[np.argmax(dist[idx_anchor, all_pos])]\n if idx_pos == idx_anchor:\n continue\n idx_neg = all_neg[np.argmin(dist[idx_anchor, all_neg])]\n triplets.append((idx_anchor + start, idx_pos + start, idx_neg + start))\n continue\n\n for idx_pos in all_pos:\n if idx_pos == idx_anchor:\n continue\n\n if select_strategy == 'all':\n selected_neg = all_neg\n elif select_strategy == 'margin':\n selected_neg = all_neg[np.where(dist[idx_anchor, all_neg] - dist[idx_anchor, idx_pos] < margin)[0]]\n\n if selected_neg.shape[0] > 0:\n idx_neg = np.random.choice(selected_neg)\n triplets.append((idx_anchor + start, idx_pos + start, idx_neg + start))\n self._triplets = np.array(triplets)\n np.random.shuffle(self._triplets)\n\n # assert\n anchor = labels[self._triplets[:, 0]]\n mapper = lambda anchor, other: np.any(anchor * (anchor == other), -1)\n assert(np.all(mapper(anchor, labels[self._triplets[:, 1]])))\n assert(np.all(np.invert(anchor, labels[self._triplets[:, 2]])))\n return\n\n def next_batch_triplet(self, batch_size):\n \"\"\"\n Args:\n batch_size\n Returns:\n data, label, codes\n \"\"\"\n start = self._trip_index_in_epoch\n self._trip_index_in_epoch += batch_size\n if self._trip_index_in_epoch > self.triplets.shape[0]:\n start = 0\n self._trip_index_in_epoch = batch_size\n end = self._trip_index_in_epoch\n\n # stack index of anchors, positive, negetive to one array\n arr = self.triplets[start:end]\n idx = self._perm[np.concatenate([arr[:, 0], arr[:, 1], arr[:, 2]], axis=0)]\n data, label = self._dataset.data(idx)\n\n return data, label, self._codes[idx]\n\n def next_batch(self, batch_size):\n \"\"\"\n Args:\n batch_size\n Returns:\n [batch_size, (n_inputs)]: next batch images, by stacking anchor, positive, negetive\n [batch_size, n_class]: next batch labels\n \"\"\"\n start = self._index_in_epoch\n self._index_in_epoch += 
batch_size\n if self._index_in_epoch > self.n_samples:\n if self._train:\n self._epochs_complete += 1\n start = 0\n self._index_in_epoch = batch_size\n else:\n # Validation stage only process once\n start = self.n_samples - batch_size\n self._index_in_epoch = self.n_samples\n end = self._index_in_epoch\n\n data, label = self._dataset.data(self._perm[start:end])\n return (data, label, self._codes[self._perm[start: end], :])\n\n def next_batch_output_codes(self, batch_size):\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n # Another epoch finish\n if self._index_in_epoch > self.n_samples:\n if self._train:\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n else:\n # Validation stage only process once\n start = self.n_samples - batch_size\n self._index_in_epoch = self.n_samples\n end = self._index_in_epoch\n\n return (self._output[self._perm[start: end], :],\n self._codes[self._perm[start: end], :])\n\n def feed_batch_output(self, batch_size, output):\n start = self._index_in_epoch - batch_size\n end = self._index_in_epoch\n self._output[self._perm[start:end], :] = output\n return\n\n def feed_batch_triplet_output(self, batch_size, triplet_output):\n anchor, pos, neg = np.split(triplet_output, 3, axis=0)\n start = self._trip_index_in_epoch - batch_size\n end = self._trip_index_in_epoch\n idx = self._perm[self._triplets[start:end, :]]\n self._output[idx[:, 0]] = anchor\n self._output[idx[:, 1]] = pos\n self._output[idx[:, 2]] = neg\n return\n\n def feed_batch_codes(self, batch_size, codes):\n \"\"\"\n Args:\n batch_size\n [batch_size, n_output]\n \"\"\"\n start = self._index_in_epoch - batch_size\n end = self._index_in_epoch\n self._codes[self._perm[start:end], :] = codes\n return\n\n @property\n def output(self):\n return self._output\n\n @property\n def codes(self):\n return self._codes\n\n @property\n def triplets(self):\n return self._triplets\n\n @property\n def label(self):\n return self._dataset.get_labels()\n\n def finish_epoch(self):\n self._index_in_epoch = 0\n" ]
[ [ "numpy.split", "numpy.invert", "numpy.random.choice", "numpy.arange", "numpy.random.shuffle", "numpy.concatenate", "numpy.copy", "numpy.argmax", "numpy.argmin", "numpy.any", "numpy.array", "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fredriko/search_with_machine_learning_course
[ "85670d7adf337fede418fa5665b3c5ee80e42b2b" ]
[ "index_queries.py" ]
[ "import click\nimport pandas as pd\nfrom opensearchpy import OpenSearch\nfrom opensearchpy.helpers import bulk\n\nimport logging\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\nlogging.basicConfig(format='%(levelname)s:%(message)s')\n\n\ndef get_opensearch():\n host = 'localhost'\n port = 9200\n auth = ('admin', 'admin')\n client = OpenSearch(\n hosts=[{'host': host, 'port': port}],\n http_compress=True, # enables gzip compression for request bodies\n http_auth=auth,\n # client_cert = client_cert_path,\n # client_key = client_key_path,\n use_ssl=True,\n verify_certs=False,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n # ca_certs=ca_certs_path\n )\n return client\n\n\[email protected]()\[email protected]('--source_file', '-s', help='source csv file', required=True)\ndef main(source_file):\n index_name = 'bbuy_queries'\n client = get_opensearch()\n ds = pd.read_csv(source_file)\n # print(ds.columns)\n ds['click_time'] = pd.to_datetime(ds['click_time'])\n ds['query_time'] = pd.to_datetime(ds['query_time'])\n # print(ds.dtypes)\n docs = []\n for idx, row in ds.iterrows():\n doc = {}\n for col in ds.columns:\n doc[col] = row[col]\n docs.append({'_index': index_name, '_source': doc})\n if idx % 300 == 0:\n bulk(client, docs, request_timeout=60)\n logger.info(f'{idx} documents indexed')\n docs = []\n if len(docs) > 0:\n bulk(client, docs, request_timeout=60)\n logger.info(f'Done indexing {ds.shape[0]} records')\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.read_csv", "pandas.to_datetime" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
blester125/baseline
[ "4ad4147d4a88a42b309c6784a95b0b9f1faa2c60", "4ad4147d4a88a42b309c6784a95b0b9f1faa2c60", "4ad4147d4a88a42b309c6784a95b0b9f1faa2c60", "4ad4147d4a88a42b309c6784a95b0b9f1faa2c60" ]
[ "baseline/tf/seq2seq/training/datasets.py", "layers/eight_mile/tf/embeddings.py", "api-examples/layers_classify_pytorch.py", "api-examples/layers_classify_hf_nlp_pytorch.py" ]
[ "import tensorflow as tf\nfrom baseline.tf.tfy import TRAIN_FLAG\nfrom eight_mile.utils import listify\nfrom baseline.utils import get_model_file, get_metric_cmp\nfrom baseline.train import create_trainer, register_training_func\nfrom baseline.tf.seq2seq.training.utils import to_tensors, SHUF_BUF_SZ, NUM_PREFETCH\n\n\n@register_training_func('seq2seq', 'dataset')\ndef fit_datasets(model_params, ts, vs, es=None, **kwargs):\n \"\"\"\n Train an encoder-decoder network using TensorFlow with `tf.dataset`. This\n is the default behavior for training.\n\n :param model_params: The model (or parameters to create the model) to train\n :param ts: A training data set\n :param vs: A validation data set\n :param es: A test data set, can be None\n :param kwargs:\n See below\n\n :Keyword Arguments:\n * *do_early_stopping* (``bool``) --\n Stop after evaluation data is no longer improving. Defaults to True\n * *verbose* (`dict`) A dictionary containing `console` boolean and `file` name if on\n * *epochs* (``int``) -- how many epochs. Default to 20\n * *outfile* -- Model output file, defaults to classifier-model.pyth\n * *patience* --\n How many epochs where evaluation is no longer improving before we give up\n * *reporting* --\n Callbacks which may be used on reporting updates\n * *nsteps* (`int`) -- If we should report every n-steps, this should be passed\n * *ema_decay* (`float`) -- If we are doing an exponential moving average, what decay to us4e\n * *clip* (`int`) -- If we are doing gradient clipping, what value to use\n * *optim* (`str`) -- The name of the optimizer we are using\n * *lr* (`float`) -- The learning rate we are using\n * *mom* (`float`) -- If we are using SGD, what value to use for momentum\n * *beta1* (`float`) -- Adam-specific hyper-param, defaults to `0.9`\n * *beta2* (`float`) -- Adam-specific hyper-param, defaults to `0.999`\n * *epsilon* (`float`) -- Adam-specific hyper-param, defaults to `1e-8\n\n :return: None\n \"\"\"\n\n epochs = int(kwargs.get('epochs', 5))\n patience = int(kwargs.get('patience', epochs))\n model_file = get_model_file('seq2seq', 'tf', kwargs.get('basedir'))\n do_early_stopping = bool(kwargs.get('do_early_stopping', True))\n\n best_metric = 0\n if do_early_stopping:\n early_stopping_metric = kwargs.get('early_stopping_metric', 'perplexity')\n early_stopping_cmp, best_metric = get_metric_cmp(early_stopping_metric, kwargs.get('early_stopping_cmp'))\n patience = kwargs.get('patience', epochs)\n print('Doing early stopping on [%s] with patience [%d]' % (early_stopping_metric, patience))\n\n reporting_fns = listify(kwargs.get('reporting', []))\n print('reporting', reporting_fns)\n\n batchsz = kwargs['batchsz']\n ## First, make tf.datasets for ts, vs and es\n # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/distribute/README.md\n # effective_batch_sz = args.batchsz*args.gpus\n test_batchsz = kwargs.get('test_batchsz', batchsz)\n src_lengths_key = model_params.get('src_lengths_key')\n train_dataset = tf.data.Dataset.from_tensor_slices(to_tensors(ts, src_lengths_key))\n train_dataset = train_dataset.shuffle(buffer_size=SHUF_BUF_SZ)\n train_dataset = train_dataset.batch(batchsz, drop_remainder=False)\n train_dataset = train_dataset.repeat(epochs + 1)\n train_dataset = train_dataset.prefetch(NUM_PREFETCH)\n\n valid_dataset = tf.data.Dataset.from_tensor_slices(to_tensors(vs, src_lengths_key))\n valid_dataset = valid_dataset.batch(batchsz, drop_remainder=False)\n valid_dataset = valid_dataset.repeat(epochs + 1)\n valid_dataset = 
valid_dataset.prefetch(NUM_PREFETCH)\n\n iter = tf.compat.v1.data.Iterator.from_structure(tf.compat.v1.data.get_output_types(train_dataset),\n tf.compat.v1.data.get_output_shapes(train_dataset))\n\n features, tgt = iter.get_next()\n # Add features to the model params\n model_params.update(features)\n # This is kind of crazy, but seems to work, hardwire a graph op for `mx_tgt_len`\n model_params.update({'tgt': tgt, 'mx_tgt_len': tf.reduce_max(features['tgt_len'])})\n\n # create the initialization operations\n train_init_op = iter.make_initializer(train_dataset)\n valid_init_op = iter.make_initializer(valid_dataset)\n\n TRAIN_FLAG()\n trainer = create_trainer(model_params, **kwargs)\n\n last_improved = 0\n\n for epoch in range(epochs):\n trainer.sess.run(train_init_op)\n trainer.train(ts, reporting_fns)\n trainer.sess.run(valid_init_op)\n test_metrics = trainer.test(vs, reporting_fns, phase='Valid')\n\n if do_early_stopping is False:\n trainer.checkpoint()\n trainer.model.save(model_file)\n\n elif early_stopping_cmp(test_metrics[early_stopping_metric], best_metric):\n last_improved = epoch\n best_metric = test_metrics[early_stopping_metric]\n print('New best %.3f' % best_metric)\n trainer.checkpoint()\n trainer.model.save(model_file)\n\n elif (epoch - last_improved) > patience:\n print('Stopping due to persistent failures to improve')\n break\n\n if do_early_stopping is True:\n print('Best performance on %s: %.3f at epoch %d' % (early_stopping_metric, best_metric, last_improved))\n\n if es is not None:\n print('Reloading best checkpoint')\n trainer.recover_last_checkpoint()\n\n test_dataset = tf.data.Dataset.from_tensor_slices(to_tensors(es, src_lengths_key))\n test_dataset = test_dataset.batch(test_batchsz, drop_remainder=False)\n test_dataset = test_dataset.repeat(epochs + 1)\n test_dataset = test_dataset.prefetch(NUM_PREFETCH)\n test_init_op = iter.make_initializer(test_dataset)\n\n trainer.sess.run(test_init_op)\n trainer.test(es, reporting_fns, phase='Test')\n", "import math\nimport copy\nimport logging\nimport numpy as np\nimport tensorflow as tf\nfrom eight_mile.utils import write_json, Offsets, is_sequence, calc_nfeats\nfrom eight_mile.tf.layers import *\n\n\nFLOAT32 = 4\nGB2 = 1024 * 1024 * 1024 * 2\nlogger = logging.getLogger(\"baseline\")\n\n\nclass TensorFlowEmbeddings(tf.keras.layers.Layer):\n \"\"\"This provides a base for TensorFlow embeddings sub-graphs\n\n \"\"\"\n\n def __init__(self, trainable=True, name=None, dtype=tf.float32, **kwargs):\n # tf.kers.layers.Layer has a validation step that only allows certain kwargs\n # to be passed into it. These are not documented and you need to look into the\n # code to find this. 
For now just don't pass in out kwargs\n super().__init__(trainable=trainable, name=name, dtype=dtype)\n self.W = None\n\n def get_dsz(self):\n \"\"\"Get the number of output dimension of this operation\n\n :return:\n \"\"\"\n pass\n\n def get_vsz(self):\n \"\"\"Get the number of words (including <PAD>) in the vocabulary\n\n :return:\n \"\"\"\n pass\n\n def get_weights(self):\n raise NotImplementedError\n\n def encode(self, *x):\n \"\"\"This defines the computation of the sub-graph for this object and returns the output node\n\n :return:\n \"\"\"\n pass\n\n @property\n def output_dim(self):\n return self.get_dsz()\n\n def call(self, x):\n return self.encode(x)\n\n def get_feed_dict(self):\n \"\"\"Return a feed dict that is needed to initialize this embeddings.\"\"\"\n return {}\n\n\nclass LookupTableEmbeddings(TensorFlowEmbeddings):\n def __init__(self, trainable=True, name=None, dtype=tf.float32, cpu_placement=False, **kwargs):\n \"\"\"Create a lookup-table based embedding.\n\n :param name: The name of the feature/placeholder, and a key for the scope\n :param kwargs:\n\n :Keyword Arguments: See below\n * *vsz* -- (``int``) this is the vocabulary (input) size of the lookup table\n * *dsz* -- (``int``) the output dimension size of this embedding\n * *finetune* -- (``bool``) (default is `True`) should we allow the sub-graph to learn updated weights\n * *weights* -- (``numpy.ndarray``) Optional `vsz x dsz` weight matrix for initialization\n * *unif* -- (``float``) (defaults to `0.1`) If the weights should be created, what is the random initialization range\n \"\"\"\n trainable = kwargs.get(\"finetune\", trainable)\n # The layers have a filter of allowed keywords and the docs don't list what they are\n # you need to look in code. We are just not passing kwargs for now.\n super().__init__(trainable=trainable, name=name, dtype=dtype, **kwargs)\n self.vsz = kwargs.get(\"vsz\")\n self.padding_idx = kwargs.get('padding_idx', Offsets.PAD)\n self.dsz = kwargs.get(\"dsz\")\n self.finetune = kwargs.get(\"finetune\", trainable)\n self.scope = kwargs.get(\"scope\", \"LUT\")\n self.dropin = kwargs.get(\"dropin\", 0.0)\n self._weights = kwargs.get(\"weights\")\n self.drop = tf.keras.layers.Dropout(rate=self.dropin, noise_shape=(self.get_vsz(), 1))\n self.cpu_placement = cpu_placement\n if self._weights is None:\n unif = kwargs.get(\"unif\", 0.1)\n self._weights = np.random.uniform(-unif, unif, (self.vsz, self.dsz))\n else:\n self.vsz, self.dsz = self._weights.shape\n\n def build(self, input_shape):\n\n if self.cpu_placement:\n with tf.device(\"cpu:0\"):\n self.W = self.add_weight(\n name=f\"{self.scope}/Weight\",\n shape=(self.vsz, self.dsz),\n initializer=tf.constant_initializer(self._weights),\n trainable=self.finetune,\n )\n else:\n self.W = self.add_weight(\n name=f\"{self.scope}/Weight\",\n shape=(self.vsz, self.dsz),\n initializer=tf.constant_initializer(self._weights),\n trainable=self.finetune,\n )\n super().build(input_shape)\n\n def _embed_w_dropout(self, x):\n # The ablation table (4) in https://arxiv.org/pdf/1708.02182.pdf shows this has a massive impact\n embedding_w_dropout = self.drop(self.W, training=TRAIN_FLAG())\n word_embeddings = tf.nn.embedding_lookup(embedding_w_dropout, x)\n return word_embeddings\n\n def encode(self, *x):\n \"\"\"Build a simple Lookup Table and set as input `x` if it exists, or `self.x` otherwise.\n\n :param x: An optional input sub-graph to bind to this operation or use `self.x` if `None`\n :return: The sub-graph output\n \"\"\"\n self.x = x[0]\n if self.padding_idx 
is not None:\n e0 = tf.tensor_scatter_nd_update(\n self.W, tf.constant(self.padding_idx, dtype=tf.int32, shape=[1, 1]), tf.zeros(shape=[1, self.dsz])\n )\n with tf.control_dependencies([e0]):\n return self._embed_w_dropout(self.x)\n else:\n return self._embed_w_dropout(self.x)\n\n def get_vsz(self):\n return self.vsz\n\n def get_dsz(self):\n return self.dsz\n\n def get_weights(self):\n return self.W\n\n\nclass CharConvEmbeddings(TensorFlowEmbeddings):\n \"\"\"dos Santos embeddings extended to parallel filters (AKA Kim character-aware neural language model inputs)\n\n \"\"\"\n\n def __init__(self, trainable=True, name=None, dtype=tf.float32, **kwargs):\n trainable = kwargs.get(\"finetune\", trainable)\n super().__init__(trainable=trainable, name=name, dtype=dtype, **kwargs)\n self.cpu_placement = bool(kwargs.get('cpu_placement', False))\n self.scope = kwargs.get(\"scope\", \"CharConv\")\n self.finetune = kwargs.get(\"finetune\", trainable)\n self.nfeat_factor = kwargs.get(\"nfeat_factor\", None)\n self.cfiltsz = kwargs.get(\"cfiltsz\", kwargs.get(\"filtsz\", [3]))\n self.max_feat = kwargs.get(\"max_feat\", 30)\n gating = kwargs.get(\"gating\", \"skip\")\n num_gates = kwargs.get(\"num_gates\", 1)\n activation = kwargs.get(\"activation\", \"tanh\")\n self.wsz = kwargs.get(\"wsz\", 30)\n self.projsz = kwargs.get(\"projsz\")\n self.x = None\n # These are the actual final filter sizes and num features\n filtsz, nfeats = calc_nfeats(self.cfiltsz, self.nfeat_factor, self.max_feat, self.wsz)\n\n self.embed = LookupTableEmbeddings(name=f\"{self.name}/CharLUT\", finetune=self.finetune, **kwargs)\n dsz = self.embed.output_dim\n self.parallel_conv = ParallelConv(dsz, nfeats, filtsz, activation)\n self.gating_fns = tf.keras.Sequential()\n for _ in range(num_gates):\n if gating == 'skip':\n self.gating_fns.add(SkipConnection(self.parallel_conv.output_dim, activation))\n else:\n self.gating_fns.add(Highway(self.parallel_conv.output_dim))\n\n self.outsz = self.parallel_conv.output_dim\n if self.projsz is not None:\n self.outsz = self.projsz\n self.proj = tf.keras.layers.Dense(self.outsz, bias_initializer=tf.constant_initializer(0.0))\n\n\n\n @property\n def dsz(self):\n return self.outsz\n\n def encode(self, *x):\n self.x = x[0]\n\n mxlen = tf.shape(self.x)[1]\n mxwlen = tf.shape(self.x)[-1]\n char_bt_x_w = tf.reshape(self.x, [-1, mxwlen])\n cembed = self.embed(char_bt_x_w)\n cmot = self.parallel_conv(cembed)\n cmot = self.gating_fns(cmot)\n if self.projsz:\n cmot = self.proj(cmot)\n word_char = tf.reshape(cmot, [-1, mxlen, self.outsz])\n return word_char\n\n def get_vsz(self):\n return self.embed.get_vsz()\n\n def get_dsz(self):\n return self.outsz\n\n\nclass CharLSTMEmbeddings(TensorFlowEmbeddings):\n def __init__(self, trainable=True, name=None, dtype=tf.float32, **kwargs):\n trainable = kwargs.get(\"finetune\", trainable)\n super().__init__(trainable=trainable, name=name, dtype=dtype, **kwargs)\n self.scope = kwargs.get(\"scope\", \"CharLUT\")\n self.finetune = kwargs.get(\"finetune\", trainable)\n self.lstmsz = kwargs.get(\"lstmsz\", 50)\n self.lstm_layers = kwargs.get(\"layers\", 1)\n self.pdrop = kwargs.get(\"pdrop\", 0.5)\n self.rnn_type = kwargs.get(\"rnn_type\", \"blstm\")\n self.x = None\n self.embed = LookupTableEmbeddings(name=f\"{self.name}/CharLUT\", finetune=self.finetune, **kwargs)\n self.lstm = BiLSTMEncoderHidden(\n self.embed.output_dim,\n self.lstmsz,\n self.lstm_layers,\n pdrop=self.pdrop,\n requires_length=True,\n name=f\"{self.name}/blstm\",\n )\n\n def encode(self, *x):\n self.x = 
x[0]\n shape = tf.shape(self.x)\n B = shape[0]\n T = shape[1]\n W = shape[2]\n flat_chars = tf.reshape(x, [-1, W])\n embed_chars = self.embed(flat_chars)\n\n # Calculate the lengths of each word\n word_lengths = tf.reduce_sum(tf.cast(tf.not_equal(flat_chars, Offsets.PAD), tf.int32), axis=1)\n\n # cuDNN throws an error if there is an input with a length of 0, this happens when the \"word\"\n # is actually a \"<PAD>\" so there are no characters to run the LSTM over. Here we just say\n # that the lengths is 1. This will make cudnn happy and we will just get junk in that spot\n patched_lengths = tf.math.maximum(word_lengths, 1)\n\n # Run the LSTM\n result = self.lstm((embed_chars, patched_lengths))\n\n # Create a mask that is true when the length is 0 (where the word was a pad) so that\n # we can mask out the junk that the lstm created because we needed a length of 1\n result = tf.multiply(result, tf.expand_dims(tf.cast(tf.not_equal(word_lengths, 0), tf.float32), -1))\n\n return tf.reshape(result, (B, T, self.lstmsz))\n\n def call(self, inputs):\n return self.encode(inputs)\n\n def get_dsz(self):\n return self.lstmsz\n\n def get_vsz(self):\n return self.embed.get_vsz()\n\n\nclass CharTransformerEmbeddings(TensorFlowEmbeddings):\n def __init__(self, trainable=True, name=None, dtype=tf.float32, **kwargs):\n trainable = kwargs.get(\"finetune\", trainable)\n super().__init__(trainable=trainable, name=name, dtype=dtype, **kwargs)\n self.scope = kwargs.get(\"scope\", \"CharLUT\")\n self.finetune = kwargs.get(\"finetune\", trainable)\n self.embed = LookupTableEmbeddings(name=f\"{self.name}/CharLUT\", finetune=self.finetune, **kwargs)\n self.d_model = kwargs.get(\"wsz\", 30)\n self.num_heads = kwargs.get(\"num_heads\", 3)\n self.rpr_k = kwargs.get(\"rpr_k\", 10)\n layers = kwargs.get(\"layers\", 1)\n pdrop = kwargs.get(\"pdrop\", 0.5)\n self.char_comp = TransformerEncoderStackWithLengths(\n self.num_heads,\n self.d_model,\n pdrop,\n False,\n layers,\n rpr_k=self.rpr_k,\n input_sz=self.embed.output_dim,\n name=f\"{self.name}/transformer\",\n )\n\n def encode(self, *x):\n self.x = x[0]\n shape = tf.shape(self.x)\n B = shape[0]\n T = shape[1]\n W = shape[2]\n flat_chars = tf.reshape(x, [-1, W])\n embed_chars = self.embed(flat_chars)\n\n # Calculate the lengths of each word\n lengths = tf.reduce_sum(tf.cast(tf.not_equal(flat_chars, Offsets.PAD), tf.int32), axis=1)\n\n # Run the LSTM\n result = self.char_comp((embed_chars, lengths))\n\n pooled = tf.reduce_max(result, -2, keepdims=False)\n\n return tf.reshape(pooled, (B, T, self.d_model))\n\n def call(self, inputs):\n return self.encode(inputs)\n\n def get_dsz(self):\n return self.d_model\n\n def get_vsz(self):\n return self.embed.get_vsz()\n\n\nclass PositionalMixin(tf.keras.layers.Layer):\n def positional(self, length):\n pass\n\n\nclass SinusoidalPositionalMixin(PositionalMixin):\n def __init__(self, trainable=True, name=None, dtype=tf.float32, **kwargs):\n super().__init__(trainable=trainable, name=name, dtype=dtype, **kwargs)\n\n max_timescale = kwargs.get(\"max_timescale\", 1.0e4)\n # Match the mxlen pytorch has because it precomputes the timing signal\n mxlen = kwargs.get('mxlen', 10000)\n\n word_dsz = self.get_dsz()\n log_timescale_increment = math.log(max_timescale) / float(word_dsz)\n inv_timescales = np.exp(np.arange(0, word_dsz, 2, dtype=np.float32) * -log_timescale_increment)\n\n pe = np.zeros((mxlen, word_dsz), dtype=np.float32)\n position = np.expand_dims(np.arange(0, mxlen, dtype=np.float32), 1)\n\n pe[:, 0::2] = np.sin(position * 
inv_timescales)\n pe[:, 1::2] = np.cos(position * inv_timescales)\n\n self.pe = tf.expand_dims(pe, 0)\n\n def positional(self, length):\n return self.pe[:, :length]\n\n\nclass LearnedPositionalMixin(PositionalMixin):\n def __init__(self, trainable=True, name=None, dtype=tf.float32, **kwargs):\n trainable = kwargs.get(\"finetune\", trainable)\n super().__init__(trainable=trainable, name=name, dtype=dtype, **kwargs)\n self.mxlen = int(kwargs.get(\"mxlen\", 512))\n self.pos_weights = kwargs.get(\"pos_weights\")\n if self.pos_weights is None:\n unif = float(kwargs.get(\"unif\", 0.1))\n self.pos_weights = np.random.uniform(-unif, unif, (self.mxlen, self.get_dsz()))\n\n def build(self, input_shape):\n self.pos = self.add_weight(\n name=\"pos\",\n initializer=tf.constant_initializer(self.pos_weights),\n shape=[self.mxlen, self.get_dsz()],\n trainable=self.finetune,\n )\n super().build(input_shape)\n\n def positional(self, length):\n return tf.expand_dims(tf.nn.embedding_lookup(self.pos, tf.range(length, dtype=tf.int32)), 0)\n\n\nclass PositionalLookupTableEmbeddings(SinusoidalPositionalMixin, LookupTableEmbeddings):\n def __init__(self, trainable=True, name=None, dtype=tf.float32, **kwargs):\n super().__init__(name=name, **kwargs)\n self.scale = math.sqrt(self.get_dsz())\n self.dropout = tf.keras.layers.Dropout(kwargs.get(\"dropout\", 0.0))\n\n def encode(self, *x):\n y = super().encode(*x) * tf.constant(self.scale)\n T = tf.shape(y)[1]\n pos = self.positional(T)\n return self.dropout(y + pos, training=TRAIN_FLAG())\n\n\nclass LearnedPositionalLookupTableEmbeddings(LearnedPositionalMixin, LookupTableEmbeddings):\n def __init__(self, trainable=True, name=None, dtype=tf.float32, **kwargs):\n super().__init__(name=name, **kwargs)\n self.dropout = tf.keras.layers.Dropout(kwargs.get(\"dropout\", 0.0))\n\n def encode(self, *x):\n y = super().encode(*x)\n T = tf.shape(y)[1]\n pos = self.positional(T)\n return self.dropout(y + pos, training=TRAIN_FLAG())\n\n\nclass LearnedPositionalLookupTableEmbeddingsWithBias(LearnedPositionalMixin, LookupTableEmbeddings):\n \"\"\"Learned positional lookup table embeddings wih a bias and layer norm\n\n This is just a typical learned positional embedding but with a learnable\n bias and a layer norm. 
This is equivalent to BERT embeddings when the\n token_type is not set\n\n \"\"\"\n def __init__(self, trainable=True, name=None, dtype=tf.float32, **kwargs):\n super().__init__(name=name, **kwargs)\n\n def build(self, input_shape):\n super().build(input_shape)\n self.bias = self.add_weight(\n name=\"bias\",\n initializer=tf.constant_initializer(0.0),\n shape=[1, self.get_dsz()],\n trainable=self.finetune,\n )\n\n def encode(self, *x):\n y = super().encode(*x)\n T = tf.shape(y)[1]\n pos = self.positional(T)\n y = y + pos + self.bias\n return y\n\n\nclass PositionalCharConvEmbeddings(SinusoidalPositionalMixin, CharConvEmbeddings):\n def __init__(self, trainable=True, name=None, dtype=tf.float32, **kwargs):\n super().__init__(name=name, **kwargs)\n self.scale = math.sqrt(self.get_dsz())\n self.dropout = tf.keras.layers.Dropout(kwargs.get(\"dropout\", 0.0))\n\n def encode(self, *x):\n y = super().encode(*x) * tf.constant(self.scale)\n T = tf.shape(y)[1]\n pos = self.positional(T)\n return self.dropout(y + pos, training=TRAIN_FLAG())\n\n\nclass LearnedPositionalCharConvEmbeddings(LearnedPositionalMixin, CharConvEmbeddings):\n def __init__(self, trainable=True, name=None, dtype=tf.float32, **kwargs):\n super().__init__(name=name, **kwargs)\n self.dropout = tf.keras.layers.Dropout(kwargs.get(\"dropout\", 0.0))\n\n def encode(self, *x):\n y = super().encode(*x)\n T = tf.shape(y)[1]\n pos = self.positional(T)\n return self.dropout(y + pos, training=TRAIN_FLAG())\n\n\nclass PositionalCharLSTMEmbeddings(SinusoidalPositionalMixin, CharLSTMEmbeddings):\n def __init__(self, trainable=True, name=None, dtype=tf.float32, **kwargs):\n trainable = kwargs.get(\"finetune\", trainable)\n super().__init__(trainable=trainable, name=name, dtype=dtype, **kwargs)\n self.scale = math.sqrt(self.get_dsz())\n self.dropout = tf.keras.layers.Dropout(kwargs.get(\"dropout\", 0.0))\n\n def encode(self, *x):\n y = super().encode(*x) * tf.constant(self.scale)\n T = tf.shape(y)[1]\n pos = self.positional(T)\n return self.dropout(y + pos, training=TRAIN_FLAG())\n\n\nclass LearnedPositionalCharLSTMEmbeddings(LearnedPositionalMixin, CharLSTMEmbeddings):\n def __init__(self, trainable=True, name=None, dtype=tf.float32, **kwargs):\n trainable = kwargs.get(\"finetune\", trainable)\n super().__init__(trainable=trainable, name=name, dtype=dtype, **kwargs)\n self.dropout = tf.keras.layers.Dropout(kwargs.get(\"dropout\", 0.0))\n\n def encode(self, *x):\n y = super().encode(*x)\n T = tf.shape(y)[1]\n pos = self.positional(T)\n return self.dropout(y + pos, training=TRAIN_FLAG())\n", "import argparse\nimport baseline.embeddings\nfrom eight_mile.confusion import ConfusionMatrix\nimport baseline\nfrom eight_mile.pytorch.optz import OptimizerManager, EagerOptimizer\nimport baseline.pytorch.embeddings\nimport eight_mile.pytorch.layers as L\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader, TensorDataset\nimport logging\nimport numpy as np\nimport time\nimport torch\n\n\ndef to_device(m):\n return m.cuda()\n\n\ndef to_host(o):\n return o.cpu().float().numpy()\n\n\nparser = argparse.ArgumentParser(description='Train a Layers model with PyTorch API')\nparser.add_argument('--model_type', help='What type of model to build', type=str, default='default')\nparser.add_argument('--poolsz', help='How many hidden units for pooling', type=int, default=100)\nparser.add_argument('--stacksz', help='How many hidden units for stacking', type=int, nargs='+')\nparser.add_argument('--name', help='(optional) signature name', 
type=str)\nparser.add_argument('--epochs', help='Number of epochs to train', type=int, default=2)\nparser.add_argument('--batchsz', help='Batch size', type=int, default=50)\nparser.add_argument('--filts', help='Parallel convolution filter widths (if default model)', type=int, default=[3, 4, 5], nargs='+')\nparser.add_argument('--mxlen', help='Maximum post length (number of words) during training', type=int, default=100)\nparser.add_argument('--train', help='Training file', default='../data/stsa.binary.phrases.train')\nparser.add_argument('--valid', help='Validation file', default='../data/stsa.binary.dev')\nparser.add_argument('--test', help='Testing file', default='../data/stsa.binary.test')\nparser.add_argument('--embeddings', help='Pretrained embeddings file', default='/data/embeddings/GoogleNews-vectors-negative300.bin')\nparser.add_argument('--ll', help='Log level', type=str, default='info')\nparser.add_argument('--lr', help='Learning rate', type=float, default=0.001)\nparser.add_argument(\"--device\", type=str,\n default=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n help=\"Device (cuda or cpu)\")\nargs = parser.parse_known_args()[0]\n\n\nfeature_desc = {\n 'word': {\n 'vectorizer': baseline.Token1DVectorizer(mxlen=100, transform_fn=baseline.lowercase),\n 'embed': {'file': args.embeddings, 'type': 'default', 'unif': 0.25}\n }\n}\n# Create a reader that is using our vectorizers to parse a TSV file\n# with rows like:\n# <label>\\t<sentence>\\n\n\nclass DictionaryDatasetWrapper(Dataset):\n def __init__(self, x, x_lengths, y):\n self.tensor_dataset = TensorDataset(x, x_lengths, y)\n\n def __getitem__(self, index):\n # stuff\n x, x_length, y = self.tensor_dataset[index]\n return {'word': x.to(args.device), \"lengths\": x_length.to(args.device)}, y.to(args.device)\n\n def __len__(self):\n return len(self.tensor_dataset)\n\n\nclass Data:\n\n def __init__(self, ts, batchsz):\n self.ds = self._to_tensors(ts)\n self.batchsz = batchsz\n\n def _to_tensors(self, ts):\n x = []\n x_lengths = []\n y = []\n for sample in ts:\n x.append(sample['word'].squeeze())\n x_lengths.append(sample['word_lengths'].squeeze())\n y.append(sample['y'].squeeze())\n return DictionaryDatasetWrapper(torch.tensor(np.stack(x), dtype=torch.long), torch.tensor(np.stack(x_lengths), dtype=torch.long), torch.tensor(np.stack(y), dtype=torch.long))\n\n def get_input(self, training=False):\n return DataLoader(self.ds, batch_size=self.batchsz, shuffle=training)\n\n\nvectorizers = {k: v['vectorizer'] for k, v in feature_desc.items()}\nreader = baseline.TSVSeqLabelReader(vectorizers, clean_fn=baseline.TSVSeqLabelReader.do_clean)\n\ntrain_file = args.train\nvalid_file = args.valid\ntest_file = args.test\n\n\n# This builds a set of counters\nvocabs, labels = reader.build_vocab([train_file,\n valid_file,\n test_file])\n\n# This builds a set of embeddings objects, these are typically not DL-specific\n# but if they happen to be addons, they can be\nembeddings = dict()\nfor k, v in feature_desc.items():\n embed_config = v['embed']\n embeddings_for_k = baseline.embeddings.load_embeddings('word', embed_file=embed_config['file'], known_vocab=vocabs[k],\n embed_type=embed_config.get('type', 'default'),\n unif=embed_config.get('unif', 0.), use_mmap=True)\n\n embeddings[k] = embeddings_for_k['embeddings']\n # Reset the vocab to the embeddings one\n vocabs[k] = embeddings_for_k['vocab']\n\n\ntrain_set = Data(reader.load(train_file, vocabs=vocabs, batchsz=1), args.batchsz)\nvalid_set = Data(reader.load(valid_file, vocabs=vocabs, 
batchsz=1), args.batchsz)\ntest_set = Data(reader.load(test_file, vocabs=vocabs, batchsz=1), args.batchsz)\n\nstacksz = len(args.filts) * args.poolsz\nnum_epochs = 2\n\nmodel = to_device(\n L.EmbedPoolStackModel(2, L.EmbeddingsStack(embeddings), L.WithoutLength(L.ParallelConv(300, args.poolsz, args.filts)), L.Highway(stacksz))\n)\n\n\ndef loss(model, x, y):\n y_ = model(x)\n l = F.nll_loss(y_, y)\n return l\n\n\noptimizer = EagerOptimizer(loss, optim=\"adam\", lr=0.001)\n\nfor epoch in range(num_epochs):\n loss_acc = 0.\n step = 0\n start = time.time()\n for x, y in train_set.get_input(training=True):\n loss_value = optimizer.update(model, x, y)\n loss_acc += loss_value\n step += 1\n print('training time {}'.format(time.time() - start))\n mean_loss = loss_acc / step\n print('Training Loss {}'.format(mean_loss))\n cm = ConfusionMatrix(['0', '1'])\n for x, y in valid_set.get_input():\n with torch.no_grad():\n y_ = np.argmax(to_host(model(x)), axis=1)\n cm.add_batch(y, y_)\n print(cm)\n print(cm.get_all_metrics())\n\nprint('FINAL')\ncm = ConfusionMatrix(['0', '1'])\nwith torch.no_grad():\n for x, y in test_set.get_input():\n y_ = np.argmax(to_host(model(x)), axis=1)\n cm.add_batch(y, y_)\n\nprint(cm)\nprint(cm.get_all_metrics())\n", "import argparse\nimport baseline\nimport baseline.embeddings\nimport baseline.pytorch.embeddings\nfrom collections import Counter\nfrom eight_mile.confusion import ConfusionMatrix\nfrom eight_mile.pytorch.optz import EagerOptimizer\nimport eight_mile.pytorch.layers as L\nimport nlp as nlp_datasets\nimport numpy as np\nimport os\nimport time\nimport torch\nimport torch.nn.functional as F\n\n\ndef to_device(d):\n if isinstance(d, dict):\n return {k: v.cuda() for k, v in d.items() if k != 'id'}\n return d.cuda()\n\n\ndef to_host(o):\n return o.cpu().float().numpy()\n\n\ndef create_vocabs(datasets, vectorizers):\n vocabs = {k: Counter() for k in vectorizers.keys()}\n for dataset in datasets:\n for k, v in vectorizers.items():\n vocabs[k] = Counter()\n for example in dataset:\n vocabs[k] += v.count(example['sentence'].split())\n return vocabs\n\n\ndef create_featurizer(vectorizers, vocabs, primary_key='word'):\n def convert_to_features(batch):\n\n features = {k: [] for k in vectorizers.keys()}\n\n features['lengths'] = []\n features['id'] = batch['idx']\n features['y'] = batch['label']\n\n for i, text in enumerate(batch['sentence']):\n for k, v in vectorizers.items():\n vec, lengths = v.run(text.split(), vocabs[k])\n if k == primary_key:\n features['lengths'].append(lengths)\n features[k].append(vec.tolist())\n\n return features\n return convert_to_features\n\n\nparser = argparse.ArgumentParser(description='Train a Layers model with PyTorch API')\nparser.add_argument('--model_type', help='What type of model to build', type=str, default='default')\nparser.add_argument('--poolsz', help='How many hidden units for pooling', type=int, default=100)\nparser.add_argument('--dsz', help='Embeddings dimension size', type=int, default=300)\nparser.add_argument('--stacksz', help='How many hidden units for stacking', type=int, nargs='+')\nparser.add_argument('--name', help='(optional) signature name', type=str)\nparser.add_argument('--epochs', help='Number of epochs to train', type=int, default=2)\nparser.add_argument('--batchsz', help='Batch size', type=int, default=50)\nparser.add_argument('--filts', help='Parallel convolution filter widths (if default model)', type=int, default=[3, 4, 5], nargs='+')\nparser.add_argument('--mxlen', help='Maximum post length (number of words) 
during training', type=int, default=100)\nparser.add_argument('--dataset', help='HuggingFace Datasets id', default=['glue', 'sst2'], nargs='+')\nparser.add_argument('--embeddings', help='Pretrained embeddings file', default='https://www.dropbox.com/s/699kgut7hdb5tg9/GoogleNews-vectors-negative300.bin.gz?dl=1')\nparser.add_argument('--ll', help='Log level', type=str, default='info')\nparser.add_argument('--lr', help='Learning rate', type=float, default=0.001)\nparser.add_argument('--blcache', help='Cache for embeddings', default=os.path.expanduser('~/.bl-data'))\nparser.add_argument('--output', type=str, help='Write a glue-style file, e.g. [SST-2.tsv]')\nparser.add_argument('--device', type=str,\n default=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n help=\"Device (cuda or cpu)\")\nargs = parser.parse_known_args()[0]\n\nembeddings_file = baseline.EmbeddingDownloader(args.embeddings, embedding_dsz=args.dsz, embedding_sha1=None, data_download_cache=args.blcache).download()\n\nfeature_desc = {\n 'word': {\n 'vectorizer': baseline.Token1DVectorizer(mxlen=100, transform_fn=baseline.lowercase),\n 'embed': {'file': embeddings_file, 'type': 'default', 'unif': 0.25, 'dsz': args.dsz}\n }\n}\n\nvectorizers = {k: v['vectorizer'] for k, v in feature_desc.items()}\n\ndataset = nlp_datasets.load_dataset(*args.dataset)\nvocabs = create_vocabs(dataset.values(), vectorizers)\n\n# This builds a set of embeddings objects, these are typically not DL-specific\n# but if they happen to be addons, they can be\nembeddings = dict()\nfor k, v in feature_desc.items():\n embed_config = v['embed']\n embeddings_for_k = baseline.embeddings.load_embeddings('word', embed_file=embed_config['file'], known_vocab=vocabs[k],\n embed_type=embed_config.get('type', 'default'),\n unif=embed_config.get('unif', 0.), use_mmap=True)\n\n embeddings[k] = embeddings_for_k['embeddings']\n # Reset the vocab to the embeddings one\n vocabs[k] = embeddings_for_k['vocab']\n\n\ntrain_set = dataset['train']\nvalid_set = dataset['validation']\ntest_set = dataset['test']\n\nconvert_to_features = create_featurizer(vectorizers, vocabs)\ntrain_set = train_set.map(convert_to_features, batched=True)\ntrain_set.set_format(type='torch', columns=list(vectorizers.keys()) + ['y', 'lengths'])\ntrain_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batchsz)\n\nvalid_set = valid_set.map(convert_to_features, batched=True)\nvalid_set.set_format(type='torch', columns=list(vectorizers.keys()) + ['y', 'lengths'])\nvalid_loader = torch.utils.data.DataLoader(valid_set, batch_size=args.batchsz)\n\ntest_set = test_set.map(convert_to_features, batched=True)\ntest_set.set_format(type='torch', columns=list(vectorizers.keys()) + ['y', 'id', 'lengths'])\ntest_loader = torch.utils.data.DataLoader(test_set, batch_size=args.batchsz)\n\n\nstacksz = len(args.filts) * args.poolsz\n\n\nmodel = to_device(\n L.EmbedPoolStackModel(2, L.EmbeddingsStack(embeddings), L.WithoutLength(L.ParallelConv(args.dsz, args.poolsz, args.filts)), L.Highway(stacksz))\n)\n\n\ndef loss(model, x, y):\n y_ = model(x)\n l = F.nll_loss(y_, y)\n return l\n\n\noptimizer = EagerOptimizer(loss, optim=\"adam\", lr=0.001)\n\nfor epoch in range(args.epochs):\n loss_acc = 0.\n step = 0\n start = time.time()\n model.train()\n for x in train_loader:\n x = to_device(x)\n y = x.pop('y')\n loss_value = optimizer.update(model, x, y)\n loss_acc += loss_value\n step += 1\n print('training time {}'.format(time.time() - start))\n mean_loss = loss_acc / step\n print('Training Loss {}'.format(mean_loss))\n 
cm = ConfusionMatrix(['0', '1'])\n model.eval()\n for x in valid_loader:\n x = to_device(x)\n with torch.no_grad():\n y = x.pop('y')\n y_ = np.argmax(to_host(model(x)), axis=1)\n cm.add_batch(y, y_)\n print(cm)\n print(cm.get_all_metrics())\n\n\nif args.output:\n model.eval()\n print(f'Writing GLUE-style output file {args.output}')\n with open(args.output, 'w') as wf:\n wf.write('id\\tlabel\\n')\n with torch.no_grad():\n for x in test_loader:\n ids = x['id']\n x = to_device(x)\n y = x.pop('y')\n ys = np.argmax(to_host(model(x)), axis=1)\n for id, y in zip(ids, ys):\n wf.write(f'{id.item()}\\t{y.item()}\\n')\n\n\n" ]
[ [ "tensorflow.compat.v1.data.get_output_types", "tensorflow.reduce_max", "tensorflow.compat.v1.data.get_output_shapes" ], [ "tensorflow.device", "tensorflow.control_dependencies", "tensorflow.zeros", "tensorflow.keras.Sequential", "numpy.arange", "numpy.sin", "numpy.zeros", "tensorflow.shape", "tensorflow.nn.embedding_lookup", "tensorflow.not_equal", "tensorflow.reduce_max", "tensorflow.constant", "tensorflow.range", "tensorflow.reshape", "numpy.cos", "tensorflow.expand_dims", "tensorflow.constant_initializer", "numpy.random.uniform", "tensorflow.math.maximum" ], [ "torch.nn.functional.nll_loss", "torch.utils.data.TensorDataset", "torch.utils.data.DataLoader", "numpy.stack", "torch.no_grad", "torch.cuda.is_available" ], [ "torch.no_grad", "torch.cuda.is_available", "torch.utils.data.DataLoader", "torch.nn.functional.nll_loss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
preller/morpheus
[ "ba10271c6ace5aff3b35509ab5fbf42bcd6750b6" ]
[ "morpheus/classifier.py" ]
[ "# MIT License\n# Copyright 2018 Ryan Hausen\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n# ==============================================================================\n\"\"\"An interface for interacting with Morpheus\"\"\"\nimport os\nimport time\nimport json\nfrom subprocess import Popen\nfrom typing import Iterable, List, Tuple, Callable, Dict, Union\n\nimport imageio\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nfrom astropy.io import fits\nfrom matplotlib.colors import hsv_to_rgb\nfrom scipy import ndimage as ndi\nfrom skimage.feature import peak_local_max\nfrom skimage.filters import sobel\nfrom skimage.measure import regionprops\nfrom skimage.morphology import watershed\nfrom tqdm import tqdm\n\ntf.disable_eager_execution()\n\nimport morpheus.core.helpers as helpers\nimport morpheus.core.model as model\n\n\nclass Classifier:\n \"\"\"The primary interface for the use of Morpheus.\n\n Images can be classified by calling\n :py:meth:`~morpheus.classifier.Classifier.classify` and passing\n numpy arrays or string FITS file locations.\n\n After an image this this class offers some post processing functionality by\n generating segmentation maps using\n :py:meth:`~morpheus.classifier.Classifier.segmap_from_classified`, colorized\n morphological classifications using\n :py:meth:`~morpheus.classifier.Classifier.colorize_classification`, and\n generating catalogs using\n :py:meth:`~morpheus.classifier.Classifier.catalog_from_classified`.\n\n For more examples, see the `documentation <https://morpheus-astro.readthedocs.io/>`_.\n \"\"\"\n\n __graph = None\n __session = None\n __X = tf.placeholder(tf.float32, shape=[None, 40, 40, 4])\n\n @staticmethod\n def classify(\n h: Union[np.ndarray, str] = None,\n j: Union[np.ndarray, str] = None,\n z: Union[np.ndarray, str] = None,\n v: Union[np.ndarray, str] = None,\n out_dir: str = None,\n batch_size: int = 1000,\n out_type: str = \"rank_vote\",\n gpus: List[int] = None,\n cpus: int = None,\n parallel_check_interval: float = 1,\n ) -> dict:\n \"\"\"Generates per-pixel classifications from input images.\n\n Args:\n h (Union[np.ndarray, str]): The H band image or the path to it\n j (Union[np.ndarray, str]): The J band image or the path to it\n v (Union[np.ndarray, str]): The V band image or the path to it\n z (Union[np.ndarray, str]): The Z band image or the path to it\n out_dir (str): If provided, a directory to save the output to\n batch_size (int): The size of the batches to use when classifying the input\n out_type (str): 
The method by which to aggregate classifications\n for a single pixel. Can be one of \"rank_vote\",\n \"mean_var\", or \"both\"\n gpus (List[int]): The GPU ids to use for parallel classification\n the ids can be found using ``nvidia-smi``\n cpus (int): The number of cpus to use for parallel classification.\n parallel_check_interval (float): If running a parallel job, how often\n to check on the running sub-processes\n in minutes.\n\n Returns:\n Dictionary containing the classification output for the given input\n\n Raises:\n ValueError if both gpus and cpus are given\n ValueError if mixed string and numpy arrays are given for h, j, v, z\n ValueError if h, j, v, or z are None\n \"\"\"\n\n Classifier._variables_not_none([\"h\", \"j\", \"v\", \"z\"], [h, j, v, z])\n are_files = Classifier._valid_input_types_is_str(h, j, v, z)\n workers, is_gpu = Classifier._validate_parallel_params(gpus, cpus)\n\n if are_files:\n hduls, [h, j, v, z] = Classifier._parse_files(h, j, v, z)\n\n if out_dir is None:\n out_dir = \".\"\n else:\n hduls = []\n\n if len(workers) == 1:\n classified = Classifier._classify_arrays(\n h=h,\n j=j,\n v=v,\n z=z,\n out_type=out_type,\n out_dir=out_dir,\n batch_size=batch_size,\n )\n else:\n if out_dir is None:\n out_dir = \".\"\n Classifier._build_parallel_classification_structure(\n [h, j, v, z], workers, batch_size, out_dir, out_type\n )\n Classifier._run_parallel_jobs(\n workers, is_gpu, out_dir, parallel_check_interval\n )\n Classifier._stitch_parallel_classifications(workers, out_dir, out_type)\n\n classification_hduls, classified = Classifier._retrieve_classifications(\n out_dir, out_type\n )\n\n hduls.extend(classification_hduls)\n\n for hdul in hduls:\n hdul.close()\n\n return classified\n\n @staticmethod\n def catalog_from_classified(\n classified: dict,\n flux: np.ndarray,\n segmap: np.ndarray,\n aggregation_scheme: Callable = None,\n out_file: str = None,\n ) -> List[Dict]:\n \"\"\"Creates a catalog of sources and their morphologies.\n\n Args:\n classified (dict): A dictionary containing the output from morpheus.\n flux (np.ndarray): The corresponding flux image in H band\n segmap (np.ndarray): A labeled segmap where every pixel with a\n value > 0 is associated with a source.\n aggregation_scheme (func): Function that takes three arguments `classified`,\n `flux`, and `segmap`, same as this\n function, then returns a numpy array\n containing the morphological classification\n in the following order-spheroid, disk,\n irregular, and point source/compact. If\n None, then the flux weighting scheme\n in\n out_file (str): a location to save the catalog. Can either be .csv\n or .json. 
Anything else will raise a ValueError.\n\n\n Returns:\n A JSON-compatible list of dictionary objects with the following keys:\n {\n 'id': the id from the segmap\n 'location': a (y,x) location -- the max pixel within the segmap\n 'morphology': a dictionary containing the morphology values.\n }\n \"\"\"\n\n if out_file:\n if out_file.endswith((\".csv\", \".json\")):\n is_csv = out_file.endswith(\".csv\")\n else:\n raise ValueError(\"out_file must end with .csv or .json\")\n\n if aggregation_scheme is None:\n aggregation_scheme = Classifier.aggregation_scheme_flux_weighted\n\n catalog = []\n\n for region in regionprops(segmap, flux):\n _id = region.label\n\n if _id < 1:\n continue\n\n img = region.intensity_image\n seg = region.filled_image\n\n start_y, start_x, end_y, end_x = region.bbox\n dat = {}\n for k in classified:\n dat[k] = classified[k][start_y:end_y, start_x:end_x].copy()\n\n classification = aggregation_scheme(dat, img, seg)\n\n masked_flux = img * seg\n\n # https://stackoverflow.com/a/3584260\n y, x = np.unravel_index(masked_flux.argmax(), masked_flux.shape)\n y, x = int(start_y + y), int(start_x + x)\n\n catalog.append(\n {\"id\": _id, \"location\": [y, x], \"morphology\": classification}\n )\n\n if out_file:\n with open(out_file, \"w\") as f:\n if is_csv:\n f.write(\"source_id,y,x,sph,dsk,irr,ps\\n\")\n\n for c in catalog:\n csv = \"{},{},{},{},{},{},{}\\n\"\n f.write(\n csv.format(\n c[\"id\"],\n c[\"location\"][0],\n c[\"location\"][1],\n c[\"morphology\"][0],\n c[\"morphology\"][1],\n c[\"morphology\"][2],\n c[\"morphology\"][3],\n )\n )\n else:\n json.dump(catalog, f)\n\n return catalog\n\n # TODO: make the output file with the FITS helper if the output dir is used.\n @staticmethod\n def segmap_from_classified(\n classified: dict,\n flux: np.ndarray,\n bkg_src_threshold: float = 0.0,\n out_dir: str = None,\n min_distance: int = 20,\n mask: np.ndarray = None,\n deblend: bool = True,\n ) -> np.ndarray:\n \"\"\"Generate a segmentation map from the classification output.\n\n For more information about the segmentation process, see:\n https://arxiv.org/abs/1906.11248\n\n Args:\n data (dict): A dictionary containing the output from morpheus.\n flux (np.ndarray): The flux to use when making the segmap\n bkg_src_threshold (float): The max value that a background\n classification pixel can take and be\n considered a source. The default is 0.\n Should be between [0,1]\n out_dir (str): A path to save the segmap in.\n min_distance (int): The minimum distance for deblending\n mask (np.ndarry): A boolean mask indicating which pixels\n deblend (bool): If ``True``, perform deblending as described in 2.\n in the algorithm description. 
If ``False`` return\n segmap without deblending.\n\n Returns:\n A np.ndarray segmentation map\n \"\"\"\n if bkg_src_threshold < 0 or bkg_src_threshold >= 1:\n err_msg = [\n \"Invalid value for `bkg_src_threshold`, use a value in the \",\n \"range [0, 1)\",\n ]\n\n raise ValueError(err_msg)\n\n bkg = classified[\"background\"]\n markers = np.zeros_like(flux, dtype=np.uint8)\n\n print(\"Building Markers...\")\n if mask is None:\n mask = classified[\"n\"] > 0\n\n is_bkg = np.logical_and(bkg == 1, mask)\n is_src = np.logical_and(bkg <= bkg_src_threshold, mask)\n\n markers[is_bkg] = 1\n markers[is_src] = 2\n\n sobel_img = sobel(bkg)\n\n print(\"Watershedding...\")\n segmented = watershed(sobel_img, markers, mask=mask) - 1\n segmented[np.logical_not(mask)] = 0\n\n labeled, _ = ndi.label(segmented)\n\n labeled[np.logical_not(mask)] = -1\n\n if deblend:\n labeled = Classifier._deblend(labeled, flux, min_distance)\n\n if out_dir:\n fits.PrimaryHDU(data=labeled).writeto(os.path.join(out_dir, \"segmap.fits\"))\n\n return labeled\n\n @staticmethod\n def colorize_classified(\n classified: dict, out_dir: str = None, hide_unclassified: bool = True\n ) -> np.ndarray:\n \"\"\"Makes a color image from the classification output.\n\n The colorization scheme is defined in HSV and is as follows:\n\n * Spheroid = Red\n * Disk = Blue\n * Irregular = Green\n * Point Source = Yellow\n\n The hue is set to be the color associated with the highest ranked class\n for a given pixel. The saturation is set to be the difference between the\n highest ranked class and the second highest ranked class for a given\n pixel. For example, if the top two classes have nearly equal values given\n by the classifier, then the saturation will be low and the pixel will\n appear more white. If the top two classes have very different\n values, then the saturation will be high and the pixel's color will be\n vibrant and not white. The value for a pixel is set to be 1-bkg, where\n bkg is value given to the background class. If the background class has\n a high value, then the pixel will appear more black. If the background\n value is low, then the pixel will take on the color given by the hue and\n saturation values.\n\n Args:\n data (dict): A dictionary containing the output from Morpheus.\n out_dir (str): a path to save the image in.\n hide_unclassified (bool): If true, black out the edges of the image\n that are unclassified. 
If false, show the\n                                   borders as white.\n\n        Returns:\n            A [width, height, 3] array representing the RGB image.\n        \"\"\"\n        red = 0.0 # spheroid\n        blue = 0.7 # disk\n        yellow = 0.18 # point source\n        green = 0.3 # irregular\n\n        shape = classified[\"n\"].shape\n\n        colors = np.array([red, blue, green, yellow])\n        morphs = np.dstack(\n            [classified[i] for i in helpers.LabelHelper.MORPHOLOGIES[:-1]]\n        )\n        ordered = np.argsort(-morphs, axis=-1)\n\n        hues = np.zeros(shape)\n        sats = np.zeros(shape)\n        vals = 1 - classified[\"background\"]\n\n        # the classifier doesn't return values for this area so black it out\n        if hide_unclassified:\n            vals[0:5, :] = 0\n            vals[-5:, :] = 0\n            vals[:, 0:5] = 0\n            vals[:, -5:] = 0\n\n        for i in tqdm(range(shape[0])):\n            for j in range(shape[1]):\n                hues[i, j] = colors[ordered[i, j, 0]]\n                sats[i, j] = (\n                    morphs[i, j, ordered[i, j, 0]] - morphs[i, j, ordered[i, j, 1]]\n                )\n\n        hsv = np.dstack([hues, sats, vals])\n        rgb = hsv_to_rgb(hsv)\n\n        if out_dir:\n            png = (rgb * 255).astype(np.uint8)\n            imageio.imwrite(os.path.join(out_dir, \"colorized.png\"), png)\n\n        return rgb\n\n    @staticmethod\n    def _retrieve_classifications(\n        out_dir: str, out_type: str\n    ) -> Tuple[List[fits.HDUList], dict]:\n\n        f_names = []\n        for morph in helpers.LabelHelper.MORPHOLOGIES:\n            if out_type in [\"mean_var\", \"both\"]:\n                f_names.extend(\n                    [\n                        os.path.join(out_dir, f\"{morph}_mean.fits\"),\n                        os.path.join(out_dir, f\"{morph}_var.fits\"),\n                    ]\n                )\n            if out_type in [\"rank_vote\", \"both\"]:\n                f_names.append(os.path.join(out_dir, f\"{morph}.fits\"))\n\n        f_names.append(os.path.join(out_dir, \"n.fits\"))\n\n        hduls, arrs = helpers.FitsHelper.get_files(f_names)\n\n        classified = {\n            os.path.split(n)[1].replace(\".fits\", \"\"): a for n, a in zip(f_names, arrs)\n        }\n\n        return hduls, classified\n\n    @staticmethod\n    def _valid_input_types_is_str(\n        h: Union[np.ndarray, str] = None,\n        j: Union[np.ndarray, str] = None,\n        z: Union[np.ndarray, str] = None,\n        v: Union[np.ndarray, str] = None,\n    ):\n        in_types = {type(val) for val in [h, j, z, v]}\n\n        if len(in_types) > 1:\n            raise ValueError(\n                \"Mixed input type usage. Ensure all are numpy arrays or strings.\"\n            )\n\n        t = in_types.pop()\n\n        if t in [np.ndarray, str]:\n            return t == str\n        else:\n            raise ValueError(\"Input type must either be numpy array or string\")\n\n    # NEW API ==================================================================\n\n    @staticmethod\n    def _classify_arrays(\n        h: np.ndarray = None,\n        j: np.ndarray = None,\n        z: np.ndarray = None,\n        v: np.ndarray = None,\n        out_dir: str = None,\n        batch_size: int = 1000,\n        out_type: str = \"rank_vote\",\n    ) -> Dict:\n        \"\"\"Classify numpy arrays using Morpheus.\n\n        Args:\n            h (np.ndarray): the H band values for an image\n            j (np.ndarray): the J band values for an image\n            z (np.ndarray): the Z band values for an image\n            v (np.ndarray): the V band values for an image\n            out_dir (str): The location where to save the output files\n                           if None returns the output in memory only.\n            batch_size (int): the number of image sections to process at a time\n            out_type (str): how to process the output from Morpheus. If\n                            'mean_var' record output using mean and variance. If\n                            'rank_vote' record output as the normalized vote\n                            count. 
If 'both' record both outputs.\n\n Returns:\n A dictionary containing the output classifications.\n\n Raises:\n ValueError if out_type is not one of ['mean_var', 'rank_vote', 'both']\n \"\"\"\n Classifier._variables_not_none([\"h\", \"j\", \"z\", \"v\"], [h, j, z, v])\n Classifier._arrays_same_size([h, j, z, v])\n\n if out_type not in [\"mean_var\", \"rank_vote\", \"both\"]:\n raise ValueError(\"Invalid value for `out_type`\")\n\n mean_var = out_type in [\"mean_var\", \"both\"]\n rank_vote = out_type in [\"rank_vote\", \"both\"]\n\n shape = h.shape\n\n hduls = []\n data = {}\n if out_dir:\n if mean_var:\n hs, ds = helpers.FitsHelper.create_mean_var_files(shape, out_dir)\n hduls.extend(hs)\n data.update(ds)\n if rank_vote:\n hs, ds = helpers.FitsHelper.create_rank_vote_files(shape, out_dir)\n hduls.extend(hs)\n data.update(ds)\n\n hs, ds = helpers.FitsHelper.create_n_file(shape, out_dir)\n hduls.extend(hs)\n data.update(ds)\n else:\n if mean_var:\n data.update(helpers.LabelHelper.make_mean_var_arrays(shape))\n if rank_vote:\n data.update(helpers.LabelHelper.make_rank_vote_arrays(shape))\n\n data.update(helpers.LabelHelper.make_n_array(shape))\n\n indicies = helpers.LabelHelper.windowed_index_generator(*shape)\n\n window_y, window_x = helpers.LabelHelper.UPDATE_MASK_N.shape\n batch_estimate = shape[0] - window_y + 1\n batch_estimate *= shape[1] - window_x + 1\n batch_estimate = batch_estimate // batch_size\n pbar = tqdm(total=batch_estimate, desc=\"classifying\", unit=\"batch\")\n\n while True:\n batch = []\n batch_idx = []\n\n for _ in range(batch_size):\n try:\n y, x = next(indicies)\n except StopIteration:\n break\n\n combined = np.array(\n [img[y : y + window_y, x : x + window_x] for img in [h, j, v, z]]\n )\n batch.append(Classifier._standardize_img(combined))\n batch_idx.append((y, x))\n\n if not batch:\n break\n\n batch = np.array(batch)\n\n labels = Classifier._call_morpheus(batch)\n\n helpers.LabelHelper.update_labels(data, labels, batch_idx, out_type)\n\n pbar.update()\n\n if rank_vote:\n helpers.LabelHelper.finalize_rank_vote(data)\n\n for hdul in hduls:\n hdul.close()\n\n return data\n\n @staticmethod\n def _standardize_img(img: np.ndarray) -> np.ndarray:\n \"\"\"Standardizes an input img to mean 0 and unit variance.\n\n Uses the formula described in:\n\n https://www.tensorflow.org/api_docs/python/tf/image/per_image_standardization\n\n Args:\n img (np.ndarray): the input array to standardize\n\n Returns:\n The standardized input\n \"\"\"\n num = img - img.mean()\n denom = max(img.std(), 1 / np.sqrt(np.prod(img.shape)))\n return num / denom\n\n @staticmethod\n def _arrays_same_size(arrays: List[np.ndarray]) -> None:\n \"\"\"Verifies that all arrays are the same shape.\n\n Args:\n arrays (List[np.ndarray]): List of arrays that should have the same\n shape.\n\n Returns:\n None\n\n Raises:\n ValueError if arrays are not the same shape\n \"\"\"\n\n arr_shapes = [a.shape for a in arrays]\n\n arr_comp = arr_shapes[0]\n arr_to_comp = arr_shapes[1:]\n\n if not np.array_equiv(arr_comp, arr_to_comp):\n raise ValueError(f\"All shapes not the same: {arr_shapes}.\")\n\n @staticmethod\n def _variables_not_none(names: List[str], values: List[np.ndarray]) -> None:\n \"\"\"Verifies that all variables are not None.\n\n Args:\n names (List[str]): list of names of variables in the same order as\n `values`\n names (List[np.ndarray]): list of numpy arrays that should not be\n None\n\n Returns:\n None\n\n Raises:\n ValueError if a variable is None\n\n \"\"\"\n\n nones = []\n for name, value in 
zip(names, values):\n if value is None:\n nones.append(name)\n\n if nones:\n raise ValueError(\"{} should not be None\".format(nones))\n\n @staticmethod\n def _parse_files(\n h: str, j: str, v: str, z: str\n ) -> Tuple[List[fits.HDUList], List[np.ndarray]]:\n \"\"\"Validates that files exist. And returns the corresponding arrays.\n\n Args:\n h (str): the file location of the H band img\n j (str): the file location of the J band img\n v (str): the file location of the V band img\n z (str): the file location of the Z bnad img\n\n Returns:\n A tuple containing the a (List[HDUL], List[np.ndarray])\n\n Raises:\n ValueError if a variable is None\n\n \"\"\"\n Classifier._variables_not_none([\"h\", \"j\", \"z\", \"v\"], [h, j, z, v])\n\n return helpers.FitsHelper.get_files([h, j, v, z])\n\n @staticmethod\n def _call_morpheus(batch: np.ndarray) -> np.ndarray:\n \"\"\"Use morpheus to classify a batch of input values.\n\n Morpheus is called as a singleton using this method.\n\n Args:\n batch (np.ndarray): The input data in the shape\n [batch, channels, width, height]\n\n Returns:\n The classified numpy array with shape [batch, width, height, channels]\n\n \"\"\"\n batch = np.transpose(batch, axes=[0, 2, 3, 1])\n\n if Classifier.__graph is None:\n config = model.Morpheus.inference_hparams()\n inference_dataset = model.Morpheus.mock_dataset()\n\n # build graph\n m = model.Morpheus(config, inference_dataset, \"channels_last\")\n Classifier.__graph = m.inference(Classifier.__X)\n\n # get weights\n saver = tf.train.Saver()\n Classifier.__session = tf.Session()\n w_location = model.Morpheus.get_weights_dir()\n saver.restore(Classifier.__session, tf.train.latest_checkpoint(w_location))\n\n return Classifier.__session.run(\n Classifier.__graph, feed_dict={Classifier.__X: batch}\n )\n\n @staticmethod\n def _get_split_length(shape: List[int], num_workers: int) -> int:\n \"\"\"Calculate the size of the sub images for classification.\n\n Args:\n shape (List[int]): the shape of the array to be split\n num_workers (int): the number of splits to make\n\n Returns:\n The length of each split along axis 0\n\n TODO: Implement splits along other axes\n \"\"\"\n\n return (shape[0] + (num_workers - 1) * 40) // num_workers\n\n @staticmethod\n def _get_split_slice_generator(\n shape: Tuple[int], num_workers: int, slice_length: int\n ) -> Iterable[slice]:\n \"\"\"Creates a generator that yields `slice` objects to split imgs.\n\n Args:\n shape (Tuple[int]): The shape of the array to be split\n num_workers (int): The number of splits to make\n split_length (int): The length each slice should be\n\n Returns\n A generator that yields slice objects\n\n TODO: Implement splits along other axes\n \"\"\"\n\n idx = 0\n for i in range(num_workers):\n start_idx = max(idx - 39, 0)\n\n if i == num_workers - 1:\n end_idx = shape[0]\n else:\n end_idx = start_idx + slice_length - 1\n\n idx = end_idx\n\n yield slice(start_idx, end_idx)\n\n @staticmethod\n def _make_runnable_file(\n path: str, batch_size: int = 1000, out_type: str = \"rank_vote\"\n ) -> None:\n \"\"\"Creates a file at `path` that classfies local FITS files.\n\n Args:\n path (str): The dir to save the file in\n batch_size (int): The batch size for Morpheus to use when classifying\n the input\n out_type (str): how to process the output from Morpheus. If\n 'mean_var' record output using mean and variance, If\n 'rank_vote' record output as the normalized vote\n count. 
If 'both' record both outputs.\n\n Returns:\n None\n \"\"\"\n\n local = os.path.dirname(os.path.dirname(__file__))\n text = [\n \"import sys\",\n f'sys.path.append(\"{local}\")',\n \"import os\",\n \"import numpy as np\",\n \"from tqdm import tqdm\",\n \"from morpheus.classifier import Classifier\",\n \"def main():\",\n \" data_dir = '.'\",\n \" output_dir = './output'\",\n \" if 'output' not in os.listdir():\",\n \" os.mkdir('./output')\",\n \" files = {\",\n \" 'h':os.path.join(data_dir, 'h.fits'),\",\n \" 'j':os.path.join(data_dir, 'j.fits'),\",\n \" 'v':os.path.join(data_dir, 'v.fits'),\",\n \" 'z':os.path.join(data_dir, 'z.fits')\",\n \" }\",\n \" Classifier.classify(h=files['h'],\",\n \" j=files['j'],\",\n \" v=files['v'],\",\n \" z=files['z'],\",\n f\" batch_size={batch_size},\",\n f' out_type=\"{out_type}\",',\n \" out_dir=output_dir)\",\n \" sys.exit(0)\",\n \"if __name__=='__main__':\",\n \" main()\",\n ]\n\n with open(os.path.join(path, \"main.py\"), \"w\") as f:\n f.write(\"\\n\".join(text))\n\n @staticmethod\n def _build_parallel_classification_structure(\n arrs: List[np.ndarray],\n workers: List[int],\n batch_size: int,\n out_dir: str,\n out_type: str,\n ) -> None:\n \"\"\"Sets up the subdirs and files to run the parallel classification.\n\n Args:\n arrs (List[np.ndarray]): List of arrays to split up in the order HJVZ\n workers (List[int]): A list of worker ID's that can either be CUDA GPU\n ID's or a list dummy numbers for cpu workers\n batch_size (int): The batch size for Morpheus to use when classifying\n the input.\n out_dir (str): the location to place the subdirs in\n\n Returns:\n None\n \"\"\"\n\n shape = arrs[0].shape\n num_workers = len(workers)\n split_slices = Classifier._get_split_slice_generator(\n shape, num_workers, Classifier._get_split_length(shape, num_workers)\n )\n\n for worker, split_slice in tqdm(zip(sorted(workers), split_slices)):\n sub_output_dir = os.path.join(out_dir, str(worker))\n os.mkdir(sub_output_dir)\n\n for name, data in zip([\"h\", \"j\", \"v\", \"z\"], arrs):\n tmp_location = os.path.join(sub_output_dir, \"{}.fits\".format(name))\n fits.PrimaryHDU(data=data[split_slice, :]).writeto(tmp_location)\n\n Classifier._make_runnable_file(sub_output_dir, batch_size, out_type)\n\n @staticmethod\n def _stitch_parallel_classifications(\n workers: List[int], out_dir: str, out_type: str\n ) -> None:\n \"\"\"Stitch the seperate outputs made from the parallel classifications.\n\n Args:\n workers (List[int]): A list of worker ID's that can either be CUDA GPU\n ID's or a list dummy numbers for cpu workers\n out_dir (str): the location that contains the parallel classified\n subdirs\n out_type (str): how to process the output from Morpheus. If\n 'mean_var' record output using mean and variance, If\n 'rank_vote' record output as the normalized vote\n count. 
If 'both' record both outputs.\n\n Returns:\n None\n \"\"\"\n jobs = []\n if out_type in [\"mean_var\", \"both\"]:\n jobs.append(\"mean_var\")\n if out_type in [\"rank_vote\", \"both\"]:\n jobs.append(\"rank_vote\")\n\n for morph in helpers.LabelHelper.MORPHOLOGIES:\n for job in jobs:\n if job == \"mean_var\":\n to_be_stitched = []\n for worker_id in workers: # each worker was assinged a dir by id\n dir_list = [out_dir, str(worker_id), \"output\"]\n f_mean = os.path.join(*(dir_list + [f\"{morph}_mean.fits\"]))\n f_var = os.path.join(*(dir_list + [f\"{morph}_var.fits\"]))\n f_n = os.path.join(*(dir_list + [\"n.fits\"]))\n\n to_be_stitched.append(\n (\n fits.getdata(f_mean),\n fits.getdata(f_var),\n fits.getdata(f_n),\n )\n )\n\n new_y = sum(t[0].shape[0] for t in to_be_stitched)\n new_y -= 39 * (len(to_be_stitched) - 1)\n\n new_x = to_be_stitched[0][0].shape[1]\n\n combined_mean = np.zeros(shape=[new_y, new_x], dtype=np.float32)\n combined_var = np.zeros(shape=[new_y, new_x], dtype=np.float32)\n combined_n = np.zeros(shape=[new_y, new_x], dtype=np.float32)\n\n start_y = 0\n for new_mean, new_var, new_n in to_be_stitched:\n Classifier._merge_parallel_means_vars(\n combined_mean,\n combined_var,\n combined_n,\n new_mean,\n new_var,\n new_n,\n start_y,\n )\n\n start_y += new_n.shape[0] - 39\n\n to_write = [\n (combined_mean, f\"{morph}_mean.fits\"),\n (combined_var, f\"{morph}_var.fits\"),\n (combined_n, \"n.fits\"),\n ]\n\n for f, n in to_write:\n fits.PrimaryHDU(data=f).writeto(\n os.path.join(out_dir, n), overwrite=True\n )\n\n if job == \"rank_vote\":\n to_be_stitched = []\n for worker_id in workers: # each worker was assinged a dir by id\n dir_list = [out_dir, str(worker_id), \"output\"]\n f_votes = os.path.join(*(dir_list + [f\"{morph}.fits\"]))\n f_n = os.path.join(*(dir_list + [\"n.fits\"]))\n\n to_be_stitched.append(\n (fits.getdata(f_votes), fits.getdata(f_n))\n )\n\n new_y = sum(t[0].shape[0] for t in to_be_stitched)\n new_y -= 39 * (len(to_be_stitched) - 1)\n\n new_x = to_be_stitched[0][0].shape[1]\n\n combined_votes = np.zeros(shape=[new_y, new_x], dtype=np.float32)\n combined_n = np.zeros(shape=[new_y, new_x], dtype=np.float32)\n\n start_y = 0\n for new_votes, new_n in to_be_stitched:\n Classifier._merge_parallel_rank_votes(\n combined_votes, combined_n, new_votes, new_n, start_y\n )\n\n start_y += new_n.shape[0] - 39\n\n to_write = [\n (combined_votes, f\"{morph}.fits\"),\n (combined_n, \"n.fits\"),\n ]\n\n for f, n in to_write:\n fits.PrimaryHDU(data=f).writeto(\n os.path.join(out_dir, n), overwrite=True\n )\n\n @staticmethod\n def _merge_parallel_means_vars(\n total_mean: np.ndarray,\n total_var: np.ndarray,\n total_n: np.ndarray,\n new_mean: np.ndarray,\n new_var: np.ndarray,\n new_n: np.ndarray,\n y_idx: int,\n ) -> None:\n \"\"\"Merge merge means/vars from a new piece to total.\n\n Derived from:\n https://www.emathzone.com/tutorials/basic-statistics/combined-variance.html\n\n Args:\n total (np.ndarray): The array of means to add ``new`` to\n total_n (np.ndarray): The array of counts to add ``new_n`` to\n new (np.ndarray): the new means to add to ``total``\n new_n (np.ndarray): the new counts to add to ``total``\n y_idx (int): index for placement of ``new`` into ``total`` along y axis\n\n Returns:\n None\n \"\"\"\n ys = slice(y_idx, y_idx + new_mean.shape[0])\n\n x1, x2 = total_mean[ys, :].copy(), new_mean.copy()\n s1, s2 = total_var[ys, :].copy(), new_var.copy()\n n1, n2 = total_n[ys, :].copy(), new_n.copy()\n\n denominator = n1 + n2\n\n xc_numerator = n1 * x1 + n2 * x2\n 
xc = np.where(denominator > 0, xc_numerator / denominator, 0)\n\n        sc_numerator = (n1 * (s1 + np.square(x1 - xc))) + (\n            n2 * (s2 + np.square(x2 - xc))\n        )\n        sc = np.where(denominator > 0, sc_numerator / denominator, 0)\n\n        total_mean[ys, :] = xc\n        total_var[ys, :] = sc\n        total_n[ys, :] = denominator\n\n    @staticmethod\n    def _merge_parallel_rank_votes(\n        total_votes: np.ndarray,\n        total_n: np.ndarray,\n        new_votes: np.ndarray,\n        new_n: np.ndarray,\n        y_idx: int,\n    ) -> None:\n        \"\"\"Merge vote counts from a new piece to total.\n\n        Args:\n            total_votes (np.ndarray): The array of votes to add ``new`` to\n            total_n (np.ndarray): The array of counts to add ``new_n`` to\n            new_votes (np.ndarray): The array of votes to add to ``total``\n            new_n (np.ndarray): The array of counts to add to ``new``\n            y_idx (int): index for placement of ``new`` into ``total`` along y axis\n\n        Returns:\n            None\n        \"\"\"\n        ys = slice(y_idx, y_idx + new_votes.shape[0])\n\n        x1, x2 = total_votes[ys, :].copy(), new_votes.copy()\n        n1, n2 = total_n[ys, :].copy(), new_n.copy()\n\n        numerator = (n1 * x1) + (n2 * x2)\n        denominator = n1 + n2\n        mean = np.where(denominator > 0, numerator / denominator, 0)\n\n        total_votes[ys, :] = mean\n        total_n[ys, :] = denominator\n\n    # TODO: Add an informative output.\n    @staticmethod\n    def _run_parallel_jobs(\n        workers: List[int], is_gpu: bool, out_dir: str, parallel_check_interval: float\n    ) -> None:\n        \"\"\"Starts and tracks parallel job runs.\n\n        WARNING: This will not finish running until all subprocesses are complete\n\n        Args:\n            workers (List[int]): A list of worker ID's to assign to a portion of an\n                                 image.\n            is_gpu (bool): if True the worker ID's belong to NVIDIA GPUs and will\n                           be used as an argument in CUDA_VISIBLE_DEVICES. If False,\n                           then the ID's are associated with CPU workers\n            out_dir (str): the location with the partitioned data\n            parallel_check_interval (float): If gpus are given, then this is the number\n                                             of minutes to wait between polling each\n                                             subprocess for completion\n\n        Returns:\n            None\n        \"\"\"\n\n        processes = {}\n\n        for worker in workers:\n            if is_gpu:\n                cmd_string = f\"CUDA_VISIBLE_DEVICES={worker} python main.py\"\n            else:\n                cmd_string = f\"CUDA_VISIBLE_DEVICES=-1 python main.py\"\n\n            sub_dir = os.path.join(out_dir, str(worker))\n            processes[worker] = Popen(cmd_string, shell=True, cwd=sub_dir)\n\n        is_running = np.ones([len(workers)], dtype=np.bool)\n        while is_running.any():\n            for i, g in enumerate(sorted(workers)):\n                if is_running[i] and (processes[g].poll() is not None):\n                    is_running[i] = False\n\n            if is_running.any():\n                time.sleep(parallel_check_interval * 60)\n            else: # we're done we can skip sleep\n                break\n\n    @staticmethod\n    def _validate_parallel_params(\n        gpus: List[int] = None, cpus: int = None\n    ) -> Tuple[List[int], bool]:\n        \"\"\"Validates the parallelism scheme.\n\n        Only one of the arguments should be given.\n\n        Args:\n            gpus (List[int]): A list of the CUDA gpu ID's to use for a\n                              parallel classification.\n            cpus (int): Number of cpus to use for a parallel classification\n\n        Returns:\n            A tuple containing the list of worker ids and a boolean indicating\n            whether or not the ids belong to GPUs\n\n        Raises:\n            ValueError if both cpus and gpus are not None\n        \"\"\"\n\n        # invalid params\n        if (gpus is not None) and (cpus is not None):\n            raise ValueError(\"Please only give a value for cpus or gpus, not both.\")\n\n        # Simple serial run\n        if (gpus is None) and (cpus is None):\n            return [0], False\n\n        if gpus is not None:\n            if len(gpus) == 1:\n                err = \"Only one gpu indicated. 
If you are trying to select \"\n err += \"a single gpu, then use the CUDA_VISIBLE_DEVICES environment \"\n err += \"variable. For more information visit: \"\n err += \"https://devblogs.nvidia.com/cuda-pro-tip-control-gpu-visibility-cuda_visible_devices/\"\n\n raise ValueError(err)\n else:\n return gpus, True\n else:\n if cpus < 2:\n raise ValueError(\n \"If passing cpus please indicate a value greater than 1.\"\n )\n\n return np.arange(cpus), False\n\n @staticmethod\n def _deblend(segmap: np.ndarray, flux: np.ndarray, min_distance: int) -> np.ndarray:\n \"\"\"Deblends a segmentation map according to the description in make_segmap.\n\n Args:\n segmap (np.ndarray): The segmentation map image to deblend\n flux (np.ndarray): The corresponding flux image in H band\n min_distance (int): The radius of the PSF for the instrument used on H band\n\n Returns:\n A np.ndarray representing the deblended segmap\n \"\"\"\n\n max_id = segmap.max()\n\n for region in tqdm(regionprops(segmap, flux), desc=\"Deblending\"):\n\n # greater than 1 indicates that the region is not background\n if region.label > 0:\n flx = region.intensity_image\n seg = region.filled_image\n flux_map = flx * seg\n\n maxes = peak_local_max(\n flux_map, min_distance=min_distance, num_peaks=20\n )\n\n # more than 1 source found, deblend\n if maxes.shape[0] > 1:\n start_y, start_x, end_y, end_x = region.bbox\n markers = np.zeros_like(seg, dtype=np.int)\n\n for y, x in maxes:\n max_id += 1\n markers[y, x] = max_id\n\n deblended = watershed(-flux_map, markers, mask=seg)\n\n local_segmap = segmap[start_y:end_y, start_x:end_x].copy()\n local_segmap = np.where(seg, deblended, local_segmap)\n segmap[start_y:end_y, start_x:end_x] = local_segmap\n\n return segmap\n\n @staticmethod\n def aggregation_scheme_flux_weighted(\n data: dict, flux: np.ndarray, segmap: np.ndarray\n ) -> List[float]:\n \"\"\"Aggregates pixel level morphological classifications to the source level.\n\n Uses a flux-weighted mean of the pixel level morphologies to calculate\n the aggregate source level morphology.\n\n Args:\n data (dict): A dictionary containing the output from morpheus.\n flux (np.ndarray): The corresponding flux image in H band\n segmap (int): The binary map indicating pixels that belong to the\n source\n\n Returns:\n The morphological classification as a list of floats in the\n following order: ['spheroid', 'disk', 'irregular', 'point source']\n \"\"\"\n classifications = np.zeros([4])\n\n morphs = [\"spheroid\", \"disk\", \"irregular\", \"point_source\"]\n\n morphs = [data[m] for m in morphs]\n\n for i, m in enumerate(morphs):\n classifications[i] = np.mean(m[segmap] * flux[segmap])\n\n return (classifications / classifications.sum()).tolist()\n" ]
[ [ "matplotlib.colors.hsv_to_rgb", "numpy.zeros_like", "numpy.mean", "tensorflow.compat.v1.train.Saver", "numpy.where", "numpy.square", "numpy.arange", "numpy.zeros", "numpy.logical_not", "scipy.ndimage.label", "numpy.transpose", "tensorflow.compat.v1.disable_eager_execution", "numpy.argsort", "tensorflow.compat.v1.train.latest_checkpoint", "numpy.array_equiv", "numpy.array", "numpy.logical_and", "numpy.dstack", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.placeholder", "numpy.prod" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
KDD2022-MSCMT/MSCMT
[ "6a3e1e6230aa519a57345f6dbb0731b3ed6fe1ce" ]
[ "object_detector/mrcnn/config.py" ]
[ "\n\nimport math\nimport numpy as np\n\n\n# Base Configuration Class\n# Don't use this class directly. Instead, sub-class it and override\n# the configurations you need to change.\n\nclass Config(object):\n \"\"\"Base configuration class. For custom configurations, create a\n sub-class that inherits from this one and override properties\n that need to be changed.\n \"\"\"\n # Name the configurations. For example, 'COCO', 'Experiment 3', ...etc.\n # Useful if your code needs to do things differently depending on which\n # experiment is running.\n NAME = None # Override in sub-classes\n\n # NUMBER OF GPUs to use. For CPU training, use 1\n GPU_COUNT = 1\n\n # Number of images to train with on each GPU. A 12GB GPU can typically\n # handle 2 images of 1024x1024px.\n # Adjust based on your GPU memory and image sizes. Use the highest\n # number that your GPU can handle for best performance.\n IMAGES_PER_GPU = 2\n\n # Number of training steps per epoch\n # This doesn't need to match the size of the training set. Tensorboard\n # updates are saved at the end of each epoch, so setting this to a\n # smaller number means getting more frequent TensorBoard updates.\n # Validation stats are also calculated at each epoch end and they\n # might take a while, so don't set this too small to avoid spending\n # a lot of time on validation stats.\n STEPS_PER_EPOCH = 1000\n\n # Number of validation steps to run at the end of every training epoch.\n # A bigger number improves accuracy of validation stats, but slows\n # down the training.\n VALIDATION_STEPS = 50\n\n # Backbone network architecture\n # Supported values are: resnet50, resnet101\n BACKBONE = \"resnet101\"\n\n # The strides of each layer of the FPN Pyramid. These values\n # are based on a Resnet101 backbone.\n BACKBONE_STRIDES = [4, 8, 16, 32, 64]\n\n # Number of classification classes (including background)\n NUM_CLASSES = 1 # Override in sub-classes\n\n # Length of square anchor side in pixels\n RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512)\n\n # Ratios of anchors at each cell (width/height)\n # A value of 1 represents a square anchor, and 0.5 is a wide anchor\n RPN_ANCHOR_RATIOS = [0.5, 1, 2]\n\n # Anchor stride\n # If 1 then anchors are created for each cell in the backbone feature map.\n # If 2, then anchors are created for every other cell, and so on.\n RPN_ANCHOR_STRIDE = 1\n\n # Non-max suppression threshold to filter RPN proposals.\n # You can increase this during training to generate more propsals.\n RPN_NMS_THRESHOLD = 0.7\n\n # How many anchors per image to use for RPN training\n RPN_TRAIN_ANCHORS_PER_IMAGE = 256\n\n # ROIs kept after non-maximum supression (training and inference)\n POST_NMS_ROIS_TRAINING = 2000\n POST_NMS_ROIS_INFERENCE = 1000\n\n # If enabled, resizes instance masks to a smaller size to reduce\n # memory load. Recommended when using high-resolution images.\n USE_MINI_MASK = True\n MINI_MASK_SHAPE = (56, 56) # (height, width) of the mini-mask\n\n # Input image resizing\n # Generally, use the \"square\" resizing mode for training and inferencing\n # and it should work well in most cases. In this mode, images are scaled\n # up such that the small side is = IMAGE_MIN_DIM, but ensuring that the\n # scaling doesn't make the long side > IMAGE_MAX_DIM. Then the image is\n # padded with zeros to make it a square so multiple images can be put\n # in one batch.\n # Available resizing modes:\n # none: No resizing or padding. 
Return the image unchanged.\n # square: Resize and pad with zeros to get a square image\n # of size [max_dim, max_dim].\n # pad64: Pads width and height with zeros to make them multiples of 64.\n # If IMAGE_MIN_DIM or IMAGE_MIN_SCALE are not None, then it scales\n # up before padding. IMAGE_MAX_DIM is ignored in this mode.\n # The multiple of 64 is needed to ensure smooth scaling of feature\n # maps up and down the 6 levels of the FPN pyramid (2**6=64).\n # crop: Picks random crops from the image. First, scales the image based\n # on IMAGE_MIN_DIM and IMAGE_MIN_SCALE, then picks a random crop of\n # size IMAGE_MIN_DIM x IMAGE_MIN_DIM. Can be used in training only.\n # IMAGE_MAX_DIM is not used in this mode.\n IMAGE_RESIZE_MODE = \"square\"\n IMAGE_MIN_DIM = 800\n IMAGE_MAX_DIM = 1024\n # Minimum scaling ratio. Checked after MIN_IMAGE_DIM and can force further\n # up scaling. For example, if set to 2 then images are scaled up to double\n # the width and height, or more, even if MIN_IMAGE_DIM doesn't require it.\n # Howver, in 'square' mode, it can be overruled by IMAGE_MAX_DIM.\n IMAGE_MIN_SCALE = 0\n\n # Image mean (RGB)\n MEAN_PIXEL = np.array([123.7, 116.8, 103.9])\n\n # Number of ROIs per image to feed to classifier/mask heads\n # The Mask RCNN paper uses 512 but often the RPN doesn't generate\n # enough positive proposals to fill this and keep a positive:negative\n # ratio of 1:3. You can increase the number of proposals by adjusting\n # the RPN NMS threshold.\n TRAIN_ROIS_PER_IMAGE = 200\n\n # Percent of positive ROIs used to train classifier/mask heads\n ROI_POSITIVE_RATIO = 0.33\n\n # Pooled ROIs\n POOL_SIZE = 7\n MASK_POOL_SIZE = 14\n\n # Shape of output mask\n # To change this you also need to change the neural network mask branch\n MASK_SHAPE = [28, 28]\n\n # Maximum number of ground truth instances to use in one image\n MAX_GT_INSTANCES = 100\n\n # Bounding box refinement standard deviation for RPN and final detections.\n RPN_BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])\n BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])\n\n # Max number of final detections\n DETECTION_MAX_INSTANCES = 100\n\n # Minimum probability value to accept a detected instance\n # ROIs below this threshold are skipped\n DETECTION_MIN_CONFIDENCE = 0.7\n\n # Non-maximum suppression threshold for detection\n DETECTION_NMS_THRESHOLD = 0.3\n\n # Learning rate and momentum\n # The Mask RCNN paper uses lr=0.02, but on TensorFlow it causes\n # weights to explode. Likely due to differences in optimzer\n # implementation.\n LEARNING_RATE = 0.001\n LEARNING_MOMENTUM = 0.9\n\n # Weight decay regularization\n WEIGHT_DECAY = 0.0001\n\n # Loss weights for more precise optimization.\n # Can be used for R-CNN training setup.\n LOSS_WEIGHTS = {\n \"rpn_class_loss\": 1.,\n \"rpn_bbox_loss\": 1.,\n \"mrcnn_class_loss\": 1.,\n \"mrcnn_bbox_loss\": 1.,\n \"mrcnn_mask_loss\": 1.\n }\n\n # Use RPN ROIs or externally generated ROIs for training\n # Keep this True for most situations. Set to False if you want to train\n # the head branches on ROI generated by code rather than the ROIs from\n # the RPN. For example, to debug the classifier head without having to\n # train the RPN.\n USE_RPN_ROIS = True\n\n # Train or freeze batch normalization layers\n # None: Train BN layers. This is the normal mode\n # False: Freeze BN layers. Good when using a small batch size\n # True: (don't use). 
Set layer in training mode even when inferencing\n TRAIN_BN = False # Defaulting to False since batch size is often small\n\n # Gradient norm clipping\n GRADIENT_CLIP_NORM = 5.0\n\n def __init__(self):\n \"\"\"Set values of computed attributes.\"\"\"\n # Effective batch size\n self.BATCH_SIZE = self.IMAGES_PER_GPU * self.GPU_COUNT\n\n # Input image size\n if self.IMAGE_RESIZE_MODE == \"crop\":\n self.IMAGE_SHAPE = np.array([self.IMAGE_MIN_DIM, self.IMAGE_MIN_DIM, 3])\n else:\n self.IMAGE_SHAPE = np.array([self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM, 3])\n\n # Image meta data length\n # See compose_image_meta() for details\n self.IMAGE_META_SIZE = 1 + 3 + 3 + 4 + 1 + self.NUM_CLASSES\n\n def display(self):\n \"\"\"Display Configuration values.\"\"\"\n print(\"\\nConfigurations:\")\n for a in dir(self):\n if not a.startswith(\"__\") and not callable(getattr(self, a)):\n print(\"{:30} {}\".format(a, getattr(self, a)))\n print(\"\\n\")\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
janEbert/pySDC
[ "167d78c4118bc3a5a446ec973fe65fb35db94471", "167d78c4118bc3a5a446ec973fe65fb35db94471" ]
[ "pySDC/playgrounds/Boris/spiraling_particle_ProblemClass.py", "pySDC/projects/FastWaveSlowWave/AcousticAdvection_1D_FD_imex_multiscale.py" ]
[ "\nimport numpy as np\n\nfrom pySDC.core.Problem import ptype\nfrom pySDC.implementations.datatype_classes.particles import particles, fields, acceleration\n\n\nclass planewave_single(ptype):\n \"\"\"\n Example implementing a single particle spiraling in a trap\n \"\"\"\n\n def __init__(self, cparams, dtype_u=particles, dtype_f=fields):\n \"\"\"\n Initialization routine\n\n Args:\n cparams: custom parameters for the example\n dtype_u: particle data type (will be passed parent class)\n dtype_f: fields data type (will be passed parent class)\n \"\"\"\n\n # these parameters will be used later, so assert their existence\n assert 'delta' in cparams # polarization\n assert 'a0' in cparams # normalized amplitude\n assert 'u0' in cparams # initial position and velocity\n\n # add parameters as attributes for further reference\n for k, v in cparams.items():\n setattr(self, k, v)\n\n # set nparts to one (lonely particle, you know)\n self.nparts = 1\n # invoke super init, passing nparts, dtype_u and dtype_f\n super(planewave_single, self).__init__(self.nparts, dtype_u, dtype_f, cparams)\n\n def eval_f(self, part, t):\n \"\"\"\n Routine to compute the electric and magnetic fields\n\n Args:\n t: current time\n part: the current particle\n Returns:\n E and B field for the particle (external only)\n \"\"\"\n\n f = self.dtype_f((3, self.nparts))\n\n R = np.linalg.norm(part.pos.values[:, 0], 2)\n f.elec.values[0, 0] = self.params.a0 / (R ** 3) * part.pos.values[0, 0]\n f.elec.values[1, 0] = self.params.a0 / (R ** 3) * part.pos.values[1, 0]\n f.elec.values[2, 0] = 0\n\n f.magn.values[0, 0] = 0\n f.magn.values[1, 0] = 0\n f.magn.values[2, 0] = R\n\n return f\n\n def u_init(self):\n \"\"\"\n Initialization routine for the single particle\n\n Returns:\n particle type\n \"\"\"\n\n u0 = self.params.u0\n # some abbreviations\n u = self.dtype_u((3, 1))\n\n u.pos.values[0, 0] = u0[0][0]\n u.pos.values[1, 0] = u0[0][1]\n u.pos.values[2, 0] = u0[0][2]\n\n u.vel.values[0, 0] = u0[1][0]\n u.vel.values[1, 0] = u0[1][1]\n u.vel.values[2, 0] = u0[1][2]\n\n u.q[:] = u0[2][0]\n u.m[:] = u0[3][0]\n\n return u\n\n def build_f(self, f, part, t):\n \"\"\"\n Helper function to assemble the correct right-hand side out of B and E field\n\n Args:\n f: wannabe right-hand side, actually the E field\n part: particle data\n t: current time\n Returns:\n correct RHS of type acceleration\n \"\"\"\n\n assert isinstance(part, particles)\n rhs = acceleration((3, self.nparts))\n rhs.values[:, 0] = part.q[:] / part.m[:] * \\\n (f.elec.values[:, 0] + np.cross(part.vel.values[:, 0], f.magn.values[:, 0]))\n\n return rhs\n\n def boris_solver(self, c, dt, old_fields, new_fields, old_parts):\n \"\"\"\n The actual Boris solver for static (!) 
B fields, extended by the c-term\n\n Args:\n c: the c term gathering the known values from the previous iteration\n dt: the (probably scaled) time step size\n old_fields: the field values at the previous node m\n new_fields: the field values at the current node m+1\n old_parts: the particles at the previous node m\n Returns:\n the velocities at the (m+1)th node\n \"\"\"\n\n N = self.nparts\n vel = particles.velocity((3, 1))\n\n Emean = 1.0 / 2.0 * (old_fields.elec + new_fields.elec)\n\n for n in range(N):\n a = old_parts.q[n] / old_parts.m[n]\n\n c.values[:, n] += dt / 2 * a * \\\n np.cross(old_parts.vel.values[:, n], old_fields.magn.values[:, n] - new_fields.magn.values[:, n])\n\n # pre-velocity, separated by the electric forces (and the c term)\n vm = old_parts.vel.values[:, n] + dt / 2 * a * Emean.values[:, n] + c.values[:, n] / 2\n # rotation\n t = dt / 2 * a * new_fields.magn.values[:, n]\n s = 2 * t / (1 + np.linalg.norm(t, 2) ** 2)\n vp = vm + np.cross(vm + np.cross(vm, t), s)\n # post-velocity\n vel.values[:, n] = vp + dt / 2 * a * Emean.values[:, n] + c.values[:, n] / 2\n\n return vel\n", "import numpy as np\n\nfrom pySDC.implementations.problem_classes.AcousticAdvection_1D_FD_imex import acoustic_1d_imex\n\n\n# noinspection PyUnusedLocal\nclass acoustic_1d_imex_multiscale(acoustic_1d_imex):\n \"\"\"\n Example implementing the one-dimensional IMEX acoustic-advection with multiscale initial values\n \"\"\"\n\n def u_exact(self, t):\n \"\"\"\n Routine to compute the exact solution at time t\n\n Args:\n t (float): current time\n\n Returns:\n dtype_u: exact solution\n \"\"\"\n\n sigma_0 = 0.1\n k = 7.0 * 2.0 * np.pi\n x_0 = 0.75\n x_1 = 0.25\n\n ms = 1.0\n\n me = self.dtype_u(self.init)\n me.values[0, :] = np.exp(-np.square(self.mesh - x_0 - self.params.cs * t) / (sigma_0 * sigma_0)) + \\\n ms * np.exp(-np.square(self.mesh - x_1 - self.params.cs * t) / (sigma_0 * sigma_0)) * \\\n np.cos(k * (self.mesh - self.params.cs * t) / sigma_0)\n me.values[1, :] = me.values[0, :]\n\n return me\n" ]
[ [ "numpy.linalg.norm", "numpy.cross" ], [ "numpy.square", "numpy.cos" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sausagecy/Relation-Shape-CNN
[ "a91ac768cd720773359dda9b3e234815f88d88b4" ]
[ "utils/pointnet2_modules.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport utils.pointnet2_utils as pointnet2_utils\nimport utils.pytorch_utils as pt_utils\nfrom typing import List\nimport numpy as np\nimport time\nimport math\n\nclass _PointnetSAModuleBase(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.npoint = None\n self.groupers = None\n self.mlps = None\n\n def forward(self, xyz: torch.Tensor,\n features: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):\n r\"\"\"\n Parameters\n ----------\n xyz : torch.Tensor\n (B, N, 3) tensor of the xyz coordinates of the points\n features : torch.Tensor\n (B, C, N) tensor of the descriptors of the the points\n\n Returns\n -------\n new_xyz : torch.Tensor\n (B, npoint, 3) tensor of the new points' xyz\n new_features : torch.Tensor\n (B, npoint, \\sum_k(mlps[k][-1])) tensor of the new_points descriptors\n \"\"\"\n\n new_features_list = []\n xyz_flipped = xyz.transpose(1, 2).contiguous()\n if self.npoint is not None:\n fps_idx = pointnet2_utils.furthest_point_sample(xyz, self.npoint) # (B, npoint)\n new_xyz = pointnet2_utils.gather_operation(xyz_flipped, fps_idx).transpose(1, 2).contiguous()\n fps_idx = fps_idx.data\n else:\n new_xyz = None\n fps_idx = None\n \n for i in range(len(self.groupers)):\n new_features = self.groupers[i](xyz, new_xyz, features, fps_idx) if self.npoint is not None else self.groupers[i](xyz, new_xyz, features) # (B, C, npoint, nsample)\n new_features = self.mlps[i](\n new_features\n ) # (B, mlp[-1], npoint)\n\n new_features_list.append(new_features)\n \n return new_xyz, torch.cat(new_features_list, dim=1)\n\n\nclass PointnetSAModuleMSG(_PointnetSAModuleBase):\n r\"\"\"Pointnet set abstrction layer with multiscale grouping\n\n Parameters\n ----------\n npoint : int\n Number of points\n radii : list of float32\n list of radii to group with\n nsamples : list of int32\n Number of samples in each ball query\n mlps : list of list of int32\n Spec of the pointnet before the global max_pool for each scale\n bn : bool\n Use batchnorm\n \"\"\"\n\n def __init__(\n self,\n *,\n npoint: int,\n radii: List[float],\n nsamples: List[int],\n mlps: List[List[int]],\n use_xyz: bool = True,\n bias = True,\n init = nn.init.kaiming_normal_,\n first_layer = False,\n relation_prior = 1\n ):\n super().__init__()\n assert len(radii) == len(nsamples) == len(mlps)\n self.npoint = npoint\n self.groupers = nn.ModuleList()\n self.mlps = nn.ModuleList()\n \n # initialize shared mapping functions\n C_in = (mlps[0][0] + 3) if use_xyz else mlps[0][0]\n C_out = mlps[0][1]\n\n \n if relation_prior == 0:\n in_channels = 1\n elif relation_prior == 1 or relation_prior == 2:\n in_channels = 10\n else:\n assert False, \"relation_prior can only be 0, 1, 2.\"\n \n if first_layer:\n mapping_func1 = nn.Conv2d(in_channels = in_channels, out_channels = math.floor(C_out / 2), kernel_size = (1, 1), \n stride = (1, 1), bias = bias)\n mapping_func2 = nn.Conv2d(in_channels = math.floor(C_out / 2), out_channels = 16, kernel_size = (1, 1), \n stride = (1, 1), bias = bias)\n xyz_raising = nn.Conv2d(in_channels = C_in, out_channels = 16, kernel_size = (1, 1), \n stride = (1, 1), bias = bias)\n init(xyz_raising.weight)\n if bias:\n nn.init.constant_(xyz_raising.bias, 0)\n elif npoint is not None:\n mapping_func1 = nn.Conv2d(in_channels = in_channels, out_channels = math.floor(C_out / 4), kernel_size = (1, 1), \n stride = (1, 1), bias = bias)\n mapping_func2 = nn.Conv2d(in_channels = math.floor(C_out / 4), out_channels = C_in, kernel_size = (1, 1), \n 
stride = (1, 1), bias = bias)\n if npoint is not None:\n init(mapping_func1.weight)\n init(mapping_func2.weight)\n if bias:\n nn.init.constant_(mapping_func1.bias, 0)\n nn.init.constant_(mapping_func2.bias, 0) \n \n # channel raising mapping\n cr_mapping = nn.Conv1d(in_channels = C_in if not first_layer else 16, out_channels = C_out, kernel_size = 1, \n stride = 1, bias = bias)\n init(cr_mapping.weight)\n nn.init.constant_(cr_mapping.bias, 0)\n \n if first_layer:\n mapping = [mapping_func1, mapping_func2, cr_mapping, xyz_raising]\n elif npoint is not None:\n mapping = [mapping_func1, mapping_func2, cr_mapping]\n \n for i in range(len(radii)):\n radius = radii[i]\n nsample = nsamples[i]\n self.groupers.append(\n pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz)\n if npoint is not None else pointnet2_utils.GroupAll(use_xyz)\n )\n mlp_spec = mlps[i]\n if use_xyz:\n mlp_spec[0] += 3\n if npoint is not None:\n self.mlps.append(pt_utils.SharedRSConv(mlp_spec, mapping = mapping, relation_prior = relation_prior, first_layer = first_layer))\n else: # global convolutional pooling\n self.mlps.append(pt_utils.GloAvgConv(C_in = C_in, C_out = C_out))\n\n\nclass PointnetSAModule(PointnetSAModuleMSG):\n r\"\"\"Pointnet set abstrction layer\n\n Parameters\n ----------\n npoint : int\n Number of features\n radius : float\n Radius of ball\n nsample : int\n Number of samples in the ball query\n mlp : list\n Spec of the pointnet before the global max_pool\n bn : bool\n Use batchnorm\n \"\"\"\n\n def __init__(\n self,\n *,\n mlp: List[int],\n npoint: int = None,\n radius: float = None,\n nsample: int = None,\n use_xyz: bool = True,\n ):\n super().__init__(\n mlps=[mlp],\n npoint=npoint,\n radii=[radius],\n nsamples=[nsample],\n use_xyz=use_xyz\n )\n\n\nclass PointnetFPModule(nn.Module):\n r\"\"\"Propigates the features of one set to another\n\n Parameters\n ----------\n mlp : list\n Pointnet module parameters\n bn : bool\n Use batchnorm\n \"\"\"\n\n def __init__(self, *, mlp: List[int], bn: bool = True):\n super().__init__()\n self.mlp = pt_utils.SharedMLP(mlp, bn=bn)\n\n def forward(\n self, unknown: torch.Tensor, known: torch.Tensor,\n unknow_feats: torch.Tensor, known_feats: torch.Tensor\n ) -> torch.Tensor:\n r\"\"\"\n Parameters\n ----------\n unknown : torch.Tensor\n (B, n, 3) tensor of the xyz positions of the unknown features\n known : torch.Tensor\n (B, m, 3) tensor of the xyz positions of the known features\n unknow_feats : torch.Tensor\n (B, C1, n) tensor of the features to be propigated to\n known_feats : torch.Tensor\n (B, C2, m) tensor of features to be propigated\n\n Returns\n -------\n new_features : torch.Tensor\n (B, mlp[-1], n) tensor of the features of the unknown features\n \"\"\"\n\n dist, idx = pointnet2_utils.three_nn(unknown, known)\n dist_recip = 1.0 / (dist + 1e-8)\n norm = torch.sum(dist_recip, dim=2, keepdim=True)\n weight = dist_recip / norm\n\n interpolated_feats = pointnet2_utils.three_interpolate(\n known_feats, idx, weight\n )\n if unknow_feats is not None:\n new_features = torch.cat([interpolated_feats, unknow_feats],\n dim=1) #(B, C2 + C1, n)\n else:\n new_features = interpolated_feats\n \n new_features = new_features.unsqueeze(-1)\n new_features = self.mlp(new_features)\n\n return new_features.squeeze(-1)\n \n\nif __name__ == \"__main__\":\n from torch.autograd import Variable\n torch.manual_seed(1)\n torch.cuda.manual_seed_all(1)\n xyz = Variable(torch.randn(2, 9, 3).cuda(), requires_grad=True)\n xyz_feats = Variable(torch.randn(2, 9, 6).cuda(), 
requires_grad=True)\n\n test_module = PointnetSAModuleMSG(\n npoint=2, radii=[5.0, 10.0], nsamples=[6, 3], mlps=[[9, 3], [9, 6]]\n )\n test_module.cuda()\n print(test_module(xyz, xyz_feats))\n\n # test_module = PointnetFPModule(mlp=[6, 6])\n # test_module.cuda()\n # from torch.autograd import gradcheck\n # inputs = (xyz, xyz, None, xyz_feats)\n # test = gradcheck(test_module, inputs, eps=1e-6, atol=1e-4)\n # print(test)\n\n for _ in range(1):\n _, new_features = test_module(xyz, xyz_feats)\n new_features.backward(\n torch.cuda.FloatTensor(*new_features.size()).fill_(1)\n )\n print(new_features)\n print(xyz.grad)\n" ]
[ [ "torch.cat", "torch.nn.init.constant_", "torch.manual_seed", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.sum", "torch.randn", "torch.cuda.manual_seed_all", "torch.nn.Conv1d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
chuanglaipv/realtime_object_detection_xavier
[ "694c91db953a2e18440fcdcf5f38df90ba063bfc" ]
[ "lib/detection_nms_v1.py" ]
[ "import numpy as np\nfrom tf_utils import visualization_utils_cv2 as vis_util\nfrom lib.session_worker import SessionWorker\nfrom lib.load_graph_nms_v1 import LoadFrozenGraph\nfrom lib.load_label_map import LoadLabelMap\nfrom lib.mpvariable import MPVariable\nfrom lib.mpvisualizeworker import MPVisualizeWorker, visualization\nfrom lib.mpio import start_sender\n\nimport time\nimport cv2\nimport tensorflow as tf\nimport os\n\nimport sys\nPY2 = sys.version_info[0] == 2\nPY3 = sys.version_info[0] == 3\nif PY2:\n import Queue\nelif PY3:\n import queue as Queue\n\n\nclass NMSV1():\n def __init__(self):\n return\n\n def start(self, cfg):\n \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\"\n GET CONFIG\n \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\"\n FORCE_GPU_COMPATIBLE = cfg['force_gpu_compatible']\n SAVE_TO_FILE = cfg['save_to_file']\n VISUALIZE = cfg['visualize']\n VIS_WORKER = cfg['vis_worker']\n VIS_TEXT = cfg['vis_text']\n MAX_FRAMES = cfg['max_frames']\n WIDTH = cfg['width']\n HEIGHT = cfg['height']\n FPS_INTERVAL = cfg['fps_interval']\n DET_INTERVAL = cfg['det_interval']\n DET_TH = cfg['det_th']\n SPLIT_MODEL = cfg['split_model']\n LOG_DEVICE = cfg['log_device']\n ALLOW_MEMORY_GROWTH = cfg['allow_memory_growth']\n SPLIT_SHAPE = cfg['split_shape']\n DEBUG_MODE = cfg['debug_mode']\n LABEL_PATH = cfg['label_path']\n NUM_CLASSES = cfg['num_classes']\n SRC_FROM = cfg['src_from']\n CAMERA = 0\n MOVIE = 1\n IMAGE = 2\n if SRC_FROM == 'camera':\n SRC_FROM = CAMERA\n VIDEO_INPUT = cfg['camera_input']\n elif SRC_FROM == 'movie':\n SRC_FROM = MOVIE\n VIDEO_INPUT = cfg['movie_input']\n elif SRC_FROM == 'image':\n SRC_FROM = IMAGE\n VIDEO_INPUT = cfg['image_input']\n \"\"\" \"\"\"\n\n \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\"\n LOAD FROZEN_GRAPH\n \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\"\n load_frozen_graph = LoadFrozenGraph(cfg)\n graph = load_frozen_graph.load_graph()\n \"\"\" \"\"\"\n\n \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\"\n LOAD LABEL MAP\n \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\"\n llm = LoadLabelMap()\n category_index = llm.load_label_map(cfg)\n \"\"\" \"\"\"\n\n \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\"\n PREPARE TF CONFIG OPTION\n \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\"\n # Session Config: allow seperate GPU/CPU adressing and limit memory allocation\n config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=LOG_DEVICE)\n config.gpu_options.allow_growth = ALLOW_MEMORY_GROWTH\n config.gpu_options.force_gpu_compatible = FORCE_GPU_COMPATIBLE\n #config.gpu_options.per_process_gpu_memory_fraction = 0.01 # 80MB memory is enough to run on TX2\n \"\"\" \"\"\"\n\n \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\"\n PREPARE GRAPH I/O TO VARIABLE\n \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\"\n # Define Input and Ouput tensors\n image_tensor = graph.get_tensor_by_name('image_tensor:0')\n detection_boxes = graph.get_tensor_by_name('detection_boxes:0')\n detection_scores = graph.get_tensor_by_name('detection_scores:0')\n detection_classes = graph.get_tensor_by_name('detection_classes:0')\n num_detections = graph.get_tensor_by_name('num_detections:0')\n\n if SPLIT_MODEL:\n SPLIT_TARGET_NAME = ['Postprocessor/convert_scores',\n 'Postprocessor/ExpandDims_1',\n 
]\n split_out = []\n split_in = []\n for stn in SPLIT_TARGET_NAME:\n split_out += [graph.get_tensor_by_name(stn+':0')]\n split_in += [graph.get_tensor_by_name(stn+'_1:0')]\n \"\"\" \"\"\"\n\n \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\"\n START WORKER THREAD\n \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\"\n # gpu_worker uses in split_model and non-split_model\n gpu_tag = 'GPU'\n cpu_tag = 'CPU'\n gpu_worker = SessionWorker(gpu_tag, graph, config)\n if SPLIT_MODEL:\n gpu_opts = split_out\n cpu_worker = SessionWorker(cpu_tag, graph, config)\n cpu_opts = [detection_boxes, detection_scores, detection_classes, num_detections]\n else:\n gpu_opts = [detection_boxes, detection_scores, detection_classes, num_detections]\n \"\"\" \"\"\"\n\n \"\"\"\n START VISUALIZE WORKER\n \"\"\"\n if VISUALIZE and VIS_WORKER:\n q_out = Queue.Queue()\n vis_worker = MPVisualizeWorker(cfg, MPVariable.vis_in_con)\n \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\"\n START SENDER THREAD\n \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\"\n start_sender(MPVariable.det_out_con, q_out)\n proc_frame_counter = 0\n vis_proc_time = 0\n\n\n \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\"\n WAIT UNTIL THE FIRST DUMMY IMAGE DONE\n \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\"\n print('Loading...')\n sleep_interval = 0.1\n \"\"\"\n PUT DUMMY DATA INTO GPU WORKER\n \"\"\"\n gpu_feeds = {image_tensor: [np.zeros((300, 300, 3))]}\n gpu_extras = {}\n gpu_worker.put_sess_queue(gpu_opts, gpu_feeds, gpu_extras)\n if SPLIT_MODEL:\n \"\"\"\n PUT DUMMY DATA INTO CPU WORKER\n \"\"\"\n cpu_feeds = {split_in[0]: np.zeros((1, SPLIT_SHAPE, NUM_CLASSES)),\n split_in[1]: np.zeros((1, SPLIT_SHAPE, 1, 4))}\n cpu_extras = {}\n cpu_worker.put_sess_queue(cpu_opts, cpu_feeds, cpu_extras)\n \"\"\"\n WAIT UNTIL JIT-COMPILE DONE\n \"\"\"\n while True:\n g = gpu_worker.get_result_queue()\n if g is None:\n time.sleep(sleep_interval)\n else:\n break\n if SPLIT_MODEL:\n while True:\n c = cpu_worker.get_result_queue()\n if c is None:\n time.sleep(sleep_interval)\n else:\n break\n \"\"\" \"\"\"\n\n \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\"\n START CAMERA\n \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\"\n if SRC_FROM == CAMERA:\n from lib.webcam import WebcamVideoStream as VideoReader\n elif SRC_FROM == MOVIE:\n from lib.video import VideoReader\n elif SRC_FROM == IMAGE:\n from lib.image import ImageReader as VideoReader\n video_reader = VideoReader()\n\n if SRC_FROM == IMAGE:\n video_reader.start(VIDEO_INPUT, save_to_file=SAVE_TO_FILE)\n else: # CAMERA, MOVIE\n video_reader.start(VIDEO_INPUT, WIDTH, HEIGHT, save_to_file=SAVE_TO_FILE)\n frame_cols, frame_rows = video_reader.getSize()\n \"\"\" STATISTICS FONT \"\"\"\n fontScale = frame_rows/1000.0\n if fontScale < 0.4:\n fontScale = 0.4\n fontThickness = 1 + int(fontScale)\n fontFace = cv2.FONT_HERSHEY_SIMPLEX\n if SRC_FROM == MOVIE:\n dir_path, filename = os.path.split(VIDEO_INPUT)\n filepath_prefix = filename\n elif SRC_FROM == CAMERA:\n filepath_prefix = 'frame'\n \"\"\" \"\"\"\n\n\n \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\"\n DETECTION LOOP\n \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\"\n print('Starting Detection')\n sleep_interval = 0.005\n top_in_time = None\n frame_in_processing_counter = 0\n try:\n if 
not video_reader.running:\n raise IOError((\"Input src error.\"))\n while MPVariable.running.value:\n if top_in_time is None:\n top_in_time = time.time()\n \"\"\"\n SPRIT/NON-SPLIT MODEL CAMERA TO WORKER\n \"\"\"\n if video_reader.running:\n if gpu_worker.is_sess_empty(): # must need for speed\n cap_in_time = time.time()\n if SRC_FROM == IMAGE:\n frame, filepath = video_reader.read()\n if frame is not None:\n frame_in_processing_counter += 1\n else:\n frame = video_reader.read()\n if frame is not None:\n filepath = filepath_prefix+'_'+str(proc_frame_counter)+'.png'\n frame_in_processing_counter += 1\n if frame is not None:\n image_expanded = np.expand_dims(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), axis=0) # np.expand_dims is faster than []\n #image_expanded = np.expand_dims(frame, axis=0) # BGR image for input. Of couse, bad accuracy in RGB trained model, but speed up.\n cap_out_time = time.time()\n # put new queue\n gpu_feeds = {image_tensor: image_expanded}\n gpu_extras = {'image':frame, 'top_in_time':top_in_time, 'cap_in_time':cap_in_time, 'cap_out_time':cap_out_time, 'filepath': filepath} # always image draw.\n gpu_worker.put_sess_queue(gpu_opts, gpu_feeds, gpu_extras)\n elif frame_in_processing_counter <= 0:\n MPVariable.running.value = False\n break\n\n g = gpu_worker.get_result_queue()\n if SPLIT_MODEL:\n # if g is None: gpu thread has no output queue. ok skip, let's check cpu thread.\n if g is not None:\n # gpu thread has output queue.\n result_slice_out, extras = g['results'], g['extras']\n\n if cpu_worker.is_sess_empty():\n # When cpu thread has no next queue, put new queue.\n # else, drop gpu queue.\n cpu_feeds = {}\n for i in range(len(result_slice_out)):\n cpu_feeds.update({split_in[i]:result_slice_out[i]})\n cpu_extras = extras\n cpu_worker.put_sess_queue(cpu_opts, cpu_feeds, cpu_extras)\n else:\n # else: cpu thread is busy. don't put new queue. let's check cpu result queue.\n frame_in_processing_counter -= 1\n # check cpu thread.\n q = cpu_worker.get_result_queue()\n else:\n \"\"\"\n NON-SPLIT MODEL\n \"\"\"\n q = g\n if q is None:\n \"\"\"\n SPLIT/NON-SPLIT MODEL\n \"\"\"\n # detection is not complete yet. 
ok nothing to do.\n time.sleep(sleep_interval)\n continue\n\n frame_in_processing_counter -= 1\n boxes, scores, classes, num, extras = q['results'][0], q['results'][1], q['results'][2], q['results'][3], q['extras']\n boxes, scores, classes = np.squeeze(boxes), np.squeeze(scores), np.squeeze(classes)\n det_out_time = time.time()\n\n \"\"\"\n ALWAYS BOX DRAW ON IMAGE\n \"\"\"\n vis_in_time = time.time()\n image = extras['image']\n if SRC_FROM == IMAGE:\n filepath = extras['filepath']\n frame_rows, frame_cols = image.shape[:2]\n \"\"\" STATISTICS FONT \"\"\"\n fontScale = frame_rows/1000.0\n if fontScale < 0.4:\n fontScale = 0.4\n fontThickness = 1 + int(fontScale)\n else:\n filepath = extras['filepath']\n image = visualization(category_index, image, boxes, scores, classes, DEBUG_MODE, VIS_TEXT, FPS_INTERVAL,\n fontFace=fontFace, fontScale=fontScale, fontThickness=fontThickness)\n\n \"\"\"\n VISUALIZATION\n \"\"\"\n if VISUALIZE:\n if (MPVariable.vis_skip_rate.value == 0) or (proc_frame_counter % MPVariable.vis_skip_rate.value < 1):\n if VIS_WORKER:\n q_out.put({'image':image, 'vis_in_time':vis_in_time})\n else:\n \"\"\"\n SHOW\n \"\"\"\n cv2.imshow(\"Object Detection\", image)\n # Press q to quit\n if cv2.waitKey(1) & 0xFF == 113: #ord('q'):\n break\n MPVariable.vis_frame_counter.value += 1\n vis_out_time = time.time()\n \"\"\"\n PROCESSING TIME\n \"\"\"\n vis_proc_time = vis_out_time - vis_in_time\n MPVariable.vis_proc_time.value += vis_proc_time\n else:\n \"\"\"\n NO VISUALIZE\n \"\"\"\n for box, score, _class in zip(boxes, scores, classes):\n if proc_frame_counter % DET_INTERVAL == 0 and score > DET_TH:\n label = category_index[_class]['name']\n print(\"label: {}\\nscore: {}\\nbox: {}\".format(label, score, box))\n\n vis_out_time = time.time()\n \"\"\"\n PROCESSING TIME\n \"\"\"\n vis_proc_time = vis_out_time - vis_in_time\n\n if SAVE_TO_FILE:\n if SRC_FROM == IMAGE:\n video_reader.save(image, filepath)\n else:\n video_reader.save(image)\n\n proc_frame_counter += 1\n if proc_frame_counter > 100000:\n proc_frame_counter = 0\n \"\"\"\n PROCESSING TIME\n \"\"\"\n top_in_time = extras['top_in_time']\n cap_proc_time = extras['cap_out_time'] - extras['cap_in_time']\n gpu_proc_time = extras[gpu_tag+'_out_time'] - extras[gpu_tag+'_in_time']\n if SPLIT_MODEL:\n cpu_proc_time = extras[cpu_tag+'_out_time'] - extras[cpu_tag+'_in_time']\n else:\n cpu_proc_time = 0\n lost_proc_time = det_out_time - top_in_time - cap_proc_time - gpu_proc_time - cpu_proc_time\n total_proc_time = det_out_time - top_in_time\n MPVariable.cap_proc_time.value += cap_proc_time\n MPVariable.gpu_proc_time.value += gpu_proc_time\n MPVariable.cpu_proc_time.value += cpu_proc_time\n MPVariable.lost_proc_time.value += lost_proc_time\n MPVariable.total_proc_time.value += total_proc_time\n\n if DEBUG_MODE:\n if SPLIT_MODEL:\n sys.stdout.write('snapshot FPS:{: ^5.1f} total:{: ^10.5f} cap:{: ^10.5f} gpu:{: ^10.5f} cpu:{: ^10.5f} lost:{: ^10.5f} | vis:{: ^10.5f}\\n'.format(\n MPVariable.fps.value, total_proc_time, cap_proc_time, gpu_proc_time, cpu_proc_time, lost_proc_time, vis_proc_time))\n else:\n sys.stdout.write('snapshot FPS:{: ^5.1f} total:{: ^10.5f} cap:{: ^10.5f} gpu:{: ^10.5f} lost:{: ^10.5f} | vis:{: ^10.5f}\\n'.format(\n MPVariable.fps.value, total_proc_time, cap_proc_time, gpu_proc_time, lost_proc_time, vis_proc_time))\n \"\"\"\n EXIT WITHOUT GUI\n \"\"\"\n if not VISUALIZE and MAX_FRAMES > 0:\n if proc_frame_counter >= MAX_FRAMES:\n MPVariable.running.value = False\n break\n\n \"\"\"\n CHANGE SLEEP INTERVAL\n \"\"\"\n if 
MPVariable.frame_counter.value == 0 and MPVariable.fps.value > 0:\n sleep_interval = 0.1 / MPVariable.fps.value\n MPVariable.sleep_interval.value = sleep_interval\n MPVariable.frame_counter.value += 1\n top_in_time = None\n \"\"\"\n END while\n \"\"\"\n except KeyboardInterrupt:\n pass\n except:\n import traceback\n traceback.print_exc()\n finally:\n \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\"\n CLOSE\n \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\" \"\"\"\n if VISUALIZE and VIS_WORKER:\n q_out.put(None)\n MPVariable.running.value = False\n gpu_worker.stop()\n if SPLIT_MODEL:\n cpu_worker.stop()\n video_reader.stop()\n\n if VISUALIZE:\n cv2.destroyAllWindows()\n \"\"\" \"\"\"\n\n return\n\n" ]
[ [ "tensorflow.ConfigProto", "numpy.squeeze", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ricklupton/py-bem
[ "026b2b82b6f09d57b213cfe17aa849dbdf3c7b71" ]
[ "bem/fast_interpolation.py" ]
[ "\"\"\"\nFrom http://stackoverflow.com/a/13504757\n\"\"\"\n\nfrom scipy.interpolate import interp1d\nfrom scipy.interpolate._fitpack import _bspleval\nimport numpy as np\n\n\nclass fast_interpolation:\n def __init__(self, x, y, axis=-1):\n assert len(x) == y.shape[axis]\n self.x = x\n self.y = y\n self.axis = axis\n self._f = interp1d(x, y, axis=axis, kind='slinear', copy=False)\n\n def __getstate__(self):\n return dict(x=self.x, y=self.y, axis=self.axis)\n\n def __setstate__(self, state):\n self.x = state['x']\n self.y = state['y']\n self.axis = state['axis']\n self._f = interp1d(self.x, self.y, axis=self.axis,\n kind='slinear', copy=False)\n\n def __call__(self, new_x):\n #assert new_x.shape == y.shape\n xj, cvals, k = self._f._spline\n result = np.empty_like(new_x)\n for i, value in enumerate(new_x.flat):\n result.flat[i] = _bspleval(value, self.x, cvals[:, i], k, 0)\n return result\n" ]
[ [ "numpy.empty_like", "scipy.interpolate.interp1d", "scipy.interpolate._fitpack._bspleval" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
noelevans/playground
[ "da529e967a15bcb217fff091ac0ec5c4dc1821ce", "da529e967a15bcb217fff091ac0ec5c4dc1821ce", "da529e967a15bcb217fff091ac0ec5c4dc1821ce", "da529e967a15bcb217fff091ac0ec5c4dc1821ce", "da529e967a15bcb217fff091ac0ec5c4dc1821ce", "da529e967a15bcb217fff091ac0ec5c4dc1821ce" ]
[ "kaggle/sf-crime/logistic_regressions.py", "kaggle/washington_bike_share/svm_simple.py", "iris/classification_report.py", "bayesian_methods_for_hackers/linear_best_fit_with_mcmc_ch05.py", "decay_fns/uni_ranking_analysis.py", "bayesian_methods_for_hackers/exponential_ch03.py" ]
[ "from sklearn.linear_model import LogisticRegression\n\n\nclass MultivariateLogisticOvrModel(object):\n\n def model_and_predict(self, X_train, y_train, X_test):\n model = LogisticRegression(dual=True, fit_intercept=True, \n multi_class='ovr')\n model.fit(X_train, y_train)\n return model.predict(X_test)\n\n\nclass MultivariateLogisticMultinomialModel(object):\n\n def model_and_predict(self, X_train, y_train, X_test):\n model = LogisticRegression(dual=False, fit_intercept=False, \n multi_class='multinomial')\n model.fit(X_train, y_train)\n return model.predict(X_test)\n\n", "import numpy as np\nfrom sklearn.svm import SVR\nimport matplotlib.pyplot as plt\n\n###############################################################################\n# Generate sample data\nX = np.sort(5 * np.random.rand(40, 1), axis=0)\ny = np.sin(X).ravel()\n\n###############################################################################\n# Add noise to targets\ny[::5] += 3 * (0.5 - np.random.rand(8))\n\n###############################################################################\n# Fit regression model\nsvr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)\ny_rbf = svr_rbf.fit(X, y).predict(X)\n\n###############################################################################\n# look at the results\nplt.scatter(X, y, c='k', label='data')\nplt.hold('on')\nplt.plot(X, y_rbf, c='g', label='RBF model')\nplt.xlabel('data')\nplt.ylabel('target')\nplt.title('Support Vector Regression')\nplt.legend()\nplt.show()\n", "from sklearn import datasets\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import classification_report\nfrom sklearn.cross_validation import train_test_split\n\n\ndef main():\n iris = datasets.load_iris()\n X = iris.data\n y = iris.target\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.25, random_state=2)\n\n clf = RandomForestClassifier(n_estimators=10)\n clf.fit(X_train, y_train)\n y_hat = clf.predict(X_test)\n\n print(y_test == y_hat)\n print(classification_report(\n y_test, y_hat, target_names=iris.target_names))\n\n\n\nif __name__ == '__main__':\n main()\n", "import numpy as np\nimport pymc as pm\nfrom pymc.Matplot import plot as mcplot\n\n\ndef main():\n # Code to create artificial data\n N = 100\n X = 0.025 * np.random.randn(N)\n Y = 0.5 * X + 0.01 * np.random.randn(N)\n\n std = pm.Uniform(\"std\", 0, 100, trace=False)\n\n @pm.deterministic\n def prec(U=std):\n return 1.0 / (U) ** 2\n\n beta = pm.Normal(\"beta\", 0, 0.0001)\n alpha = pm.Normal(\"alpha\", 0, 0.0001)\n\n\n @pm.deterministic\n def mean(X=X, alpha=alpha, beta=beta):\n return alpha + beta * X\n\n obs = pm.Normal(\"obs\", mean, prec, value=Y, observed=True)\n mcmc = pm.MCMC([obs, beta, alpha, std, prec])\n\n mcmc.sample(100000, 80000)\n mcplot(mcmc)\n\n\nif __name__ == '__main__':\n main()\n", "import numpy as np\nimport pandas as pd\n \n \ndef rename_unis(df, renames):\n \"\"\" Update institution if they appear in the renames dictionary. \"\"\"\n \n for before, after in renames.items():\n df.ix[df.institution == before, 'institution'] = after\n return df\n \n \ndef main():\n \"\"\" Read some university records and analyse. 
\"\"\"\n \n newer_records = pd.read_csv('2016_to_2017.csv', sep='\\t')\n older_records = pd.read_csv('2012_to_2014.csv', sep='\\t')\n \n newer_records = newer_records.rename(\n columns={'Institution': 'institution'})\n \n new_renames = {'Newman': 'Newman University',\n \"St Mary's\": \"St Mary's UC\",\n 'Leeds Beckett': 'Leeds Met',\n 'University Campus Suffolk': 'UC Suffolk'}\n newer_records = rename_unis(newer_records, new_renames)\n \n old_renames = {'Glynd': 'Glyndwr'}\n older_records = rename_unis(older_records, old_renames)\n \n recs = pd.merge(newer_records, older_records,\n how='outer', on='institution')\n \n # Fix outer join null values\n recs = recs.fillna(501)\n \n # Fix columns\n recs.columns = [2017, 2016, 'institution', 2014, 2013, 2012]\n recs = recs[['institution', 2012, 2013, 2014, 2016, 2017]]\n years = recs.columns.values[1:].astype('int')\n \n # Different ways of calculating the mean\n recs['mean'] = recs[list(years)].mean(axis=1)\n recs['mean'] = recs.mean(axis=1)\n my_mean = lambda xs: sum(xs[1:]) / len(xs[1:])\n recs['mean'] = recs.apply(my_mean, axis=1)\n \n \n # Add decay_mean: weights are heavier for dates closer to today\n half_life = 5 # (years)\n today = 2016 + 1\n elapsed_time = today - years\n \n # very aggresive decay weights\n weights = np.e ** - (elapsed_time * half_life)\n # more even weighting but still bias towards recent data\n weights = 0.5 ** (elapsed_time / half_life)\n print(weights)\n \n decay_mean = lambda x: sum(x * weights) / sum(weights)\n recs['decay_mean'] = recs[list(years)].apply(decay_mean, axis=1)\n \n print('Order by 2017')\n print(recs.sort(2017)[:10])\n print('Order by mean')\n print(recs.sort('mean')[:10])\n print('Order by decay mean')\n print(recs.sort('decay_mean')[:10])\n\n\nif __name__ == '__main__':\n main()\n", "from matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport scipy.stats as stats\n\nfrom IPython.core.pylabtools import figsize\n\n\ndef main():\n figsize(12.5, 5)\n fig = plt.figure()\n jet = plt.cm.jet\n plt.subplot(121)\n\n x = y = np.linspace(0, 5, 100)\n X, Y = np.meshgrid(x, y)\n\n exp_x = stats.expon.pdf(x, scale=3)\n exp_y = stats.expon.pdf(x, scale=10)\n M = np.dot(exp_x[:, None], exp_y[None, :])\n CS = plt.contour(X, Y, M)\n im = plt.imshow(M, interpolation='none', origin='lower',\n cmap=jet, extent=(0, 5, 0, 5))\n plt.xlabel(\"prior on $p_1$\")\n plt.ylabel(\"prior on $p_2$\")\n plt.title(\"$Exp(3), Exp(10)$ prior landscape\")\n\n ax = fig.add_subplot(122, projection='3d')\n ax.plot_surface(X, Y, M, cmap=jet)\n ax.view_init(azim=390)\n plt.title(\"$Exp(3), Exp(10)$ prior landscape; \\nalternate view\")\n plt.show()\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "sklearn.linear_model.LogisticRegression" ], [ "matplotlib.pyplot.legend", "matplotlib.pyplot.scatter", "matplotlib.pyplot.title", "matplotlib.pyplot.hold", "numpy.sin", "sklearn.svm.SVR", "matplotlib.pyplot.plot", "numpy.random.rand", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ], [ "sklearn.cross_validation.train_test_split", "sklearn.datasets.load_iris", "sklearn.metrics.classification_report", "sklearn.ensemble.RandomForestClassifier" ], [ "numpy.random.randn" ], [ "pandas.merge", "pandas.read_csv" ], [ "numpy.dot", "matplotlib.pyplot.imshow", "numpy.linspace", "matplotlib.pyplot.title", "scipy.stats.expon.pdf", "matplotlib.pyplot.subplot", "matplotlib.pyplot.contour", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "numpy.meshgrid", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Onkar627/cupy
[ "8eef1ad5393c0a92c5065bc05137bf997f37044a", "8eef1ad5393c0a92c5065bc05137bf997f37044a", "8eef1ad5393c0a92c5065bc05137bf997f37044a", "8eef1ad5393c0a92c5065bc05137bf997f37044a" ]
[ "cupyx/scipy/ndimage/_measurements.py", "tests/cupy_tests/core_tests/test_ndarray_elementwise_op.py", "cupyx/scipy/sparse/_compressed.py", "tests/cupyx_tests/scipy_tests/sparse_tests/test_coo.py" ]
[ "import warnings\n\nimport numpy\n\nimport cupy\nfrom cupy import _core\nfrom cupy import _util\n\n\ndef label(input, structure=None, output=None):\n \"\"\"Labels features in an array.\n\n Args:\n input (cupy.ndarray): The input array.\n structure (array_like or None): A structuring element that defines\n feature connections. ```structure``` must be centersymmetric. If\n None, structure is automatically generated with a squared\n connectivity equal to one.\n output (cupy.ndarray, dtype or None): The array in which to place the\n output.\n Returns:\n label (cupy.ndarray): An integer array where each unique feature in\n ```input``` has a unique label in the array.\n\n num_features (int): Number of features found.\n\n .. warning::\n\n This function may synchronize the device.\n\n .. seealso:: :func:`scipy.ndimage.label`\n \"\"\"\n if not isinstance(input, cupy.ndarray):\n raise TypeError('input must be cupy.ndarray')\n if input.dtype.char in 'FD':\n raise TypeError('Complex type not supported')\n if structure is None:\n structure = _generate_binary_structure(input.ndim, 1)\n elif isinstance(structure, cupy.ndarray):\n structure = cupy.asnumpy(structure)\n structure = numpy.array(structure, dtype=bool)\n if structure.ndim != input.ndim:\n raise RuntimeError('structure and input must have equal rank')\n for i in structure.shape:\n if i != 3:\n raise ValueError('structure dimensions must be equal to 3')\n\n if isinstance(output, cupy.ndarray):\n if output.shape != input.shape:\n raise ValueError(\"output shape not correct\")\n caller_provided_output = True\n else:\n caller_provided_output = False\n if output is None:\n output = cupy.empty(input.shape, numpy.int32)\n else:\n output = cupy.empty(input.shape, output)\n\n if input.size == 0:\n # empty\n maxlabel = 0\n elif input.ndim == 0:\n # 0-dim array\n maxlabel = 0 if input.item() == 0 else 1\n output[...] = maxlabel\n else:\n if output.dtype != numpy.int32:\n y = cupy.empty(input.shape, numpy.int32)\n else:\n y = output\n maxlabel = _label(input, structure, y)\n if output.dtype != numpy.int32:\n output[...] 
= y[...]\n\n if caller_provided_output:\n return maxlabel\n else:\n return output, maxlabel\n\n\ndef _generate_binary_structure(rank, connectivity):\n if connectivity < 1:\n connectivity = 1\n if rank < 1:\n return numpy.array(True, dtype=bool)\n output = numpy.fabs(numpy.indices([3] * rank) - 1)\n output = numpy.add.reduce(output, 0)\n return output <= connectivity\n\n\ndef _label(x, structure, y):\n elems = numpy.where(structure != 0)\n vecs = [elems[dm] - 1 for dm in range(x.ndim)]\n offset = vecs[0]\n for dm in range(1, x.ndim):\n offset = offset * 3 + vecs[dm]\n indxs = numpy.where(offset < 0)[0]\n dirs = [[vecs[dm][dr] for dm in range(x.ndim)] for dr in indxs]\n dirs = cupy.array(dirs, dtype=numpy.int32)\n ndirs = indxs.shape[0]\n y_shape = cupy.array(y.shape, dtype=numpy.int32)\n count = cupy.zeros(2, dtype=numpy.int32)\n _kernel_init()(x, y)\n _kernel_connect()(y_shape, dirs, ndirs, x.ndim, y, size=y.size)\n _kernel_count()(y, count, size=y.size)\n maxlabel = int(count[0])\n labels = cupy.empty(maxlabel, dtype=numpy.int32)\n _kernel_labels()(y, count, labels, size=y.size)\n _kernel_finalize()(maxlabel, cupy.sort(labels), y, size=y.size)\n return maxlabel\n\n\ndef _kernel_init():\n return _core.ElementwiseKernel(\n 'X x', 'Y y', 'if (x == 0) { y = -1; } else { y = i; }',\n 'cupyx_scipy_ndimage_label_init')\n\n\ndef _kernel_connect():\n return _core.ElementwiseKernel(\n 'raw int32 shape, raw int32 dirs, int32 ndirs, int32 ndim',\n 'raw Y y',\n '''\n if (y[i] < 0) continue;\n for (int dr = 0; dr < ndirs; dr++) {\n int j = i;\n int rest = j;\n int stride = 1;\n int k = 0;\n for (int dm = ndim-1; dm >= 0; dm--) {\n int pos = rest % shape[dm] + dirs[dm + dr * ndim];\n if (pos < 0 || pos >= shape[dm]) {\n k = -1;\n break;\n }\n k += pos * stride;\n rest /= shape[dm];\n stride *= shape[dm];\n }\n if (k < 0) continue;\n if (y[k] < 0) continue;\n while (1) {\n while (j != y[j]) { j = y[j]; }\n while (k != y[k]) { k = y[k]; }\n if (j == k) break;\n if (j < k) {\n int old = atomicCAS( &y[k], k, j );\n if (old == k) break;\n k = old;\n }\n else {\n int old = atomicCAS( &y[j], j, k );\n if (old == j) break;\n j = old;\n }\n }\n }\n ''',\n 'cupyx_scipy_ndimage_label_connect')\n\n\ndef _kernel_count():\n return _core.ElementwiseKernel(\n '', 'raw Y y, raw int32 count',\n '''\n if (y[i] < 0) continue;\n int j = i;\n while (j != y[j]) { j = y[j]; }\n if (j != i) y[i] = j;\n else atomicAdd(&count[0], 1);\n ''',\n 'cupyx_scipy_ndimage_label_count')\n\n\ndef _kernel_labels():\n return _core.ElementwiseKernel(\n '', 'raw Y y, raw int32 count, raw int32 labels',\n '''\n if (y[i] != i) continue;\n int j = atomicAdd(&count[1], 1);\n labels[j] = i;\n ''',\n 'cupyx_scipy_ndimage_label_labels')\n\n\ndef _kernel_finalize():\n return _core.ElementwiseKernel(\n 'int32 maxlabel', 'raw int32 labels, raw Y y',\n '''\n if (y[i] < 0) {\n y[i] = 0;\n continue;\n }\n int yi = y[i];\n int j_min = 0;\n int j_max = maxlabel - 1;\n int j = (j_min + j_max) / 2;\n while (j_min < j_max) {\n if (yi == labels[j]) break;\n if (yi < labels[j]) j_max = j - 1;\n else j_min = j + 1;\n j = (j_min + j_max) / 2;\n }\n y[i] = j + 1;\n ''',\n 'cupyx_scipy_ndimage_label_finalize')\n\n\n_ndimage_variance_kernel = _core.ElementwiseKernel(\n 'T input, R labels, raw X index, uint64 size, raw float64 mean',\n 'raw float64 out',\n \"\"\"\n for (ptrdiff_t j = 0; j < size; j++) {\n if (labels == index[j]) {\n atomicAdd(&out[j], (input - mean[j]) * (input - mean[j]));\n break;\n }\n }\n \"\"\",\n 
'cupyx_scipy_ndimage_variance')\n\n\n_ndimage_sum_kernel = _core.ElementwiseKernel(\n 'T input, R labels, raw X index, uint64 size',\n 'raw float64 out',\n \"\"\"\n for (ptrdiff_t j = 0; j < size; j++) {\n if (labels == index[j]) {\n atomicAdd(&out[j], input);\n break;\n }\n }\n \"\"\",\n 'cupyx_scipy_ndimage_sum')\n\n\ndef _ndimage_sum_kernel_2(input, labels, index, sum_val, batch_size=4):\n for i in range(0, index.size, batch_size):\n matched = labels == index[i:i + batch_size].reshape(\n (-1,) + (1,) * input.ndim)\n sum_axes = tuple(range(1, 1 + input.ndim))\n sum_val[i:i + batch_size] = cupy.where(matched, input, 0).sum(\n axis=sum_axes)\n return sum_val\n\n\n_ndimage_mean_kernel = _core.ElementwiseKernel(\n 'T input, R labels, raw X index, uint64 size',\n 'raw float64 out, raw uint64 count',\n \"\"\"\n for (ptrdiff_t j = 0; j < size; j++) {\n if (labels == index[j]) {\n atomicAdd(&out[j], input);\n atomicAdd(&count[j], 1);\n break;\n }\n }\n \"\"\",\n 'cupyx_scipy_ndimage_mean')\n\n\ndef _ndimage_mean_kernel_2(input, labels, index, batch_size=4,\n return_count=False):\n sum_val = cupy.empty_like(index, dtype=cupy.float64)\n count = cupy.empty_like(index, dtype=cupy.uint64)\n for i in range(0, index.size, batch_size):\n matched = labels == index[i:i + batch_size].reshape(\n (-1,) + (1,) * input.ndim)\n mean_axes = tuple(range(1, 1 + input.ndim))\n count[i:i + batch_size] = matched.sum(axis=mean_axes)\n sum_val[i:i + batch_size] = cupy.where(matched, input, 0).sum(\n axis=mean_axes)\n if return_count:\n return sum_val / count, count\n return sum_val / count\n\n\ndef _mean_driver(input, labels, index, return_count=False, use_kern=False):\n if use_kern:\n return _ndimage_mean_kernel_2(input, labels, index,\n return_count=return_count)\n\n out = cupy.zeros_like(index, cupy.float64)\n count = cupy.zeros_like(index, dtype=cupy.uint64)\n sum, count = _ndimage_mean_kernel(input,\n labels, index, index.size, out, count)\n if return_count:\n return sum / count, count\n return sum / count\n\n\ndef variance(input, labels=None, index=None):\n \"\"\"Calculates the variance of the values of an n-D image array, optionally\n at specified sub-regions.\n\n Args:\n input (cupy.ndarray): Nd-image data to process.\n labels (cupy.ndarray or None): Labels defining sub-regions in `input`.\n If not None, must be same shape as `input`.\n index (cupy.ndarray or None): `labels` to include in output. If None\n (default), all values where `labels` is non-zero are used.\n\n Returns:\n cupy.ndarray: Values of variance, for each sub-region if\n `labels` and `index` are specified.\n\n .. seealso:: :func:`scipy.ndimage.variance`\n \"\"\"\n if not isinstance(input, cupy.ndarray):\n raise TypeError('input must be cupy.ndarray')\n\n if input.dtype in (cupy.complex64, cupy.complex128):\n raise TypeError(\"cupyx.scipy.ndimage.variance doesn't support %{}\"\n \"\".format(input.dtype.type))\n\n use_kern = False\n # There are constraints on types because of atomicAdd() in CUDA.\n if input.dtype not in [cupy.int32, cupy.float16, cupy.float32,\n cupy.float64, cupy.uint32, cupy.uint64,\n cupy.ulonglong]:\n warnings.warn(\n 'Using the slower implementation because the provided '\n f'type {input.dtype} is not supported by cupyx.scipy.ndimage.sum. 
'\n 'Consider using an array of type int32, float16, '\n 'float32, float64, uint32, uint64 as data types '\n 'for the fast implementation', _util.PerformanceWarning)\n use_kern = True\n\n def calc_var_with_intermediate_float(input):\n vals_c = input - input.mean()\n count = vals_c.size\n # Does not use `ndarray.mean()` here to return the same results as\n # SciPy does, especially in case `input`'s dtype is float16.\n return cupy.square(vals_c).sum() / cupy.asanyarray(count).astype(float)\n\n if labels is None:\n return calc_var_with_intermediate_float(input)\n\n if not isinstance(labels, cupy.ndarray):\n raise TypeError('label must be cupy.ndarray')\n\n input, labels = cupy.broadcast_arrays(input, labels)\n\n if index is None:\n return calc_var_with_intermediate_float(input[labels > 0])\n\n if cupy.isscalar(index):\n return calc_var_with_intermediate_float(input[labels == index])\n\n if not isinstance(index, cupy.ndarray):\n if not isinstance(index, int):\n raise TypeError('index must be cupy.ndarray or a scalar int')\n else:\n return (input[labels == index]).var().astype(cupy.float64,\n copy=False)\n\n mean_val, count = _mean_driver(input, labels, index, True, use_kern)\n if use_kern:\n new_axis = (..., *(cupy.newaxis for _ in range(input.ndim)))\n return cupy.where(labels[None, ...] == index[new_axis],\n cupy.square(input - mean_val[new_axis]),\n 0).sum(tuple(range(1, input.ndim + 1))) / count\n out = cupy.zeros_like(index, dtype=cupy.float64)\n return _ndimage_variance_kernel(input, labels, index, index.size, mean_val,\n out) / count\n\n\ndef sum_labels(input, labels=None, index=None):\n \"\"\"Calculates the sum of the values of an n-D image array, optionally\n at specified sub-regions.\n\n Args:\n input (cupy.ndarray): Nd-image data to process.\n labels (cupy.ndarray or None): Labels defining sub-regions in `input`.\n If not None, must be same shape as `input`.\n index (cupy.ndarray or None): `labels` to include in output. If None\n (default), all values where `labels` is non-zero are used.\n\n Returns:\n sum (cupy.ndarray): sum of values, for each sub-region if\n `labels` and `index` are specified.\n\n .. 
seealso:: :func:`scipy.ndimage.sum_labels`\n \"\"\"\n if not isinstance(input, cupy.ndarray):\n raise TypeError('input must be cupy.ndarray')\n\n if input.dtype in (cupy.complex64, cupy.complex128):\n raise TypeError(\"cupyx.scipy.ndimage.sum does not support %{}\".format(\n input.dtype.type))\n\n use_kern = False\n # There are constraints on types because of atomicAdd() in CUDA.\n if input.dtype not in [cupy.int32, cupy.float16, cupy.float32,\n cupy.float64, cupy.uint32, cupy.uint64,\n cupy.ulonglong]:\n warnings.warn(\n 'Using the slower implementation as '\n 'cupyx.scipy.ndimage.sum supports int32, float16, '\n 'float32, float64, uint32, uint64 as data types '\n 'for the fast implementation', _util.PerformanceWarning)\n use_kern = True\n\n if labels is None:\n return input.sum()\n\n if not isinstance(labels, cupy.ndarray):\n raise TypeError('label must be cupy.ndarray')\n\n input, labels = cupy.broadcast_arrays(input, labels)\n\n if index is None:\n return input[labels != 0].sum()\n\n if not isinstance(index, cupy.ndarray):\n if not isinstance(index, int):\n raise TypeError('index must be cupy.ndarray or a scalar int')\n else:\n return (input[labels == index]).sum()\n\n if index.size == 0:\n return cupy.array([], dtype=cupy.int64)\n\n out = cupy.zeros_like(index, dtype=cupy.float64)\n\n # The following parameters for sum were determined using a Tesla P100.\n if (input.size >= 262144 and index.size <= 4) or use_kern:\n return _ndimage_sum_kernel_2(input, labels, index, out)\n return _ndimage_sum_kernel(input, labels, index, index.size, out)\n\n\ndef sum(input, labels=None, index=None):\n \"\"\"Calculates the sum of the values of an n-D image array, optionally\n at specified sub-regions.\n\n Args:\n input (cupy.ndarray): Nd-image data to process.\n labels (cupy.ndarray or None): Labels defining sub-regions in `input`.\n If not None, must be same shape as `input`.\n index (cupy.ndarray or None): `labels` to include in output. If None\n (default), all values where `labels` is non-zero are used.\n\n Returns:\n sum (cupy.ndarray): sum of values, for each sub-region if\n `labels` and `index` are specified.\n\n Notes:\n This is an alias for `cupyx.scipy.ndimage.sum_labels` kept for\n backwards compatibility reasons. For new code please prefer\n `sum_labels`.\n\n .. seealso:: :func:`scipy.ndimage.sum`\n \"\"\"\n return sum_labels(input, labels, index)\n\n\ndef mean(input, labels=None, index=None):\n \"\"\"Calculates the mean of the values of an n-D image array, optionally\n at specified sub-regions.\n\n Args:\n input (cupy.ndarray): Nd-image data to process.\n labels (cupy.ndarray or None): Labels defining sub-regions in `input`.\n If not None, must be same shape as `input`.\n index (cupy.ndarray or None): `labels` to include in output. If None\n (default), all values where `labels` is non-zero are used.\n\n Returns:\n mean (cupy.ndarray): mean of values, for each sub-region if\n `labels` and `index` are specified.\n\n\n .. 
seealso:: :func:`scipy.ndimage.mean`\n \"\"\"\n if not isinstance(input, cupy.ndarray):\n raise TypeError('input must be cupy.ndarray')\n\n if input.dtype in (cupy.complex64, cupy.complex128):\n raise TypeError(\"cupyx.scipy.ndimage.mean does not support %{}\".format(\n input.dtype.type))\n\n use_kern = False\n # There are constraints on types because of atomicAdd() in CUDA.\n if input.dtype not in [cupy.int32, cupy.float16, cupy.float32,\n cupy.float64, cupy.uint32, cupy.uint64,\n cupy.ulonglong]:\n warnings.warn(\n 'Using the slower implementation as '\n 'cupyx.scipy.ndimage.mean supports int32, float16, '\n 'float32, float64, uint32, uint64 as data types '\n 'for the fast implementation', _util.PerformanceWarning)\n use_kern = True\n\n def calc_mean_with_intermediate_float(input):\n sum = input.sum()\n count = input.size\n # Does not use `ndarray.mean()` here to return the same results as\n # SciPy does, especially in case `input`'s dtype is float16.\n return sum / cupy.asanyarray(count).astype(float)\n\n if labels is None:\n return calc_mean_with_intermediate_float(input)\n\n if not isinstance(labels, cupy.ndarray):\n raise TypeError('label must be cupy.ndarray')\n\n input, labels = cupy.broadcast_arrays(input, labels)\n\n if index is None:\n return calc_mean_with_intermediate_float(input[labels > 0])\n\n if cupy.isscalar(index):\n return calc_mean_with_intermediate_float(input[labels == index])\n\n if not isinstance(index, cupy.ndarray):\n if not isinstance(index, int):\n raise TypeError('index must be cupy.ndarray or a scalar int')\n else:\n return (input[labels == index]).mean(dtype=cupy.float64)\n\n return _mean_driver(input, labels, index, use_kern=use_kern)\n\n\ndef standard_deviation(input, labels=None, index=None):\n \"\"\"Calculates the standard deviation of the values of an n-D image array,\n optionally at specified sub-regions.\n\n Args:\n input (cupy.ndarray): Nd-image data to process.\n labels (cupy.ndarray or None): Labels defining sub-regions in `input`.\n If not None, must be same shape as `input`.\n index (cupy.ndarray or None): `labels` to include in output. If None\n (default), all values where `labels` is non-zero are used.\n\n Returns:\n standard_deviation (cupy.ndarray): standard deviation of values, for\n each sub-region if `labels` and `index` are specified.\n\n .. 
seealso:: :func:`scipy.ndimage.standard_deviation`\n \"\"\"\n return cupy.sqrt(variance(input, labels, index))\n\n\ndef _safely_castable_to_int(dt):\n \"\"\"Test whether the NumPy data type `dt` can be safely cast to an int.\"\"\"\n int_size = cupy.dtype(int).itemsize\n safe = (\n cupy.issubdtype(dt, cupy.signedinteger) and dt.itemsize <= int_size\n ) or (cupy.issubdtype(dt, cupy.unsignedinteger) and dt.itemsize < int_size)\n return safe\n\n\ndef _get_values(arrays, func):\n \"\"\"Concatenated result of applying func to a list of arrays.\n\n func should be cupy.min, cupy.max or cupy.median\n \"\"\"\n dtype = arrays[0].dtype\n return cupy.concatenate(\n [\n func(a, keepdims=True)\n if a.size != 0 else cupy.asarray([0], dtype=dtype)\n for a in arrays\n ]\n )\n\n\ndef _get_positions(arrays, position_arrays, arg_func):\n \"\"\"Concatenated positions from applying arg_func to arrays.\n\n arg_func should be cupy.argmin or cupy.argmax\n \"\"\"\n return cupy.concatenate(\n [\n pos[arg_func(a, keepdims=True)]\n if a.size != 0 else cupy.asarray([0], dtype=int)\n for pos, a in zip(position_arrays, arrays)\n ]\n )\n\n\ndef _select_via_looping(input, labels, idxs, positions, find_min,\n find_min_positions, find_max, find_max_positions,\n find_median):\n \"\"\"Internal helper routine for _select.\n\n With relatively few labels it is faster to call this function rather than\n using the implementation based on cupy.lexsort.\n \"\"\"\n find_positions = find_min_positions or find_max_positions\n\n # extract labeled regions into separate arrays\n arrays = []\n position_arrays = []\n for i in idxs:\n label_idx = labels == i\n arrays.append(input[label_idx])\n if find_positions:\n position_arrays.append(positions[label_idx])\n\n result = []\n # the order below matches the order expected by cupy.ndimage.extrema\n if find_min:\n result += [_get_values(arrays, cupy.min)]\n if find_min_positions:\n result += [_get_positions(arrays, position_arrays, cupy.argmin)]\n if find_max:\n result += [_get_values(arrays, cupy.max)]\n if find_max_positions:\n result += [_get_positions(arrays, position_arrays, cupy.argmax)]\n if find_median:\n result += [_get_values(arrays, cupy.median)]\n return result\n\n\ndef _select(input, labels=None, index=None, find_min=False, find_max=False,\n find_min_positions=False, find_max_positions=False,\n find_median=False):\n \"\"\"Return one or more of: min, max, min position, max position, median.\n\n If neither `labels` or `index` is provided, these are the global values\n in `input`. If `index` is None, but `labels` is provided, a global value\n across all non-zero labels is given. When both `labels` and `index` are\n provided, lists of values are provided for each labeled region specified\n in `index`. 
See further details in :func:`cupyx.scipy.ndimage.minimum`,\n etc.\n\n Used by minimum, maximum, minimum_position, maximum_position, extrema.\n \"\"\"\n find_positions = find_min_positions or find_max_positions\n positions = None\n if find_positions:\n positions = cupy.arange(input.size).reshape(input.shape)\n\n def single_group(vals, positions):\n result = []\n if find_min:\n result += [vals.min()]\n if find_min_positions:\n result += [positions[vals == vals.min()][0]]\n if find_max:\n result += [vals.max()]\n if find_max_positions:\n result += [positions[vals == vals.max()][0]]\n if find_median:\n result += [cupy.median(vals)]\n return result\n\n if labels is None:\n return single_group(input, positions)\n\n # ensure input and labels match sizes\n input, labels = cupy.broadcast_arrays(input, labels)\n\n if index is None:\n mask = labels > 0\n masked_positions = None\n if find_positions:\n masked_positions = positions[mask]\n return single_group(input[mask], masked_positions)\n\n if cupy.isscalar(index):\n mask = labels == index\n masked_positions = None\n if find_positions:\n masked_positions = positions[mask]\n return single_group(input[mask], masked_positions)\n\n index = cupy.asarray(index)\n\n safe_int = _safely_castable_to_int(labels.dtype)\n min_label = labels.min()\n max_label = labels.max()\n\n # Remap labels to unique integers if necessary, or if the largest label is\n # larger than the number of values.\n if (not safe_int or min_label < 0 or max_label > labels.size):\n # Remap labels, and indexes\n unique_labels, labels = cupy.unique(labels, return_inverse=True)\n idxs = cupy.searchsorted(unique_labels, index)\n\n # Make all of idxs valid\n idxs[idxs >= unique_labels.size] = 0\n found = unique_labels[idxs] == index\n else:\n # Labels are an integer type, and there aren't too many\n idxs = cupy.asanyarray(index, int).copy()\n found = (idxs >= 0) & (idxs <= max_label)\n\n idxs[~found] = max_label + 1\n\n input = input.ravel()\n labels = labels.ravel()\n if find_positions:\n positions = positions.ravel()\n\n using_cub = _core._accelerator.ACCELERATOR_CUB in \\\n cupy._core.get_routine_accelerators()\n\n if using_cub:\n # Cutoff values below were determined empirically for relatively large\n # input arrays.\n if find_positions or find_median:\n n_label_cutoff = 15\n else:\n n_label_cutoff = 30\n else:\n n_label_cutoff = 0\n\n if n_label_cutoff and len(idxs) <= n_label_cutoff:\n return _select_via_looping(\n input, labels, idxs, positions, find_min, find_min_positions,\n find_max, find_max_positions, find_median\n )\n\n order = cupy.lexsort(cupy.stack((input.ravel(), labels.ravel())))\n input = input[order]\n labels = labels[order]\n if find_positions:\n positions = positions[order]\n\n # Determine indices corresponding to the min or max value for each label\n label_change_index = cupy.searchsorted(labels,\n cupy.arange(1, max_label + 2))\n if find_min or find_min_positions or find_median:\n # index corresponding to the minimum value at each label\n min_index = label_change_index[:-1]\n if find_max or find_max_positions or find_median:\n # index corresponding to the maximum value at each label\n max_index = label_change_index[1:] - 1\n\n result = []\n # the order below matches the order expected by cupy.ndimage.extrema\n if find_min:\n mins = cupy.zeros(int(labels.max()) + 2, input.dtype)\n mins[labels[min_index]] = input[min_index]\n result += [mins[idxs]]\n if find_min_positions:\n minpos = cupy.zeros(labels.max().item() + 2, int)\n minpos[labels[min_index]] = 
positions[min_index]\n result += [minpos[idxs]]\n if find_max:\n maxs = cupy.zeros(int(labels.max()) + 2, input.dtype)\n maxs[labels[max_index]] = input[max_index]\n result += [maxs[idxs]]\n if find_max_positions:\n maxpos = cupy.zeros(labels.max().item() + 2, int)\n maxpos[labels[max_index]] = positions[max_index]\n result += [maxpos[idxs]]\n if find_median:\n locs = cupy.arange(len(labels))\n lo = cupy.zeros(int(labels.max()) + 2, int)\n lo[labels[min_index]] = locs[min_index]\n hi = cupy.zeros(int(labels.max()) + 2, int)\n hi[labels[max_index]] = locs[max_index]\n lo = lo[idxs]\n hi = hi[idxs]\n # lo is an index to the lowest value in input for each label,\n # hi is an index to the largest value.\n # move them to be either the same ((hi - lo) % 2 == 0) or next\n # to each other ((hi - lo) % 2 == 1), then average.\n step = (hi - lo) // 2\n lo += step\n hi -= step\n if input.dtype.kind in 'iub':\n # fix for https://github.com/scipy/scipy/issues/12836\n result += [(input[lo].astype(float) + input[hi].astype(float)) /\n 2.0]\n else:\n result += [(input[lo] + input[hi]) / 2.0]\n\n return result\n\n\ndef minimum(input, labels=None, index=None):\n \"\"\"Calculate the minimum of the values of an array over labeled regions.\n\n Args:\n input (cupy.ndarray):\n Array of values. For each region specified by `labels`, the\n minimal values of `input` over the region is computed.\n labels (cupy.ndarray, optional): An array of integers marking different\n regions over which the minimum value of `input` is to be computed.\n `labels` must have the same shape as `input`. If `labels` is not\n specified, the minimum over the whole array is returned.\n index (array_like, optional): A list of region labels that are taken\n into account for computing the minima. If `index` is None, the\n minimum over all elements where `labels` is non-zero is returned.\n\n Returns:\n cupy.ndarray: Array of minima of `input` over the regions\n determined by `labels` and whose index is in `index`. If `index` or\n `labels` are not specified, a 0-dimensional cupy.ndarray is\n returned: the minimal value of `input` if `labels` is None,\n and the minimal value of elements where `labels` is greater than\n zero if `index` is None.\n\n .. seealso:: :func:`scipy.ndimage.minimum`\n \"\"\"\n return _select(input, labels, index, find_min=True)[0]\n\n\ndef maximum(input, labels=None, index=None):\n \"\"\"Calculate the maximum of the values of an array over labeled regions.\n\n Args:\n input (cupy.ndarray):\n Array of values. For each region specified by `labels`, the\n maximal values of `input` over the region is computed.\n labels (cupy.ndarray, optional): An array of integers marking different\n regions over which the maximum value of `input` is to be computed.\n `labels` must have the same shape as `input`. If `labels` is not\n specified, the maximum over the whole array is returned.\n index (array_like, optional): A list of region labels that are taken\n into account for computing the maxima. If `index` is None, the\n maximum over all elements where `labels` is non-zero is returned.\n\n Returns:\n cupy.ndarray: Array of maxima of `input` over the regions\n determaxed by `labels` and whose index is in `index`. If `index` or\n `labels` are not specified, a 0-dimensional cupy.ndarray is\n returned: the maximal value of `input` if `labels` is None,\n and the maximal value of elements where `labels` is greater than\n zero if `index` is None.\n\n .. 
seealso:: :func:`scipy.ndimage.maximum`\n \"\"\"\n return _select(input, labels, index, find_max=True)[0]\n\n\ndef median(input, labels=None, index=None):\n \"\"\"Calculate the median of the values of an array over labeled regions.\n\n Args:\n input (cupy.ndarray):\n Array of values. For each region specified by `labels`, the\n median values of `input` over the region is computed.\n labels (cupy.ndarray, optional): An array of integers marking different\n regions over which the median value of `input` is to be computed.\n `labels` must have the same shape as `input`. If `labels` is not\n specified, the median over the whole array is returned.\n index (array_like, optional): A list of region labels that are taken\n into account for computing the medians. If `index` is None, the\n median over all elements where `labels` is non-zero is returned.\n\n Returns:\n cupy.ndarray: Array of medians of `input` over the regions\n determined by `labels` and whose index is in `index`. If `index` or\n `labels` are not specified, a 0-dimensional cupy.ndarray is\n returned: the median value of `input` if `labels` is None,\n and the median value of elements where `labels` is greater than\n zero if `index` is None.\n\n .. seealso:: :func:`scipy.ndimage.median`\n \"\"\"\n return _select(input, labels, index, find_median=True)[0]\n\n\ndef minimum_position(input, labels=None, index=None):\n \"\"\"Find the positions of the minimums of the values of an array at labels.\n\n For each region specified by `labels`, the position of the minimum\n value of `input` within the region is returned.\n\n Args:\n input (cupy.ndarray):\n Array of values. For each region specified by `labels`, the\n minimal values of `input` over the region is computed.\n labels (cupy.ndarray, optional): An array of integers marking different\n regions over which the position of the minimum value of `input` is\n to be computed. `labels` must have the same shape as `input`. If\n `labels` is not specified, the location of the first minimum over\n the whole array is returned.\n\n The `labels` argument only works when `index` is specified.\n index (array_like, optional): A list of region labels that are taken\n into account for finding the location of the minima. If `index` is\n None, the ``first`` minimum over all elements where `labels` is\n non-zero is returned.\n\n The `index` argument only works when `labels` is specified.\n\n Returns:\n Tuple of ints or list of tuples of ints that specify the location of\n minima of `input` over the regions determined by `labels` and whose\n index is in `index`.\n\n If `index` or `labels` are not specified, a tuple of ints is returned\n specifying the location of the first minimal value of `input`.\n\n .. note::\n When `input` has multiple identical minima within a labeled region,\n the coordinates returned are not guaranteed to match those returned by\n SciPy.\n\n .. 
seealso:: :func:`scipy.ndimage.minimum_position`\n \"\"\"\n dims = numpy.asarray(input.shape)\n # see numpy.unravel_index to understand this line.\n dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1]\n\n result = _select(input, labels, index, find_min_positions=True)[0]\n\n # have to transfer result back to the CPU to return index tuples\n if result.ndim == 0:\n result = int(result) # synchronize\n else:\n result = cupy.asnumpy(result) # synchronize\n\n if cupy.isscalar(result):\n return tuple((result // dim_prod) % dims)\n\n return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims]\n\n\ndef maximum_position(input, labels=None, index=None):\n \"\"\"Find the positions of the maximums of the values of an array at labels.\n\n For each region specified by `labels`, the position of the maximum\n value of `input` within the region is returned.\n\n Args:\n input (cupy.ndarray):\n Array of values. For each region specified by `labels`, the\n maximal values of `input` over the region is computed.\n labels (cupy.ndarray, optional): An array of integers marking different\n regions over which the position of the maximum value of `input` is\n to be computed. `labels` must have the same shape as `input`. If\n `labels` is not specified, the location of the first maximum over\n the whole array is returned.\n\n The `labels` argument only works when `index` is specified.\n index (array_like, optional): A list of region labels that are taken\n into account for finding the location of the maxima. If `index` is\n None, the ``first`` maximum over all elements where `labels` is\n non-zero is returned.\n\n The `index` argument only works when `labels` is specified.\n\n Returns:\n Tuple of ints or list of tuples of ints that specify the location of\n maxima of `input` over the regions determaxed by `labels` and whose\n index is in `index`.\n\n If `index` or `labels` are not specified, a tuple of ints is returned\n specifying the location of the first maximal value of `input`.\n\n .. note::\n When `input` has multiple identical maxima within a labeled region,\n the coordinates returned are not guaranteed to match those returned by\n SciPy.\n\n .. seealso:: :func:`scipy.ndimage.maximum_position`\n \"\"\"\n dims = numpy.asarray(input.shape)\n # see numpy.unravel_index to understand this line.\n dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1]\n\n result = _select(input, labels, index, find_max_positions=True)[0]\n\n # have to transfer result back to the CPU to return index tuples\n if result.ndim == 0:\n result = int(result)\n else:\n result = cupy.asnumpy(result)\n\n if cupy.isscalar(result):\n return tuple((result // dim_prod) % dims)\n\n return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims]\n\n\ndef extrema(input, labels=None, index=None):\n \"\"\"Calculate the minimums and maximums of the values of an array at labels,\n along with their positions.\n\n Args:\n input (cupy.ndarray): N-D image data to process.\n labels (cupy.ndarray, optional): Labels of features in input. 
If not\n None, must be same shape as `input`.\n index (int or sequence of ints, optional): Labels to include in output.\n If None (default), all values where non-zero `labels` are used.\n\n Returns:\n A tuple that contains the following values.\n\n **minimums (cupy.ndarray)**: Values of minimums in each feature.\n\n **maximums (cupy.ndarray)**: Values of maximums in each feature.\n\n **min_positions (tuple or list of tuples)**: Each tuple gives the N-D\n coordinates of the corresponding minimum.\n\n **max_positions (tuple or list of tuples)**: Each tuple gives the N-D\n coordinates of the corresponding maximum.\n\n .. seealso:: :func:`scipy.ndimage.extrema`\n \"\"\"\n dims = numpy.array(input.shape)\n # see numpy.unravel_index to understand this line.\n dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1]\n\n minimums, min_positions, maximums, max_positions = _select(\n input,\n labels,\n index,\n find_min=True,\n find_max=True,\n find_min_positions=True,\n find_max_positions=True,\n )\n\n if min_positions.ndim == 0:\n # scalar output case\n min_positions = min_positions.item()\n max_positions = max_positions.item()\n return (\n minimums,\n maximums,\n tuple((min_positions // dim_prod) % dims),\n tuple((max_positions // dim_prod) % dims),\n )\n\n # convert indexes to tuples on the host\n min_positions = cupy.asnumpy(min_positions)\n max_positions = cupy.asnumpy(max_positions)\n min_positions = [\n tuple(v) for v in (min_positions.reshape(-1, 1) // dim_prod) % dims\n ]\n max_positions = [\n tuple(v) for v in (max_positions.reshape(-1, 1) // dim_prod) % dims\n ]\n\n return minimums, maximums, min_positions, max_positions\n\n\ndef center_of_mass(input, labels=None, index=None):\n \"\"\"\n Calculate the center of mass of the values of an array at labels.\n\n Args:\n input (cupy.ndarray): Data from which to calculate center-of-mass. The\n masses can either be positive or negative.\n labels (cupy.ndarray, optional): Labels for objects in `input`, as\n generated by `ndimage.label`. Only used with `index`. Dimensions\n must be the same as `input`.\n index (int or sequence of ints, optional): Labels for which to\n calculate centers-of-mass. If not specified, all labels greater\n than zero are used. Only used with `labels`.\n\n Returns:\n tuple or list of tuples: Coordinates of centers-of-mass.\n\n .. 
seealso:: :func:`scipy.ndimage.center_of_mass`\n \"\"\"\n normalizer = sum(input, labels, index)\n grids = cupy.ogrid[[slice(0, i) for i in input.shape]]\n\n results = [\n sum(input * grids[dir].astype(float), labels, index) / normalizer\n for dir in range(input.ndim)\n ]\n\n # have to transfer 0-dim array back to CPU?\n # may want to modify to avoid this\n is_0dim_array = (\n isinstance(results[0], cupy.ndarray) and results[0].ndim == 0\n )\n if is_0dim_array:\n # tuple of 0-dimensional cupy arrays\n return tuple(res for res in results)\n # list of cupy coordinate arrays\n return [v for v in cupy.stack(results, axis=-1)]\n\n\ndef labeled_comprehension(\n input, labels, index, func, out_dtype, default, pass_positions=False\n):\n \"\"\"Array resulting from applying ``func`` to each labeled region.\n\n Roughly equivalent to [func(input[labels == i]) for i in index].\n\n Sequentially applies an arbitrary function (that works on array_like input)\n to subsets of an N-D image array specified by `labels` and `index`.\n The option exists to provide the function with positional parameters as the\n second argument.\n\n Args:\n input (cupy.ndarray): Data from which to select `labels` to process.\n labels (cupy.ndarray or None): Labels to objects in `input`. If not\n None, array must be same shape as `input`. If None, `func` is\n applied to raveled `input`.\n index (int, sequence of ints or None): Subset of `labels` to which to\n apply `func`. If a scalar, a single value is returned. If None,\n `func` is applied to all non-zero values of `labels`.\n func (callable): Python function to apply to `labels` from `input`.\n out_dtype (dtype): Dtype to use for `result`.\n default (int, float or None): Default return value when a element of\n `index` does not exist in `labels`.\n pass_positions (bool, optional): If True, pass linear indices to `func`\n as a second argument.\n\n Returns:\n cupy.ndarray: Result of applying `func` to each of `labels` to `input`\n in `index`.\n\n .. 
seealso:: :func:`scipy.ndimage.labeled_comprehension`\n \"\"\"\n as_scalar = cupy.isscalar(index)\n input = cupy.asarray(input)\n\n if pass_positions:\n positions = cupy.arange(input.size).reshape(input.shape)\n\n if labels is None:\n if index is not None:\n raise ValueError('index without defined labels')\n if not pass_positions:\n return func(input.ravel())\n else:\n return func(input.ravel(), positions.ravel())\n\n try:\n input, labels = cupy.broadcast_arrays(input, labels)\n except ValueError:\n raise ValueError(\n 'input and labels must have the same shape '\n '(excepting dimensions with width 1)'\n )\n\n if index is None:\n if not pass_positions:\n return func(input[labels > 0])\n else:\n return func(input[labels > 0], positions[labels > 0])\n\n index = cupy.atleast_1d(index)\n if cupy.any(index.astype(labels.dtype).astype(index.dtype) != index):\n raise ValueError(\n 'Cannot convert index values from <%s> to <%s> '\n '(labels.dtype) without loss of precision'\n % (index.dtype, labels.dtype)\n )\n\n index = index.astype(labels.dtype)\n\n # optimization: find min/max in index, and select those parts of labels,\n # input, and positions\n lo = index.min()\n hi = index.max()\n mask = (labels >= lo) & (labels <= hi)\n\n # this also ravels the arrays\n labels = labels[mask]\n input = input[mask]\n if pass_positions:\n positions = positions[mask]\n\n # sort everything by labels\n label_order = labels.argsort()\n labels = labels[label_order]\n input = input[label_order]\n if pass_positions:\n positions = positions[label_order]\n\n index_order = index.argsort()\n sorted_index = index[index_order]\n\n def do_map(inputs, output):\n \"\"\"labels must be sorted\"\"\"\n nidx = sorted_index.size\n\n # Find boundaries for each stretch of constant labels\n # This could be faster, but we already paid N log N to sort labels.\n lo = cupy.searchsorted(labels, sorted_index, side='left')\n hi = cupy.searchsorted(labels, sorted_index, side='right')\n\n for i, low, high in zip(range(nidx), lo, hi):\n if low == high:\n continue\n output[i] = func(*[inp[low:high] for inp in inputs])\n\n if out_dtype == object:\n temp = {i: default for i in range(index.size)}\n else:\n temp = cupy.empty(index.shape, out_dtype)\n if default is None and temp.dtype.kind in 'fc':\n default = numpy.nan # match NumPy floating-point None behavior\n temp[:] = default\n\n if not pass_positions:\n do_map([input], temp)\n else:\n do_map([input, positions], temp)\n\n if out_dtype == object:\n # use a list of arrays since object arrays are not supported\n index_order = cupy.asnumpy(index_order)\n output = [temp[i] for i in index_order.argsort()]\n else:\n output = cupy.zeros(index.shape, out_dtype)\n output[cupy.asnumpy(index_order)] = temp\n if as_scalar:\n output = output[0]\n return output\n\n\ndef histogram(input, min, max, bins, labels=None, index=None):\n \"\"\"Calculate the histogram of the values of an array, optionally at labels.\n\n Histogram calculates the frequency of values in an array within bins\n determined by `min`, `max`, and `bins`. The `labels` and `index`\n keywords can limit the scope of the histogram to specified sub-regions\n within the array.\n\n Args:\n input (cupy.ndarray): Data for which to calculate histogram.\n min (int): Minimum values of range of histogram bins.\n max (int): Maximum values of range of histogram bins.\n bins (int): Number of bins.\n labels (cupy.ndarray, optional): Labels for objects in `input`. 
If not\n None, must be same shape as `input`.\n index (int or sequence of ints, optional): Label or labels for which to\n calculate histogram. If None, all values where label is greater\n than zero are used.\n\n Returns:\n cupy.ndarray: Histogram counts.\n\n .. seealso:: :func:`scipy.ndimage.histogram`\n \"\"\"\n _bins = cupy.linspace(min, max, bins + 1)\n\n def _hist(vals):\n return cupy.histogram(vals, _bins)[0]\n\n return labeled_comprehension(\n input, labels, index, _hist, object, None, pass_positions=False\n )\n", "import operator\n\nimport numpy\nimport pytest\n\nimport cupy\nfrom cupy import testing\n\n\nclass TestArrayElementwiseOp:\n\n @testing.for_all_dtypes_combination(names=['x_type', 'y_type'])\n @testing.numpy_cupy_allclose(rtol=1e-6, accept_error=TypeError)\n def check_array_scalar_op(self, op, xp, x_type, y_type, swap=False,\n no_bool=False, no_complex=False):\n x_dtype = numpy.dtype(x_type)\n y_dtype = numpy.dtype(y_type)\n if no_bool and x_dtype == '?' and y_dtype == '?':\n return xp.array(True)\n if no_complex and (x_dtype.kind == 'c' or y_dtype.kind == 'c'):\n return xp.array(True)\n a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)\n if swap:\n return op(y_type(3), a)\n else:\n return op(a, y_type(3))\n\n def test_add_scalar(self):\n self.check_array_scalar_op(operator.add)\n\n def test_radd_scalar(self):\n self.check_array_scalar_op(operator.add, swap=True)\n\n def test_iadd_scalar(self):\n self.check_array_scalar_op(operator.iadd)\n\n def test_sub_scalar(self):\n self.check_array_scalar_op(operator.sub, no_bool=True)\n\n def test_rsub_scalar(self):\n self.check_array_scalar_op(operator.sub, swap=True, no_bool=True)\n\n def test_isub_scalar(self):\n self.check_array_scalar_op(operator.isub, no_bool=True)\n\n def test_mul_scalar(self):\n self.check_array_scalar_op(operator.mul)\n\n def test_rmul_scalar(self):\n self.check_array_scalar_op(operator.mul, swap=True)\n\n def test_imul_scalar(self):\n self.check_array_scalar_op(operator.imul)\n\n def test_truediv_scalar(self):\n with numpy.errstate(divide='ignore'):\n self.check_array_scalar_op(operator.truediv)\n\n def test_rtruediv_scalar(self):\n with numpy.errstate(divide='ignore'):\n self.check_array_scalar_op(operator.truediv, swap=True)\n\n def test_itruediv_scalar(self):\n with numpy.errstate(divide='ignore'):\n self.check_array_scalar_op(operator.itruediv)\n\n def test_floordiv_scalar(self):\n with numpy.errstate(divide='ignore'):\n self.check_array_scalar_op(operator.floordiv, no_complex=True)\n\n def test_rfloordiv_scalar(self):\n with numpy.errstate(divide='ignore'):\n self.check_array_scalar_op(operator.floordiv, swap=True,\n no_complex=True)\n\n def test_ifloordiv_scalar(self):\n with numpy.errstate(divide='ignore'):\n self.check_array_scalar_op(operator.ifloordiv, no_complex=True)\n\n def test_pow_scalar(self):\n self.check_array_scalar_op(operator.pow)\n\n def test_rpow_scalar(self):\n self.check_array_scalar_op(operator.pow, swap=True)\n\n @testing.for_all_dtypes_combination(names=['x_type', 'y_type'])\n @testing.numpy_cupy_allclose(atol=1.0, accept_error=TypeError)\n def check_ipow_scalar(self, xp, x_type, y_type):\n a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)\n return operator.ipow(a, y_type(3))\n\n def test_ipow_scalar(self):\n self.check_ipow_scalar()\n\n def test_divmod0_scalar(self):\n with numpy.errstate(divide='ignore'):\n self.check_array_scalar_op(lambda x, y: divmod(x, y)[0],\n no_complex=True)\n\n def test_divmod1_scalar(self):\n with numpy.errstate(divide='ignore'):\n 
self.check_array_scalar_op(lambda x, y: divmod(x, y)[1],\n no_complex=True)\n\n def test_rdivmod0_scalar(self):\n with numpy.errstate(divide='ignore'):\n self.check_array_scalar_op(lambda x, y: divmod(x, y)[0], swap=True,\n no_complex=True)\n\n def test_rdivmod1_scalar(self):\n with numpy.errstate(divide='ignore'):\n self.check_array_scalar_op(lambda x, y: divmod(x, y)[1], swap=True,\n no_complex=True)\n\n def test_lt_scalar(self):\n self.check_array_scalar_op(operator.lt, no_complex=False)\n\n def test_le_scalar(self):\n self.check_array_scalar_op(operator.le, no_complex=False)\n\n def test_gt_scalar(self):\n self.check_array_scalar_op(operator.gt, no_complex=False)\n\n def test_ge_scalar(self):\n self.check_array_scalar_op(operator.ge, no_complex=False)\n\n def test_eq_scalar(self):\n self.check_array_scalar_op(operator.eq)\n\n def test_ne_scalar(self):\n self.check_array_scalar_op(operator.ne)\n\n @testing.for_all_dtypes_combination(names=['x_type', 'y_type'])\n @testing.numpy_cupy_allclose(accept_error=TypeError)\n def check_array_array_op(self, op, xp, x_type, y_type,\n no_bool=False, no_complex=False):\n x_dtype = numpy.dtype(x_type)\n y_dtype = numpy.dtype(y_type)\n if no_bool and x_dtype == '?' and y_dtype == '?':\n return xp.array(True)\n if no_complex and (x_dtype.kind == 'c' or y_dtype.kind == 'c'):\n return xp.array(True)\n a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)\n b = xp.array([[6, 5, 4], [3, 2, 1]], y_type)\n return op(a, b)\n\n def test_add_array(self):\n self.check_array_array_op(operator.add)\n\n def test_iadd_array(self):\n self.check_array_array_op(operator.iadd)\n\n def test_sub_array(self):\n self.check_array_array_op(operator.sub, no_bool=True)\n\n def test_isub_array(self):\n self.check_array_array_op(operator.isub, no_bool=True)\n\n def test_mul_array(self):\n self.check_array_array_op(operator.mul)\n\n def test_imul_array(self):\n self.check_array_array_op(operator.imul)\n\n def test_truediv_array(self):\n with numpy.errstate(divide='ignore'):\n self.check_array_array_op(operator.truediv)\n\n def test_itruediv_array(self):\n with numpy.errstate(divide='ignore'):\n self.check_array_array_op(operator.itruediv)\n\n def test_floordiv_array(self):\n with numpy.errstate(divide='ignore'):\n self.check_array_array_op(operator.floordiv, no_complex=True)\n\n def test_ifloordiv_array(self):\n if '1.16.1' <= numpy.lib.NumpyVersion(numpy.__version__) < '1.18.0':\n self.skipTest(\"NumPy Issue #12927\")\n with numpy.errstate(divide='ignore'):\n self.check_array_array_op(operator.ifloordiv, no_complex=True)\n\n @testing.for_all_dtypes_combination(names=['x_type', 'y_type'])\n @testing.numpy_cupy_allclose(atol=1e-5, rtol=1e-6, accept_error=TypeError)\n def check_pow_array(self, xp, x_type, y_type):\n a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)\n b = xp.array([[6, 5, 4], [3, 2, 1]], y_type)\n return operator.pow(a, b)\n\n def test_pow_array(self):\n # There are some precission issues in HIP that prevent\n # checking with atol=0\n if cupy.cuda.runtime.is_hip:\n self.check_pow_array()\n else:\n self.check_array_array_op(operator.pow)\n\n @testing.for_all_dtypes_combination(names=['x_type', 'y_type'])\n @testing.numpy_cupy_allclose(atol=1.0, accept_error=TypeError)\n def check_ipow_array(self, xp, x_type, y_type):\n a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)\n b = xp.array([[6, 5, 4], [3, 2, 1]], y_type)\n return operator.ipow(a, b)\n\n def test_ipow_array(self):\n self.check_ipow_array()\n\n def test_divmod0_array(self):\n with numpy.errstate(divide='ignore'):\n 
self.check_array_array_op(lambda x, y: divmod(x, y)[0])\n\n    def test_divmod1_array(self):\n        with numpy.errstate(divide='ignore'):\n            self.check_array_array_op(lambda x, y: divmod(x, y)[1])\n\n    def test_lt_array(self):\n        self.check_array_array_op(operator.lt, no_complex=True)\n\n    def test_le_array(self):\n        self.check_array_array_op(operator.le, no_complex=True)\n\n    def test_gt_array(self):\n        self.check_array_array_op(operator.gt, no_complex=True)\n\n    def test_ge_array(self):\n        self.check_array_array_op(operator.ge, no_complex=True)\n\n    def test_eq_array(self):\n        self.check_array_array_op(operator.eq)\n\n    def test_ne_array(self):\n        self.check_array_array_op(operator.ne)\n\n    @testing.for_all_dtypes_combination(names=['x_type', 'y_type'])\n    @testing.numpy_cupy_allclose(accept_error=TypeError)\n    def check_array_broadcasted_op(self, op, xp, x_type, y_type,\n                                   no_bool=False, no_complex=False):\n        x_dtype = numpy.dtype(x_type)\n        y_dtype = numpy.dtype(y_type)\n        if no_bool and x_dtype == '?' and y_dtype == '?':\n            return xp.array(True)\n        if no_complex and (x_dtype.kind == 'c' or y_dtype.kind == 'c'):\n            return xp.array(True)\n        a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)\n        b = xp.array([[1], [2]], y_type)\n        return op(a, b)\n\n    def test_broadcasted_add(self):\n        self.check_array_broadcasted_op(operator.add)\n\n    def test_broadcasted_iadd(self):\n        self.check_array_broadcasted_op(operator.iadd)\n\n    def test_broadcasted_sub(self):\n        # TODO(unno): sub for boolean array is deprecated in numpy>=1.13\n        self.check_array_broadcasted_op(operator.sub, no_bool=True)\n\n    def test_broadcasted_isub(self):\n        # TODO(unno): sub for boolean array is deprecated in numpy>=1.13\n        self.check_array_broadcasted_op(operator.isub, no_bool=True)\n\n    def test_broadcasted_mul(self):\n        self.check_array_broadcasted_op(operator.mul)\n\n    def test_broadcasted_imul(self):\n        self.check_array_broadcasted_op(operator.imul)\n\n    def test_broadcasted_truediv(self):\n        with numpy.errstate(divide='ignore'):\n            self.check_array_broadcasted_op(operator.truediv)\n\n    def test_broadcasted_itruediv(self):\n        with numpy.errstate(divide='ignore'):\n            self.check_array_broadcasted_op(operator.itruediv)\n\n    def test_broadcasted_floordiv(self):\n        with numpy.errstate(divide='ignore'):\n            self.check_array_broadcasted_op(operator.floordiv, no_complex=True)\n\n    def test_broadcasted_ifloordiv(self):\n        if '1.16.1' <= numpy.lib.NumpyVersion(numpy.__version__) < '1.18.0':\n            self.skipTest(\"NumPy Issue #12927\")\n        with numpy.errstate(divide='ignore'):\n            self.check_array_broadcasted_op(operator.ifloordiv,\n                                            no_complex=True)\n\n    @testing.for_all_dtypes_combination(names=['x_type', 'y_type'])\n    @testing.numpy_cupy_allclose(atol=1e-5, rtol=1e-6, accept_error=TypeError)\n    def check_broadcasted_pow(self, xp, x_type, y_type):\n        a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)\n        b = xp.array([[1], [2]], y_type)\n        return operator.pow(a, b)\n\n    def test_broadcasted_pow(self):\n        # There are some precision issues in HIP that prevent\n        # checking with atol=0\n        if cupy.cuda.runtime.is_hip:\n            self.check_broadcasted_pow()\n        else:\n            self.check_array_broadcasted_op(operator.pow)\n\n    @testing.for_all_dtypes_combination(names=['x_type', 'y_type'])\n    @testing.numpy_cupy_allclose(atol=1.0, accept_error=TypeError)\n    def check_broadcasted_ipow(self, xp, x_type, y_type):\n        a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)\n        b = xp.array([[1], [2]], y_type)\n        return operator.ipow(a, b)\n\n    def test_broadcasted_ipow(self):\n        self.check_broadcasted_ipow()\n\n    def test_broadcasted_divmod0(self):\n        with 
numpy.errstate(divide='ignore'):\n self.check_array_broadcasted_op(lambda x, y: divmod(x, y)[0],\n no_complex=True)\n\n def test_broadcasted_divmod1(self):\n with numpy.errstate(divide='ignore'):\n self.check_array_broadcasted_op(lambda x, y: divmod(x, y)[1],\n no_complex=True)\n\n def test_broadcasted_lt(self):\n self.check_array_broadcasted_op(operator.lt, no_complex=True)\n\n def test_broadcasted_le(self):\n self.check_array_broadcasted_op(operator.le, no_complex=True)\n\n def test_broadcasted_gt(self):\n self.check_array_broadcasted_op(operator.gt, no_complex=True)\n\n def test_broadcasted_ge(self):\n self.check_array_broadcasted_op(operator.ge, no_complex=True)\n\n def test_broadcasted_eq(self):\n self.check_array_broadcasted_op(operator.eq)\n\n def test_broadcasted_ne(self):\n self.check_array_broadcasted_op(operator.ne)\n\n @testing.for_all_dtypes_combination(names=['x_type', 'y_type'])\n @testing.numpy_cupy_allclose(rtol=1e-6)\n def check_array_doubly_broadcasted_op(self, op, xp, x_type, y_type,\n no_bool=False, no_complex=False):\n x_dtype = numpy.dtype(x_type)\n y_dtype = numpy.dtype(y_type)\n if no_bool and x_dtype == '?' and y_dtype == '?':\n return xp.array(True)\n if no_complex and (x_dtype.kind == 'c' or y_dtype.kind == 'c'):\n return xp.array(True)\n a = xp.array([[[1, 2, 3]], [[4, 5, 6]]], x_type)\n b = xp.array([[1], [2], [3]], y_type)\n return op(a, b)\n\n def test_doubly_broadcasted_add(self):\n self.check_array_doubly_broadcasted_op(operator.add)\n\n def test_doubly_broadcasted_sub(self):\n self.check_array_doubly_broadcasted_op(operator.sub, no_bool=True)\n\n def test_doubly_broadcasted_mul(self):\n self.check_array_doubly_broadcasted_op(operator.mul)\n\n def test_doubly_broadcasted_truediv(self):\n with numpy.errstate(divide='ignore', invalid='ignore'):\n self.check_array_doubly_broadcasted_op(operator.truediv)\n\n def test_doubly_broadcasted_floordiv(self):\n with numpy.errstate(divide='ignore'):\n self.check_array_doubly_broadcasted_op(operator.floordiv,\n no_complex=True)\n\n def test_doubly_broadcasted_pow(self):\n self.check_array_doubly_broadcasted_op(operator.pow)\n\n def test_doubly_broadcasted_divmod0(self):\n with numpy.errstate(divide='ignore'):\n self.check_array_doubly_broadcasted_op(\n lambda x, y: divmod(x, y)[0],\n no_complex=True)\n\n def test_doubly_broadcasted_divmod1(self):\n with numpy.errstate(divide='ignore'):\n self.check_array_doubly_broadcasted_op(\n lambda x, y: divmod(x, y)[1],\n no_complex=True)\n\n def test_doubly_broadcasted_lt(self):\n self.check_array_doubly_broadcasted_op(operator.lt, no_complex=True)\n\n def test_doubly_broadcasted_le(self):\n self.check_array_doubly_broadcasted_op(operator.le, no_complex=True)\n\n def test_doubly_broadcasted_gt(self):\n self.check_array_doubly_broadcasted_op(operator.gt, no_complex=True)\n\n def test_doubly_broadcasted_ge(self):\n self.check_array_doubly_broadcasted_op(operator.ge, no_complex=True)\n\n def test_doubly_broadcasted_eq(self):\n self.check_array_doubly_broadcasted_op(operator.eq)\n\n def test_doubly_broadcasted_ne(self):\n self.check_array_doubly_broadcasted_op(operator.ne)\n\n @testing.for_all_dtypes_combination(names=['x_type', 'y_type'])\n @testing.numpy_cupy_allclose()\n def check_array_reversed_op(self, op, xp, x_type, y_type, no_bool=False):\n if no_bool and x_type == numpy.bool_ and y_type == numpy.bool_:\n return xp.array(True)\n a = xp.array([1, 2, 3, 4, 5], x_type)\n b = xp.array([1, 2, 3, 4, 5], y_type)\n return op(a, b[::-1])\n\n def test_array_reversed_add(self):\n 
self.check_array_reversed_op(operator.add)\n\n def test_array_reversed_sub(self):\n self.check_array_reversed_op(operator.sub, no_bool=True)\n\n def test_array_reversed_mul(self):\n self.check_array_reversed_op(operator.mul)\n\n @testing.for_all_dtypes(no_bool=True)\n def check_typecast(self, val, dtype):\n operators = [\n operator.add, operator.sub, operator.mul, operator.truediv]\n\n for op in operators:\n with numpy.errstate(divide='ignore', invalid='ignore'):\n a = op(val, (testing.shaped_arange((5,), numpy, dtype) - 2))\n b = op(val, (testing.shaped_arange((5,), cupy, dtype) - 2))\n assert a.dtype == b.dtype\n\n def test_typecast_bool1(self):\n self.check_typecast(True)\n\n def test_typecast_bool2(self):\n self.check_typecast(False)\n\n def test_typecast_int1(self):\n self.check_typecast(0)\n\n def test_typecast_int2(self):\n self.check_typecast(-127)\n\n def test_typecast_int3(self):\n self.check_typecast(255)\n\n def test_typecast_int4(self):\n self.check_typecast(-32768)\n\n def test_typecast_int5(self):\n self.check_typecast(65535)\n\n def test_typecast_int6(self):\n self.check_typecast(-2147483648)\n\n def test_typecast_int7(self):\n self.check_typecast(4294967295)\n\n def test_typecast_float1(self):\n self.check_typecast(0.0)\n\n def test_typecast_float2(self):\n self.check_typecast(100000.0)\n\n # Skip float16 because of NumPy #19514\n @testing.for_all_dtypes(name='x_type', no_float16=True)\n @testing.numpy_cupy_allclose()\n def check_array_boolarray_op(self, op, xp, x_type):\n a = xp.array([[2, 7, 1], [8, 2, 8]], x_type)\n # Cast from np.bool8 array should not read bytes\n b = xp.array([[3, 1, 4], [-1, -5, -9]], numpy.int8).view(bool)\n return op(a, b)\n\n def test_add_array_boolarray(self):\n self.check_array_boolarray_op(operator.add)\n\n def test_iadd_array_boolarray(self):\n self.check_array_boolarray_op(operator.iadd)\n\n\nclass TestArrayIntElementwiseOp:\n\n @testing.for_all_dtypes_combination(names=['x_type', 'y_type'])\n @testing.numpy_cupy_allclose(accept_error=TypeError)\n def check_array_scalar_op(self, op, xp, x_type, y_type, swap=False):\n a = xp.array([[0, 1, 2], [1, 0, 2]], dtype=x_type)\n if swap:\n return op(y_type(2), a)\n else:\n return op(a, y_type(2))\n\n def test_lshift_scalar(self):\n self.check_array_scalar_op(operator.lshift)\n\n def test_rlshift_scalar(self):\n self.check_array_scalar_op(operator.lshift, swap=True)\n\n def test_rshift_scalar(self):\n self.check_array_scalar_op(operator.rshift)\n\n def test_rrshift_scalar(self):\n self.check_array_scalar_op(operator.rshift, swap=True)\n\n def test_and_scalar(self):\n self.check_array_scalar_op(operator.and_)\n\n def test_rand_scalar(self):\n self.check_array_scalar_op(operator.and_, swap=True)\n\n def test_or_scalar(self):\n self.check_array_scalar_op(operator.or_)\n\n def test_ror_scalar(self):\n self.check_array_scalar_op(operator.or_, swap=True)\n\n def test_xor_scalar(self):\n self.check_array_scalar_op(operator.xor)\n\n def test_rxor_scalar(self):\n self.check_array_scalar_op(operator.xor, swap=True)\n\n def test_mod_scalar(self):\n with numpy.errstate(divide='ignore', invalid='ignore'):\n self.check_array_scalar_op(operator.mod)\n\n def test_rmod_scalar(self):\n with numpy.errstate(divide='ignore', invalid='ignore'):\n self.check_array_scalar_op(operator.mod, swap=True)\n\n @testing.for_all_dtypes_combination(names=['x_type', 'y_type'])\n @testing.numpy_cupy_allclose(accept_error=TypeError)\n def check_array_scalarzero_op(self, op, xp, x_type, y_type, swap=False):\n a = xp.array([[0, 1, 2], [1, 
0, 2]], dtype=x_type)\n if swap:\n return op(y_type(0), a)\n else:\n return op(a, y_type(0))\n\n def test_lshift_scalarzero(self):\n self.check_array_scalarzero_op(operator.lshift)\n\n def test_rlshift_scalarzero(self):\n self.check_array_scalarzero_op(operator.lshift, swap=True)\n\n def test_rshift_scalarzero(self):\n self.check_array_scalarzero_op(operator.rshift)\n\n def test_rrshift_scalarzero(self):\n self.check_array_scalarzero_op(operator.rshift, swap=True)\n\n def test_and_scalarzero(self):\n self.check_array_scalarzero_op(operator.and_)\n\n def test_rand_scalarzero(self):\n self.check_array_scalarzero_op(operator.and_, swap=True)\n\n def test_or_scalarzero(self):\n self.check_array_scalarzero_op(operator.or_)\n\n def test_ror_scalarzero(self):\n self.check_array_scalarzero_op(operator.or_, swap=True)\n\n def test_xor_scalarzero(self):\n self.check_array_scalarzero_op(operator.xor)\n\n def test_rxor_scalarzero(self):\n self.check_array_scalarzero_op(operator.xor, swap=True)\n\n def test_mod_scalarzero(self):\n with numpy.errstate(divide='ignore', invalid='ignore'):\n self.check_array_scalarzero_op(operator.mod)\n\n def test_rmod_scalarzero(self):\n with numpy.errstate(divide='ignore', invalid='ignore'):\n self.check_array_scalarzero_op(operator.mod, swap=True)\n\n @testing.for_all_dtypes_combination(names=['x_type', 'y_type'])\n @testing.numpy_cupy_allclose(accept_error=TypeError)\n def check_array_array_op(self, op, xp, x_type, y_type):\n a = xp.array([[0, 1, 2], [1, 0, 2]], dtype=x_type)\n b = xp.array([[0, 0, 1], [0, 1, 2]], dtype=y_type)\n return op(a, b)\n\n def test_lshift_array(self):\n self.check_array_array_op(operator.lshift)\n\n def test_ilshift_array(self):\n self.check_array_array_op(operator.ilshift)\n\n def test_rshift_array(self):\n self.check_array_array_op(operator.rshift)\n\n def test_irshift_array(self):\n self.check_array_array_op(operator.irshift)\n\n def test_and_array(self):\n self.check_array_array_op(operator.and_)\n\n def test_iand_array(self):\n self.check_array_array_op(operator.iand)\n\n def test_or_array(self):\n self.check_array_array_op(operator.or_)\n\n def test_ior_array(self):\n self.check_array_array_op(operator.ior)\n\n def test_xor_array(self):\n self.check_array_array_op(operator.xor)\n\n def test_ixor_array(self):\n self.check_array_array_op(operator.ixor)\n\n def test_mod_array(self):\n with numpy.errstate(divide='ignore', invalid='ignore'):\n self.check_array_array_op(operator.mod)\n\n def test_imod_array(self):\n with numpy.errstate(divide='ignore', invalid='ignore'):\n self.check_array_array_op(operator.imod)\n\n @testing.for_all_dtypes_combination(names=['x_type', 'y_type'])\n @testing.numpy_cupy_allclose(accept_error=TypeError)\n def check_array_broadcasted_op(self, op, xp, x_type, y_type):\n a = xp.array([[0, 1, 2], [1, 0, 2], [2, 1, 0]], dtype=x_type)\n b = xp.array([[0, 0, 1]], dtype=y_type)\n return op(a, b)\n\n def test_broadcasted_lshift(self):\n self.check_array_broadcasted_op(operator.lshift)\n\n def test_broadcasted_ilshift(self):\n self.check_array_broadcasted_op(operator.ilshift)\n\n def test_broadcasted_rshift(self):\n self.check_array_broadcasted_op(operator.rshift)\n\n def test_broadcasted_irshift(self):\n self.check_array_broadcasted_op(operator.irshift)\n\n def test_broadcasted_and(self):\n self.check_array_broadcasted_op(operator.and_)\n\n def test_broadcasted_iand(self):\n self.check_array_broadcasted_op(operator.iand)\n\n def test_broadcasted_or(self):\n self.check_array_broadcasted_op(operator.or_)\n\n def 
test_broadcasted_ior(self):\n self.check_array_broadcasted_op(operator.ior)\n\n def test_broadcasted_xor(self):\n self.check_array_broadcasted_op(operator.xor)\n\n def test_broadcasted_ixor(self):\n self.check_array_broadcasted_op(operator.ixor)\n\n def test_broadcasted_mod(self):\n with numpy.errstate(divide='ignore', invalid='ignore'):\n self.check_array_broadcasted_op(operator.mod)\n\n def test_broadcasted_imod(self):\n with numpy.errstate(divide='ignore', invalid='ignore'):\n self.check_array_broadcasted_op(operator.imod)\n\n @testing.for_all_dtypes_combination(names=['x_type', 'y_type'])\n @testing.numpy_cupy_allclose(accept_error=TypeError)\n def check_array_doubly_broadcasted_op(self, op, xp, x_type, y_type):\n a = xp.array([[[0, 1, 2]], [[1, 0, 2]]], dtype=x_type)\n b = xp.array([[0], [0], [1]], dtype=y_type)\n return op(a, b)\n\n def test_doubly_broadcasted_lshift(self):\n self.check_array_doubly_broadcasted_op(operator.lshift)\n\n def test_doubly_broadcasted_rshift(self):\n self.check_array_doubly_broadcasted_op(operator.rshift)\n\n def test_doubly_broadcasted_and(self):\n self.check_array_doubly_broadcasted_op(operator.and_)\n\n def test_doubly_broadcasted_or(self):\n self.check_array_doubly_broadcasted_op(operator.or_)\n\n def test_doubly_broadcasted_xor(self):\n self.check_array_doubly_broadcasted_op(operator.xor)\n\n def test_doubly_broadcasted_mod(self):\n with numpy.errstate(divide='ignore', invalid='ignore'):\n self.check_array_doubly_broadcasted_op(operator.mod)\n\n\[email protected]('value', [\n None,\n Ellipsis,\n object(),\n numpy._NoValue,\n])\nclass TestArrayObjectComparison:\n\n @pytest.mark.parametrize('swap', [False, True])\n @testing.for_all_dtypes()\n @testing.numpy_cupy_array_equal()\n def test_eq_object(self, xp, dtype, value, swap):\n a = xp.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)\n if swap:\n return value == a\n else:\n return a == value\n\n @pytest.mark.parametrize('swap', [False, True])\n @testing.for_all_dtypes()\n @testing.numpy_cupy_array_equal()\n def test_ne_object(self, xp, dtype, value, swap):\n a = xp.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)\n if swap:\n return value != a\n else:\n return a != value\n\n\nclass HasEq:\n def __eq__(self, other):\n return (other == 2) | (other == 4)\n\n\nclass HasNe:\n def __ne__(self, other):\n return (other == 2) | (other == 4)\n\n\nclass HasEqSub(HasEq):\n pass\n\n\nclass CustomInt(int):\n pass\n\n\[email protected]('dtype', ['int32', 'float64'])\[email protected]('value', [\n HasEq(),\n HasNe(), # eq test passes because `==` does not fall back to `__ne__`.\n HasEqSub(),\n CustomInt(3),\n])\nclass TestArrayObjectComparisonDifficult:\n\n # OK to raise TypeError.\n # If CuPy returns a result, it should match with NumPy's result.\n\n def test_eq_object(self, dtype, value):\n expected = numpy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype) == value\n\n a = cupy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)\n try:\n res = a == value\n except TypeError:\n pytest.skip()\n\n cupy.testing.assert_array_equal(res, expected)\n\n def test_ne_object(self, dtype, value):\n expected = numpy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype) != value\n\n a = cupy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)\n try:\n res = a != value\n except TypeError:\n pytest.skip()\n\n cupy.testing.assert_array_equal(res, expected)\n", "import string\nimport warnings\n\nimport numpy\ntry:\n import scipy.sparse\n scipy_available = True\nexcept ImportError:\n scipy_available = False\n\nimport cupy\nimport cupyx\n\nfrom cupy import _core\nfrom cupy._core import 
_scalar\nfrom cupy._creation import basic\nfrom cupy import cusparse\nfrom cupyx.scipy.sparse import _base\nfrom cupyx.scipy.sparse import _coo\nfrom cupyx.scipy.sparse import _data as sparse_data\nfrom cupyx.scipy.sparse import _sputils\nfrom cupyx.scipy.sparse import _util\n\nfrom cupyx.scipy.sparse import _index\n\n\nclass _compressed_sparse_matrix(sparse_data._data_matrix,\n sparse_data._minmax_mixin,\n _index.IndexMixin):\n\n _max_min_reduction_code = r'''\n extern \"C\" __global__\n void ${func}(double* data, int* x, int* y, int length,\n double* z) {\n // Get the index of the block\n int tid = blockIdx.x * blockDim.x + threadIdx.x;\n\n // Calculate the block length\n int block_length = y[tid] - x[tid];\n\n // Select initial value based on the block density\n double running_value = 0;\n if (${cond}){\n running_value = data[x[tid]];\n } else {\n running_value = 0;\n }\n\n // Iterate over the block and update\n for (int entry = x[tid]; entry < y[tid]; entry++){\n if (data[entry] != data[entry]){\n // Check for NaN\n running_value = nan(\"\");\n break;\n } else {\n // Check for a value update\n if (data[entry] ${op} running_value){\n running_value = data[entry];\n }\n }\n }\n\n // Store in the return function\n z[tid] = running_value;\n }'''\n\n _max_reduction_kern = _core.RawKernel(\n string.Template(_max_min_reduction_code).substitute(\n func='max_reduction', op='>', cond='block_length == length'),\n 'max_reduction')\n\n _max_nonzero_reduction_kern = _core.RawKernel(\n string.Template(_max_min_reduction_code).substitute(\n func='max_nonzero_reduction', op='>', cond='block_length > 0'),\n 'max_nonzero_reduction')\n\n _min_reduction_kern = _core.RawKernel(\n string.Template(_max_min_reduction_code).substitute(\n func='min_reduction', op='<', cond='block_length == length'),\n 'min_reduction')\n\n _min_nonzero_reduction_kern = _core.RawKernel(\n string.Template(_max_min_reduction_code).substitute(\n func='min_nonzero_reduction', op='<', cond='block_length > 0'),\n 'min_nonzero_reduction')\n\n # For _max_arg_reduction_mod and _min_arg_reduction_mod below, we pick\n # the right template specialization according to input dtypes at runtime.\n # The distinction in int types (T2) is important for portability in OS.\n\n _argmax_argmin_code = r'''\n template<typename T1, typename T2> __global__ void\n ${func}_arg_reduction(T1* data, int* indices, int* x, int* y,\n int length, T2* z) {\n // Get the index of the block\n int tid = blockIdx.x * blockDim.x + threadIdx.x;\n\n // Calculate the block length\n int block_length = y[tid] - x[tid];\n\n // Select initial value based on the block density\n int data_index = 0;\n double data_value = 0;\n\n if (block_length == length){\n // Block is dense. Fill the first value\n data_value = data[x[tid]];\n data_index = indices[x[tid]];\n } else if (block_length > 0) {\n // Block has at least one zero. 
Assign first occurrence as the\n            // starting reference\n            data_value = 0;\n            for (data_index = 0; data_index < length; data_index++){\n                if (data_index != indices[x[tid] + data_index] ||\n                    x[tid] + data_index >= y[tid]){\n                    break;\n                }\n            }\n        } else {\n            // Zero valued array\n            data_value = 0;\n            data_index = 0;\n        }\n\n        // Iterate over the section of the sparse matrix\n        for (int entry = x[tid]; entry < y[tid]; entry++){\n            if (data[entry] != data[entry]){\n                // Check for NaN\n                data_value = nan(\"\");\n                data_index = 0;\n                break;\n            } else {\n                // Check for a value update\n                if (data[entry] ${op} data_value){\n                    data_index = indices[entry];\n                    data_value = data[entry];\n                }\n            }\n        }\n\n        // Store in the return function\n        z[tid] = data_index;\n    }'''\n\n    _max_arg_reduction_mod = _core.RawModule(\n        code=string.Template(_argmax_argmin_code).substitute(\n            func='max', op='>'),\n        options=('-std=c++11',),\n        name_expressions=['max_arg_reduction<float, int>',\n                          'max_arg_reduction<float, long long>',\n                          'max_arg_reduction<double, int>',\n                          'max_arg_reduction<double, long long>'])\n\n    _min_arg_reduction_mod = _core.RawModule(\n        code=string.Template(_argmax_argmin_code).substitute(\n            func='min', op='<'),\n        options=('-std=c++11',),\n        name_expressions=['min_arg_reduction<float, int>',\n                          'min_arg_reduction<float, long long>',\n                          'min_arg_reduction<double, int>',\n                          'min_arg_reduction<double, long long>'])\n\n    # TODO(leofang): rewrite a more load-balanced approach than this naive one?\n    _has_sorted_indices_kern = _core.ElementwiseKernel(\n        'raw T indptr, raw T indices',\n        'bool diff',\n        '''\n        bool diff_out = true;\n        for (T jj = indptr[i]; jj < indptr[i+1] - 1; jj++) {\n            if (indices[jj] > indices[jj+1]){\n                diff_out = false;\n            }\n        }\n        diff = diff_out;\n        ''', 'cupyx_scipy_sparse_has_sorted_indices')\n\n    # TODO(leofang): rewrite a more load-balanced approach than this naive one?\n    _has_canonical_format_kern = _core.ElementwiseKernel(\n        'raw T indptr, raw T indices',\n        'bool diff',\n        '''\n        bool diff_out = true;\n        if (indptr[i] > indptr[i+1]) {\n            diff = false;\n            return;\n        }\n        for (T jj = indptr[i]; jj < indptr[i+1] - 1; jj++) {\n            if (indices[jj] >= indices[jj+1]) {\n                diff_out = false;\n            }\n        }\n        diff = diff_out;\n        ''', 'cupyx_scipy_sparse_has_canonical_format')\n\n    def __init__(self, arg1, shape=None, dtype=None, copy=False):\n        if shape is not None:\n            if not _util.isshape(shape):\n                raise ValueError('invalid shape (must be a 2-tuple of int)')\n            shape = int(shape[0]), int(shape[1])\n\n        if _base.issparse(arg1):\n            x = arg1.asformat(self.format)\n            data = x.data\n            indices = x.indices\n            indptr = x.indptr\n\n            if arg1.format != self.format:\n                # When formats are different, all arrays are already copied\n                copy = False\n\n            if shape is None:\n                shape = arg1.shape\n\n        elif _util.isshape(arg1):\n            m, n = arg1\n            m, n = int(m), int(n)\n            data = basic.zeros(0, dtype if dtype else 'd')\n            indices = basic.zeros(0, 'i')\n            indptr = basic.zeros(self._swap(m, n)[0] + 1, dtype='i')\n            # the shape and copy arguments are ignored\n            shape = (m, n)\n            copy = False\n\n        elif scipy_available and scipy.sparse.issparse(arg1):\n            # Convert scipy.sparse to cupyx.scipy.sparse\n            x = arg1.asformat(self.format)\n            data = cupy.array(x.data)\n            indices = cupy.array(x.indices, dtype='i')\n            indptr = cupy.array(x.indptr, dtype='i')\n            copy = False\n\n            if shape is None:\n                shape = arg1.shape\n\n        elif isinstance(arg1, tuple) and len(arg1) == 2:\n            # Note: This implementation is not efficient, as it first\n            # constructs a sparse matrix with coo format, then converts it to\n            # compressed format.\n            sp_coo 
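# --- Editor's aside (hedged sketch): the `_max_arg_reduction_mod` and
# `_min_arg_reduction_mod` objects above rely on `name_expressions` so that
# the C++ template specializations are instantiated at compile time and later
# fetched by name; the same mechanism in isolation, with a hypothetical
# `twice` kernel:
import cupy

_src = r'''
template<typename T> __global__ void twice(T* x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= T(2);
}
'''
_mod = cupy.RawModule(code=_src, options=('-std=c++11',),
                      name_expressions=['twice<float>', 'twice<double>'])
_ker = _mod.get_function('twice<float>')  # specialization chosen per dtype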
= _coo.coo_matrix(arg1, shape=shape, dtype=dtype, copy=copy)\n sp_compressed = sp_coo.asformat(self.format)\n data = sp_compressed.data\n indices = sp_compressed.indices\n indptr = sp_compressed.indptr\n\n elif isinstance(arg1, tuple) and len(arg1) == 3:\n data, indices, indptr = arg1\n if not (_base.isdense(data) and data.ndim == 1 and\n _base.isdense(indices) and indices.ndim == 1 and\n _base.isdense(indptr) and indptr.ndim == 1):\n raise ValueError(\n 'data, indices, and indptr should be 1-D')\n\n if len(data) != len(indices):\n raise ValueError('indices and data should have the same size')\n\n elif _base.isdense(arg1):\n if arg1.ndim > 2:\n raise TypeError('expected dimension <= 2 array or matrix')\n elif arg1.ndim == 1:\n arg1 = arg1[None]\n elif arg1.ndim == 0:\n arg1 = arg1[None, None]\n data, indices, indptr = self._convert_dense(arg1)\n copy = False\n if shape is None:\n shape = arg1.shape\n\n else:\n raise ValueError(\n 'Unsupported initializer format')\n\n if dtype is None:\n dtype = data.dtype\n else:\n dtype = numpy.dtype(dtype)\n\n if dtype.char not in '?fdFD':\n raise ValueError(\n 'Only bool, float32, float64, complex64 and complex128 '\n 'are supported')\n\n data = data.astype(dtype, copy=copy)\n sparse_data._data_matrix.__init__(self, data)\n\n self.indices = indices.astype('i', copy=copy)\n self.indptr = indptr.astype('i', copy=copy)\n\n if shape is None:\n shape = self._swap(len(indptr) - 1, int(indices.max()) + 1)\n\n major, minor = self._swap(*shape)\n if len(indptr) != major + 1:\n raise ValueError('index pointer size (%d) should be (%d)'\n % (len(indptr), major + 1))\n\n self._descr = cusparse.MatDescriptor.create()\n self._shape = shape\n\n def _with_data(self, data, copy=True):\n if copy:\n return self.__class__(\n (data, self.indices.copy(), self.indptr.copy()),\n shape=self.shape,\n dtype=data.dtype)\n else:\n return self.__class__(\n (data, self.indices, self.indptr),\n shape=self.shape,\n dtype=data.dtype)\n\n def _convert_dense(self, x):\n raise NotImplementedError\n\n def _swap(self, x, y):\n raise NotImplementedError\n\n def _add_sparse(self, other, alpha, beta):\n raise NotImplementedError\n\n def _add(self, other, lhs_negative, rhs_negative):\n if cupy.isscalar(other):\n if other == 0:\n if lhs_negative:\n return -self\n else:\n return self.copy()\n else:\n raise NotImplementedError(\n 'adding a nonzero scalar to a sparse matrix is not '\n 'supported')\n elif _base.isspmatrix(other):\n alpha = -1 if lhs_negative else 1\n beta = -1 if rhs_negative else 1\n return self._add_sparse(other, alpha, beta)\n elif _base.isdense(other):\n if lhs_negative:\n if rhs_negative:\n return -self.todense() - other\n else:\n return other - self.todense()\n else:\n if rhs_negative:\n return self.todense() - other\n else:\n return self.todense() + other\n else:\n return NotImplemented\n\n def __add__(self, other):\n return self._add(other, False, False)\n\n def __radd__(self, other):\n return self._add(other, False, False)\n\n def __sub__(self, other):\n return self._add(other, False, True)\n\n def __rsub__(self, other):\n return self._add(other, True, False)\n\n def _get_intXint(self, row, col):\n major, minor = self._swap(row, col)\n data, indices, _ = _index._get_csr_submatrix_major_axis(\n self.data, self.indices, self.indptr, major, major + 1)\n dtype = data.dtype\n res = cupy.zeros((), dtype=dtype)\n if dtype.kind == 'c':\n _index._compress_getitem_complex_kern(\n data.real, data.imag, indices, minor, res.real, res.imag)\n else:\n _index._compress_getitem_kern(data, 
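# --- Editor's aside (hedged sketch): the constructor above also accepts the
# raw (data, indices, indptr) triplet directly; for example, a 2x3 CSR matrix
# built through the public cupyx.scipy.sparse API:
import cupy
from cupyx.scipy import sparse

data = cupy.array([1., 2., 3.])
indices = cupy.array([0, 2, 1], dtype='i')
indptr = cupy.array([0, 2, 3], dtype='i')  # row i spans indptr[i]:indptr[i+1]
m = sparse.csr_matrix((data, indices, indptr), shape=(2, 3))
# dense equivalent: [[1., 0., 2.], [0., 3., 0.]]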
indices, minor, res)\n return res\n\n def _get_sliceXslice(self, row, col):\n major, minor = self._swap(row, col)\n copy = major.step in (1, None)\n return self._major_slice(major)._minor_slice(minor, copy=copy)\n\n def _get_arrayXarray(self, row, col, not_found_val=0):\n # inner indexing\n idx_dtype = self.indices.dtype\n M, N = self._swap(*self.shape)\n major, minor = self._swap(row, col)\n major = major.astype(idx_dtype, copy=False)\n minor = minor.astype(idx_dtype, copy=False)\n\n val = _index._csr_sample_values(\n M, N, self.indptr, self.indices, self.data,\n major.ravel(), minor.ravel(),\n not_found_val)\n\n if major.ndim == 1:\n # Scipy returns `matrix` here\n return cupy.expand_dims(val, 0)\n return self.__class__(val.reshape(major.shape))\n\n def _get_columnXarray(self, row, col):\n # outer indexing\n major, minor = self._swap(row, col)\n return self._major_index_fancy(major)._minor_index_fancy(minor)\n\n def _major_index_fancy(self, idx):\n \"\"\"Index along the major axis where idx is an array of ints.\n \"\"\"\n _, N = self._swap(*self.shape)\n M = idx.size\n new_shape = self._swap(M, N)\n if self.nnz == 0 or M == 0:\n return self.__class__(new_shape)\n\n return self.__class__(\n _index._csr_row_index(self.data, self.indices, self.indptr, idx),\n shape=new_shape, copy=False)\n\n def _minor_index_fancy(self, idx):\n \"\"\"Index along the minor axis where idx is an array of ints.\n \"\"\"\n M, _ = self._swap(*self.shape)\n N = idx.size\n new_shape = self._swap(M, N)\n if self.nnz == 0 or N == 0:\n return self.__class__(new_shape)\n\n if idx.size * M < self.nnz:\n # TODO (asi1024): Implement faster algorithm.\n pass\n\n return self._tocsx()._major_index_fancy(idx)._tocsx()\n\n def _major_slice(self, idx, copy=False):\n \"\"\"Index along the major axis where idx is a slice object.\n \"\"\"\n M, N = self._swap(*self.shape)\n start, stop, step = idx.indices(M)\n\n if start == 0 and stop == M and step == 1:\n return self.copy() if copy else self\n\n M = len(range(start, stop, step))\n new_shape = self._swap(M, N)\n\n if step == 1:\n if M == 0 or self.nnz == 0:\n return self.__class__(new_shape, dtype=self.dtype)\n return self.__class__(\n _index._get_csr_submatrix_major_axis(\n self.data, self.indices, self.indptr, start, stop),\n shape=new_shape, copy=copy)\n rows = cupy.arange(start, stop, step, dtype=self.indptr.dtype)\n return self._major_index_fancy(rows)\n\n def _minor_slice(self, idx, copy=False):\n \"\"\"Index along the minor axis where idx is a slice object.\n \"\"\"\n M, N = self._swap(*self.shape)\n start, stop, step = idx.indices(N)\n\n if start == 0 and stop == N and step == 1:\n return self.copy() if copy else self\n\n N = len(range(start, stop, step))\n new_shape = self._swap(M, N)\n\n if N == 0 or self.nnz == 0:\n return self.__class__(new_shape)\n if step == 1:\n return self.__class__(\n _index._get_csr_submatrix_minor_axis(\n self.data, self.indices, self.indptr, start, stop),\n shape=new_shape, copy=False)\n cols = cupy.arange(start, stop, step, dtype=self.indices.dtype)\n return self._minor_index_fancy(cols)\n\n def _set_intXint(self, row, col, x):\n i, j = self._swap(row, col)\n self._set_many(i, j, x)\n\n def _set_arrayXarray(self, row, col, x):\n i, j = self._swap(row, col)\n self._set_many(i, j, x)\n\n def _set_arrayXarray_sparse(self, row, col, x):\n # clear entries that will be overwritten\n self._zero_many(*self._swap(row, col))\n\n M, N = row.shape # matches col.shape\n broadcast_row = M != 1 and x.shape[0] == 1\n broadcast_col = N != 1 and x.shape[1] == 
1\n r, c = x.row, x.col\n x = cupy.asarray(x.data, dtype=self.dtype)\n if broadcast_row:\n r = cupy.repeat(cupy.arange(M), r.size)\n c = cupy.tile(c, M)\n x = cupy.tile(x, M)\n if broadcast_col:\n r = cupy.repeat(r, N)\n c = cupy.tile(cupy.arange(N), c.size)\n x = cupy.repeat(x, N)\n # only assign entries in the new sparsity structure\n i, j = self._swap(row[r, c], col[r, c])\n self._set_many(i, j, x)\n\n def _prepare_indices(self, i, j):\n M, N = self._swap(*self.shape)\n\n def check_bounds(indices, bound):\n idx = indices.max()\n if idx >= bound:\n raise IndexError('index (%d) out of range (>= %d)' %\n (idx, bound))\n idx = indices.min()\n if idx < -bound:\n raise IndexError('index (%d) out of range (< -%d)' %\n (idx, bound))\n\n i = cupy.array(i, dtype=self.indptr.dtype,\n copy=True, ndmin=1).ravel()\n j = cupy.array(j, dtype=self.indices.dtype,\n copy=True, ndmin=1).ravel()\n check_bounds(i, M)\n check_bounds(j, N)\n return i, j, M, N\n\n def _set_many(self, i, j, x):\n \"\"\"Sets value at each (i, j) to x\n Here (i,j) index major and minor respectively, and must not contain\n duplicate entries.\n \"\"\"\n i, j, M, N = self._prepare_indices(i, j)\n x = cupy.array(x, dtype=self.dtype, copy=True, ndmin=1).ravel()\n\n new_sp = cupyx.scipy.sparse.csr_matrix(\n (cupy.arange(self.nnz, dtype=cupy.float32),\n self.indices, self.indptr), shape=(M, N))\n\n offsets = new_sp._get_arrayXarray(\n i, j, not_found_val=-1).astype(cupy.int32).ravel()\n\n if -1 not in offsets:\n # only affects existing non-zero cells\n self.data[offsets] = x\n return\n\n else:\n warnings.warn('Changing the sparsity structure of a '\n '{}_matrix is expensive.'\n ' lil_matrix is more efficient.'.format(self.format))\n # replace where possible\n mask = offsets > -1\n self.data[offsets[mask]] = x[mask]\n # only insertions remain\n mask = ~mask\n i = i[mask]\n i[i < 0] += M\n j = j[mask]\n j[j < 0] += N\n self._insert_many(i, j, x[mask])\n\n def _zero_many(self, i, j):\n \"\"\"Sets value at each (i, j) to zero, preserving sparsity structure.\n Here (i,j) index major and minor respectively.\n \"\"\"\n i, j, M, N = self._prepare_indices(i, j)\n\n new_sp = cupyx.scipy.sparse.csr_matrix(\n (cupy.arange(self.nnz, dtype=cupy.float32),\n self.indices, self.indptr), shape=(M, N))\n\n offsets = new_sp._get_arrayXarray(\n i, j, not_found_val=-1).astype(cupy.int32).ravel()\n\n # only assign zeros to the existing sparsity structure\n self.data[offsets[offsets > -1]] = 0\n\n def _perform_insert(self, indices_inserts, data_inserts,\n rows, row_counts, idx_dtype):\n \"\"\"Insert new elements into current sparse matrix in sorted order\"\"\"\n indptr_diff = cupy.diff(self.indptr)\n indptr_diff[rows] += row_counts\n\n new_indptr = cupy.empty(self.indptr.shape, dtype=idx_dtype)\n new_indptr[0] = idx_dtype(0)\n new_indptr[1:] = indptr_diff\n\n # Build output arrays\n cupy.cumsum(new_indptr, out=new_indptr)\n out_nnz = int(new_indptr[-1])\n\n new_indices = cupy.empty(out_nnz, dtype=idx_dtype)\n new_data = cupy.empty(out_nnz, dtype=self.data.dtype)\n\n # Build an indexed indptr that contains the offsets for each\n # row but only for in i, j, and x.\n new_indptr_lookup = cupy.zeros(new_indptr.size, dtype=idx_dtype)\n new_indptr_lookup[1:][rows] = row_counts\n cupy.cumsum(new_indptr_lookup, out=new_indptr_lookup)\n\n _index._insert_many_populate_arrays(\n indices_inserts, data_inserts, new_indptr_lookup,\n self.indptr, self.indices, self.data, new_indptr, new_indices,\n new_data, size=self.indptr.size-1)\n\n self.indptr = new_indptr\n self.indices = 
new_indices\n self.data = new_data\n\n def _insert_many(self, i, j, x):\n \"\"\"Inserts new nonzero at each (i, j) with value x\n Here (i,j) index major and minor respectively.\n i, j and x must be non-empty, 1d arrays.\n Inserts each major group (e.g. all entries per row) at a time.\n Maintains has_sorted_indices property.\n Modifies i, j, x in place.\n \"\"\"\n\n order = cupy.argsort(i) # stable for duplicates\n i = i.take(order)\n j = j.take(order)\n x = x.take(order)\n\n # Update index data type\n\n idx_dtype = _sputils.get_index_dtype(\n (self.indices, self.indptr), maxval=(\n self.nnz + x.size))\n\n self.indptr = self.indptr.astype(idx_dtype)\n self.indices = self.indices.astype(idx_dtype)\n self.data = self.data.astype(self.dtype)\n\n indptr_inserts, indices_inserts, data_inserts = \\\n _index._select_last_indices(i, j, x, idx_dtype)\n\n rows, ui_indptr = cupy.unique(indptr_inserts, return_index=True)\n\n to_add = cupy.empty(ui_indptr.size+1, ui_indptr.dtype)\n to_add[-1] = j.size\n to_add[:-1] = ui_indptr\n ui_indptr = to_add\n\n # Compute the counts for each row in the insertion array\n row_counts = cupy.zeros(ui_indptr.size-1, dtype=idx_dtype)\n cupyx.scatter_add(\n row_counts, cupy.searchsorted(rows, indptr_inserts), 1)\n\n self._perform_insert(indices_inserts, data_inserts,\n rows, row_counts, idx_dtype)\n\n def __get_has_canonical_format(self):\n \"\"\"Determine whether the matrix has sorted indices and no duplicates.\n\n Returns\n bool: ``True`` if the above applies, otherwise ``False``.\n\n .. note::\n :attr:`has_canonical_format` implies :attr:`has_sorted_indices`, so\n if the latter flag is ``False``, so will the former be; if the\n former is found ``True``, the latter flag is also set.\n\n .. warning::\n Getting this property might synchronize the device.\n\n \"\"\"\n # Modified from the SciPy counterpart.\n\n # In CuPy the implemented conversions do not exactly match those of\n # SciPy's, so it's hard to put this exactly as where it is in SciPy,\n # but this should do the job.\n if self.data.size == 0:\n self._has_canonical_format = True\n # check to see if result was cached\n elif not getattr(self, '_has_sorted_indices', True):\n # not sorted => not canonical\n self._has_canonical_format = False\n elif not hasattr(self, '_has_canonical_format'):\n is_canonical = self._has_canonical_format_kern(\n self.indptr, self.indices, size=self.indptr.size-1)\n self._has_canonical_format = bool(is_canonical.all())\n return self._has_canonical_format\n\n def __set_has_canonical_format(self, val):\n \"\"\"Taken from SciPy as is.\"\"\"\n self._has_canonical_format = bool(val)\n if val:\n self.has_sorted_indices = True\n\n has_canonical_format = property(fget=__get_has_canonical_format,\n fset=__set_has_canonical_format)\n\n def __get_sorted(self):\n \"\"\"Determine whether the matrix has sorted indices.\n\n Returns\n bool:\n ``True`` if the indices of the matrix are in sorted order,\n otherwise ``False``.\n\n .. 
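# --- Editor's aside (hedged sketch): as the warning in `_set_many` above
# says, assigning outside the existing sparsity structure goes through the
# expensive `_insert_many` path, while hits on existing nonzeros are cheap
# in-place writes to `data`:
import cupy
from cupyx.scipy import sparse

m = sparse.csr_matrix(cupy.array([[1., 0.], [0., 2.]]))
m[0, 0] = 5.  # existing nonzero: in-place update of m.data
m[0, 1] = 7.  # new nonzero: warns and rebuilds indices/indptr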
warning::\n Getting this property might synchronize the device.\n\n \"\"\"\n # Modified from the SciPy counterpart.\n\n # In CuPy the implemented conversions do not exactly match those of\n # SciPy's, so it's hard to put this exactly as where it is in SciPy,\n # but this should do the job.\n if self.data.size == 0:\n self._has_sorted_indices = True\n # check to see if result was cached\n elif not hasattr(self, '_has_sorted_indices'):\n is_sorted = self._has_sorted_indices_kern(\n self.indptr, self.indices, size=self.indptr.size-1)\n self._has_sorted_indices = bool(is_sorted.all())\n return self._has_sorted_indices\n\n def __set_sorted(self, val):\n self._has_sorted_indices = bool(val)\n\n has_sorted_indices = property(fget=__get_sorted, fset=__set_sorted)\n\n def get_shape(self):\n \"\"\"Returns the shape of the matrix.\n\n Returns:\n tuple: Shape of the matrix.\n\n \"\"\"\n return self._shape\n\n def getnnz(self, axis=None):\n \"\"\"Returns the number of stored values, including explicit zeros.\n\n Args:\n axis: Not supported yet.\n\n Returns:\n int: The number of stored values.\n\n \"\"\"\n if axis is None:\n return self.data.size\n else:\n raise ValueError\n\n def sorted_indices(self):\n \"\"\"Return a copy of this matrix with sorted indices\n\n .. warning::\n Calling this function might synchronize the device.\n \"\"\"\n # Taken from SciPy as is.\n A = self.copy()\n A.sort_indices()\n return A\n\n def sort_indices(self):\n # Unlike in SciPy, here this is implemented in child classes because\n # each child needs to call its own sort function from cuSPARSE\n raise NotImplementedError\n\n def sum_duplicates(self):\n \"\"\"Eliminate duplicate matrix entries by adding them together.\n\n .. note::\n This is an *in place* operation.\n\n .. warning::\n Calling this function might synchronize the device.\n\n .. 
seealso::\n            :meth:`scipy.sparse.csr_matrix.sum_duplicates`,\n            :meth:`scipy.sparse.csc_matrix.sum_duplicates`\n        \"\"\"\n        if self.has_canonical_format:\n            return\n        # TODO(leofang): add a kernel for compressed sparse matrices without\n        # converting to coo\n        coo = self.tocoo()\n        coo.sum_duplicates()\n        self.__init__(coo.asformat(self.format))\n        self.has_canonical_format = True\n\n    #####################\n    # Reduce operations #\n    #####################\n\n    def _minor_reduce(self, ufunc, axis, nonzero):\n        \"\"\"Reduce nonzeros with a ufunc over the minor axis when non-empty\n\n        Can be applied to a function of self.data by supplying data parameter.\n        Warning: this does not call sum_duplicates()\n\n        Args:\n            ufunc (object): Function handle giving the operation to be\n                conducted.\n            axis (int): Axis over which the reduction should be\n                conducted.\n\n        Returns:\n            (cupy.ndarray): Reduce result for nonzeros in each\n                major_index.\n\n        \"\"\"\n        out_shape = self.shape[1 - axis]\n        # Call to the appropriate kernel function\n        out = cupy.zeros(out_shape).astype(cupy.float64)\n        if nonzero:\n            kerns = {cupy.amax: self._max_nonzero_reduction_kern,\n                     cupy.amin: self._min_nonzero_reduction_kern}\n        else:\n            kerns = {cupy.amax: self._max_reduction_kern,\n                     cupy.amin: self._min_reduction_kern}\n\n        kerns[ufunc]((out_shape,), (1,),\n                     (self.data.astype(cupy.float64),\n                      self.indptr[:len(self.indptr) - 1],\n                      self.indptr[1:], cupy.int64(self.shape[axis]),\n                      out))\n\n        return out\n\n    def _arg_minor_reduce(self, ufunc, axis):\n        \"\"\"Reduce nonzeros with a ufunc over the minor axis when non-empty\n\n        Can be applied to a function of self.data by supplying data parameter.\n        Warning: this does not call sum_duplicates()\n\n        Args:\n            ufunc (object): Function handle giving the operation to be\n                conducted.\n            axis (int): Axis over which the reduction should be conducted\n\n        Returns:\n            (cupy.ndarray): Reduce result for nonzeros in each\n                major_index\n\n        \"\"\"\n\n        # Call to the appropriate kernel function\n        # Create the vector to hold output\n        # Note: it's important to set \"int\" here, following what SciPy\n        # does, as the outcome dtype is platform dependent\n        out_shape = self.shape[1 - axis]\n        out = cupy.zeros(out_shape, dtype=int)\n\n        # Perform the calculation\n        ker_name = '_arg_reduction<{}, {}>'.format(\n            _scalar.get_typename(self.data.dtype),\n            _scalar.get_typename(out.dtype))\n\n        if ufunc == cupy.argmax:\n            ker = self._max_arg_reduction_mod.get_function('max' + ker_name)\n        elif ufunc == cupy.argmin:\n            ker = self._min_arg_reduction_mod.get_function('min' + ker_name)\n\n        ker((out_shape,), (1,),\n            (self.data, self.indices,\n             self.indptr[:len(self.indptr) - 1],\n             self.indptr[1:], cupy.int64(self.shape[axis]),\n             out))\n\n        return out\n", "import pickle\n\nimport numpy\nimport pytest\ntry:\n    import scipy.sparse\n    scipy_available = True\nexcept ImportError:\n    scipy_available = False\n\nimport cupy\nfrom cupy import testing\nfrom cupy.cuda import driver\nfrom cupy.cuda import runtime\nfrom cupyx.scipy import sparse\n\n\ndef _make(xp, sp, dtype):\n    data = xp.array([0, 1, 2, 3], dtype)\n    row = xp.array([0, 0, 1, 2], 'i')\n    col = xp.array([0, 1, 3, 2], 'i')\n    # 0, 1, 0, 0\n    # 0, 0, 0, 2\n    # 0, 0, 3, 0\n    return sp.coo_matrix((data, (row, col)), shape=(3, 4))\n\n\ndef _make_complex(xp, sp, dtype):\n    data = xp.array([0, 1, 2, 3], dtype)\n    if dtype in [numpy.complex64, numpy.complex128]:\n        data = data - 1j\n    row = xp.array([0, 0, 1, 2], 'i')\n    col = xp.array([0, 1, 3, 2], 'i')\n    # 0, 1 - 1j, 0, 0\n    # 0, 0, 0, 2 - 1j\n    # 0, 0, 3 - 1j, 0\n    return 
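# --- Editor's aside (hedged sketch): the reduction kernels and the
# `_minor_reduce`/`_arg_minor_reduce` helpers above back the public min/max
# reductions provided by the minmax mixin; typical usage, under that
# assumption:
import cupy
from cupyx.scipy import sparse

m = sparse.csr_matrix(cupy.array([[0., 2., 0.], [3., 0., 4.]]))
row_max = m.max(axis=1)        # per-row maxima
col_argmin = m.argmin(axis=0)  # per-column index of the minimum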
sp.coo_matrix((data, (row, col)), shape=(3, 4))\n\n\ndef _make2(xp, sp, dtype):\n data = xp.array([1, 2, 3, 4], dtype)\n row = xp.array([0, 1, 1, 2], 'i')\n col = xp.array([2, 1, 2, 2], 'i')\n # 0, 0, 1, 0\n # 0, 2, 3, 0\n # 0, 0, 4, 0\n return sp.coo_matrix((data, (row, col)), shape=(3, 4))\n\n\ndef _make3(xp, sp, dtype):\n data = xp.array([1, 2, 3, 4, 5], dtype)\n row = xp.array([0, 1, 1, 3, 3], 'i')\n col = xp.array([0, 2, 1, 0, 2], 'i')\n # 1, 0, 0\n # 0, 3, 2\n # 0, 0, 0\n # 4, 0, 5\n return sp.coo_matrix((data, (row, col)), shape=(4, 3))\n\n\ndef _make_unordered(xp, sp, dtype):\n data = xp.array([1, 4, 3, 2], dtype)\n row = xp.array([0, 2, 1, 0], 'i')\n col = xp.array([0, 2, 3, 1], 'i')\n # 1, 2, 0, 0\n # 0, 0, 0, 3\n # 0, 0, 4, 0\n return sp.coo_matrix((data, (row, col)), shape=(3, 4))\n\n\ndef _make_duplicate(xp, sp, dtype):\n data = xp.array([0, 1, 2, 3, 4, 5], dtype)\n row = xp.array([1, 1, 1, 1, 0, 1], 'i')\n col = xp.array([0, 0, 2, 0, 0, 2], 'i')\n # 4, 0, 0, 0\n # 4, 0, 7, 0\n # 0, 0, 0, 0\n return sp.coo_matrix((data, (row, col)), shape=(3, 4))\n\n\ndef _make_empty(xp, sp, dtype):\n data = xp.array([], dtype)\n row = xp.array([], 'i')\n col = xp.array([], 'i')\n return sp.coo_matrix((data, (row, col)), shape=(3, 4))\n\n\ndef _make_square(xp, sp, dtype):\n data = xp.array([0, 1, 2, 3], dtype)\n row = xp.array([0, 0, 1, 2], 'i')\n col = xp.array([0, 2, 0, 2], 'i')\n # 0, 1, 0\n # 2, 0, 0\n # 0, 0, 3\n return sp.coo_matrix((data, (row, col)), shape=(3, 3))\n\n\ndef _make_shape(xp, sp, dtype):\n return sp.coo_matrix((3, 4))\n\n\ndef _make_sum_dup(xp, sp, dtype):\n # 1 0 0\n # 1 1 0\n # 1 1 1\n data = xp.array([1, 1, 1, 1, 1, 1], dtype)\n row = xp.array([0, 1, 1, 2, 2, 2], 'i')\n col = xp.array([0, 0, 1, 0, 1, 2], 'i')\n return sp.coo_matrix((data, (row, col)), shape=(3, 3))\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],\n}))\nclass TestCooMatrix:\n\n @pytest.fixture(autouse=True)\n def setUp(self):\n self.m = _make(cupy, sparse, self.dtype)\n\n def test_dtype(self):\n assert self.m.dtype == self.dtype\n\n def test_data(self):\n assert self.m.data.dtype == self.dtype\n testing.assert_array_equal(\n self.m.data, cupy.array([0, 1, 2, 3], self.dtype))\n\n def test_row(self):\n assert self.m.row.dtype == numpy.int32\n testing.assert_array_equal(\n self.m.row, cupy.array([0, 0, 1, 2], self.dtype))\n\n def test_col(self):\n assert self.m.col.dtype == numpy.int32\n testing.assert_array_equal(\n self.m.col, cupy.array([0, 1, 3, 2], self.dtype))\n\n def test_init_copy(self):\n n = sparse.coo_matrix(self.m)\n assert n is not self.m\n cupy.testing.assert_array_equal(n.toarray(), self.m.toarray())\n\n def test_init_copy_other_sparse(self):\n n = sparse.coo_matrix(self.m.tocsr())\n cupy.testing.assert_array_equal(n.toarray(), self.m.toarray())\n\n @testing.with_requires('scipy')\n def test_init_copy_scipy_sparse(self):\n m = _make(numpy, scipy.sparse, self.dtype)\n n = sparse.coo_matrix(m)\n assert isinstance(n.data, cupy.ndarray)\n assert isinstance(n.row, cupy.ndarray)\n assert isinstance(n.col, cupy.ndarray)\n cupy.testing.assert_array_equal(n.data, m.data)\n cupy.testing.assert_array_equal(n.row, m.row)\n cupy.testing.assert_array_equal(n.col, m.col)\n assert n.shape == m.shape\n\n @testing.with_requires('scipy')\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_init_copy_other_scipy_sparse(self, xp, sp):\n m = _make(numpy, scipy.sparse, self.dtype)\n n = sp.coo_matrix(m.tocsc())\n assert len(n.data) == 
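# --- Editor's aside: the `_make*` helpers above encode small fixtures in COO
# triplet form; `_make`, for instance, corresponds to this grounded example:
import cupy
from cupyx.scipy import sparse

data = cupy.array([0., 1., 2., 3.])
row = cupy.array([0, 0, 1, 2], 'i')
col = cupy.array([0, 1, 3, 2], 'i')
m = sparse.coo_matrix((data, (row, col)), shape=(3, 4))
# m.toarray() -> [[0, 1, 0, 0], [0, 0, 0, 2], [0, 0, 3, 0]]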
len(m.data)\n assert len(n.row) == len(m.row)\n assert len(n.col) == len(m.col)\n assert n.shape == m.shape\n return n\n\n def test_pickle_roundtrip(self):\n s = _make(cupy, sparse, self.dtype)\n s2 = pickle.loads(pickle.dumps(s))\n assert s.shape == s2.shape\n assert s.dtype == s2.dtype\n if scipy_available:\n assert (s.get() != s2.get()).count_nonzero() == 0\n\n def test_shape(self):\n assert self.m.shape == (3, 4)\n\n def test_ndim(self):\n assert self.m.ndim == 2\n\n def test_nnz(self):\n assert self.m.nnz == 4\n\n def test_conj(self):\n n = _make_complex(cupy, sparse, self.dtype)\n cupy.testing.assert_array_equal(n.conj().data, n.data.conj())\n\n def test_has_canonical_format(self):\n assert self.m.has_canonical_format is False\n\n @testing.with_requires('scipy')\n def test_get(self):\n m = self.m.get()\n assert isinstance(m, scipy.sparse.coo_matrix)\n expect = [\n [0, 1, 0, 0],\n [0, 0, 0, 2],\n [0, 0, 3, 0]\n ]\n numpy.testing.assert_allclose(m.toarray(), expect)\n\n @testing.with_requires('scipy')\n def test_str(self):\n if numpy.dtype(self.dtype).kind == 'f':\n expect = ''' (0, 0)\\t0.0\n (0, 1)\\t1.0\n (1, 3)\\t2.0\n (2, 2)\\t3.0'''\n elif numpy.dtype(self.dtype).kind == 'c':\n expect = ''' (0, 0)\\t0j\n (0, 1)\\t(1+0j)\n (1, 3)\\t(2+0j)\n (2, 2)\\t(3+0j)'''\n assert str(self.m) == expect\n\n def test_toarray(self):\n m = self.m.toarray()\n expect = [\n [0, 1, 0, 0],\n [0, 0, 0, 2],\n [0, 0, 3, 0]\n ]\n cupy.testing.assert_allclose(m, expect)\n\n # reshape\n def test_reshape_0(self):\n assert self.m.reshape((12, 1)).shape == (12, 1)\n\n def test_reshape_1(self):\n m = self.m.reshape((1, 12)).toarray()\n expect = [[0, 1, 0, 0, 0, 0, 0, 2, 0, 0, 3, 0]]\n cupy.testing.assert_allclose(m, expect)\n\n def test_reshape_2(self):\n m = self.m.reshape((1, 12), order='F').toarray()\n expect = [[1, 0, 0, 0, 0, 2, 0, 0, 0, 0, 3, 0]]\n cupy.testing.assert_allclose(m, expect)\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],\n}))\[email protected]_requires('scipy')\nclass TestCooMatrixInit:\n\n @pytest.fixture(autouse=True)\n def setUp(self):\n self.shape = (3, 4)\n\n def data(self, xp):\n return xp.array([0, 1, 2, 3], self.dtype)\n\n def row(self, xp):\n return xp.array([0, 0, 1, 2], 'i')\n\n def col(self, xp):\n return xp.array([0, 1, 3, 2], 'i')\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_shape_none(self, xp, sp):\n x = sp.coo_matrix(\n (self.data(xp), (self.row(xp), self.col(xp))), shape=None)\n assert x.shape == (3, 4)\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_dtype(self, xp, sp):\n data = self.data(xp).real.astype('i')\n x = sp.coo_matrix(\n (data, (self.row(xp), self.col(xp))), dtype=self.dtype)\n assert x.dtype == self.dtype\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_copy_true(self, xp, sp):\n data = self.data(xp)\n row = self.row(xp)\n col = self.col(xp)\n x = sp.coo_matrix((data, (row, col)), copy=True)\n\n assert data is not x.data\n assert row is not x.row\n assert col is not x.col\n\n def test_init_dense(self):\n m = cupy.array([[0, 1, 0, 2],\n [0, 0, 0, 0],\n [0, 0, 3, 0]], dtype=self.dtype)\n n = sparse.coo_matrix(m)\n assert n.nnz == 3\n assert n.shape == (3, 4)\n cupy.testing.assert_array_equal(n.data, [1, 2, 3])\n cupy.testing.assert_array_equal(n.row, [0, 0, 2])\n cupy.testing.assert_array_equal(n.col, [1, 3, 2])\n\n def test_init_dense_allzero(self):\n m = cupy.array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]], dtype=self.dtype)\n n = sparse.coo_matrix(m)\n assert 
n.nnz == 0\n assert n.shape == (3, 4)\n cupy.testing.assert_array_equal(n.data, [])\n cupy.testing.assert_array_equal(n.row, [])\n cupy.testing.assert_array_equal(n.col, [])\n\n def test_init_dense_check_if_row_major(self):\n rows, cols = 10, 9\n for order in ('C', 'F'):\n d = testing.shaped_random((rows, cols), dtype=self.dtype,\n order=order)\n mask = testing.shaped_random((rows, cols), scale=1.0)\n d[mask > 0.5] = 0\n s = sparse.coo_matrix(d)\n for i in range(s.nnz):\n assert 0 <= s.row[i] < rows\n assert 0 <= s.col[i] < cols\n assert s.data[i] == d[s.row[i], s.col[i]]\n if i == 0:\n continue\n assert ((s.row[i-1] < s.row[i]) or\n (s.row[i-1] == s.row[i] and s.col[i-1] < s.col[i]))\n assert s.has_canonical_format\n\n def test_invalid_format(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n with pytest.raises(TypeError):\n sp.coo_matrix(\n (self.data(xp), self.row(xp)), shape=self.shape)\n\n @testing.numpy_cupy_allclose(sp_name='sp', atol=1e-5)\n def test_intlike_shape(self, xp, sp):\n s = sp.coo_matrix((self.data(xp), (self.row(xp), self.col(xp))),\n shape=(xp.array(self.shape[0]),\n xp.int32(self.shape[1])))\n assert isinstance(s.shape[0], int)\n assert isinstance(s.shape[1], int)\n return s\n\n def test_shape_invalid(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n with pytest.raises(ValueError):\n sp.coo_matrix(\n (self.data(xp), (self.row(xp), self.col(xp))),\n shape=(2,))\n\n def test_data_invalid(self):\n with pytest.raises(ValueError):\n sparse.coo_matrix(\n ('invalid', (self.row(cupy), self.col(cupy))),\n shape=self.shape)\n\n def test_data_invalid_ndim(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n with pytest.raises(ValueError):\n sp.coo_matrix(\n (self.data(xp)[None], (self.row(xp), self.col(xp))),\n shape=self.shape)\n\n def test_row_invalid(self):\n with pytest.raises(ValueError):\n sparse.coo_matrix(\n (self.data(cupy), ('invalid', self.col(cupy))),\n shape=self.shape)\n\n def test_row_invalid_ndim(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n with pytest.raises(ValueError):\n sp.coo_matrix(\n (self.data(xp), (self.row(xp)[None], self.col(xp))),\n shape=self.shape)\n\n def test_col_invalid(self):\n with pytest.raises(ValueError):\n sparse.coo_matrix(\n (self.data(cupy), (self.row(cupy), 'invalid')),\n shape=self.shape)\n\n def test_col_invalid_ndim(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n with pytest.raises(ValueError):\n sp.coo_matrix(\n (self.data(xp), (self.row(xp), self.col(xp)[None])),\n shape=self.shape)\n\n def test_data_different_length(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n data = xp.arange(5, dtype=self.dtype)\n with pytest.raises(TypeError):\n sp.coo_matrix(\n (data(xp), (self.row(xp), self.col(xp))),\n shape=self.shape)\n\n def test_row_different_length(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n row = xp.arange(5, dtype=self.dtype)\n with pytest.raises(TypeError):\n sp.coo_matrix(\n (self.data(xp), (row(xp), self.col(xp))),\n shape=self.shape)\n\n def test_col_different_length(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n col = xp.arange(5, dtype=self.dtype)\n with pytest.raises(TypeError):\n sp.coo_matrix(\n (self.data(xp), (self.row(xp), col(xp))),\n shape=self.shape)\n\n def test_fail_to_infer_shape(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n data = xp.array([], dtype=self.dtype)\n row = xp.array([], dtype='i')\n col = xp.array([], dtype='i')\n with 
pytest.raises(ValueError):\n sp.coo_matrix((data, (row, col)), shape=None)\n\n def test_row_too_large(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n row = xp.array([0, 0, 1, 3], 'i')\n with pytest.raises(ValueError):\n sp.coo_matrix(\n (self.data(xp), (row, self.col(xp))),\n shape=self.shape)\n\n def test_row_too_small(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n row = xp.array([0, -1, 1, 2], 'i')\n with pytest.raises(ValueError):\n sp.coo_matrix(\n (self.data(xp), (row, self.col(xp))),\n shape=self.shape)\n\n def test_col_too_large(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n col = xp.array([0, 1, 4, 2], 'i')\n with pytest.raises(ValueError):\n sp.coo_matrix(\n (self.data(xp), (self.row(xp), col)),\n shape=self.shape)\n\n def test_col_too_small(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n col = xp.array([0, -1, 3, 2], 'i')\n with pytest.raises(ValueError):\n sp.coo_matrix(\n (self.data(xp), (self.row(xp), col)),\n shape=self.shape)\n\n def test_unsupported_dtype(self):\n with pytest.raises(ValueError):\n sparse.coo_matrix(\n (self.data(cupy), (self.row(cupy), self.col(cupy))),\n shape=self.shape, dtype='i')\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_conj(self, xp, sp):\n n = _make_complex(xp, sp, self.dtype)\n cupy.testing.assert_array_equal(n.conj().data, n.data.conj())\n\n\[email protected](*testing.product({\n 'make_method': [\n '_make', '_make_unordered', '_make_empty', '_make_duplicate',\n '_make_shape'],\n 'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],\n}))\[email protected]_requires('scipy')\nclass TestCooMatrixScipyComparison:\n\n @property\n def make(self):\n return globals()[self.make_method]\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_dtype(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m.dtype\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_nnz(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m.getnnz()\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_asfptype(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return m.asfptype()\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_toarray(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m.toarray()\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_A(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m.A\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_tocoo(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return m.tocoo()\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_tocoo_copy(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n n = m.tocoo(copy=True)\n assert m.data is not n.data\n assert m.row is not n.row\n assert m.col is not n.col\n return n\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_tocsc(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n out = m.tocsc()\n assert out.has_canonical_format\n return out\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_tocsc_copy(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n n = m.tocsc(copy=True)\n assert m.data is not n.data\n assert n.has_canonical_format\n return n\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_tocsr(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n out = m.tocsr()\n assert out.has_canonical_format\n return out\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_tocsr_copy(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n n = m.tocsr(copy=True)\n assert m.data is not n.data\n assert 
n.has_canonical_format\n return n\n\n # dot\n @testing.with_requires('scipy>=1.8.0rc1')\n def test_dot_scalar(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make(xp, sp, self.dtype)\n with pytest.raises(ValueError):\n m.dot(2.0)\n\n @testing.with_requires('scipy>=1.8.0rc1')\n def test_dot_numpy_scalar(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make(xp, sp, self.dtype)\n with pytest.raises(ValueError):\n m.dot(numpy.dtype(self.dtype).type(2.0))\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_dot_csr(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype)\n return m.dot(x)\n\n def test_dot_csr_invalid_shape(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make(xp, sp, self.dtype)\n x = sp.csr_matrix((5, 3), dtype=self.dtype)\n with pytest.raises(ValueError):\n m.dot(x)\n\n @pytest.mark.skipif(runtime.is_hip and driver.get_build_version() < 400,\n reason='no working implementation')\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_dot_csc(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype).tocsc()\n return m.dot(x)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_dot_sparse(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype).tocoo()\n return m.dot(x)\n\n @testing.with_requires('scipy>=1.8.0rc1')\n def test_dot_zero_dim(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make(xp, sp, self.dtype)\n x = xp.array(2, dtype=self.dtype)\n with pytest.raises(ValueError):\n m.dot(x)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_dot_dense_vector(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n x = xp.arange(4).astype(self.dtype)\n return m.dot(x)\n\n def test_dot_dense_vector_invalid_shape(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make(xp, sp, self.dtype)\n x = xp.arange(5).astype(self.dtype)\n with pytest.raises(ValueError):\n m.dot(x)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_dot_dense_matrix(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n x = xp.arange(8).reshape(4, 2).astype(self.dtype)\n return m.dot(x)\n\n def test_dot_dense_matrix_invalid_shape(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make(xp, sp, self.dtype)\n x = xp.arange(10).reshape(5, 2).astype(self.dtype)\n with pytest.raises(ValueError):\n m.dot(x)\n\n def test_dot_dense_ndim3(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make(xp, sp, self.dtype)\n x = xp.arange(24).reshape(4, 2, 3).astype(self.dtype)\n with pytest.raises(ValueError):\n m.dot(x)\n\n def test_dot_unsupported(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make(xp, sp, self.dtype)\n with pytest.raises(TypeError):\n m.dot(None)\n\n # __add__\n @testing.numpy_cupy_allclose(sp_name='sp', _check_sparse_format=False)\n def test_add_zero(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return m + 0\n\n def test_add_scalar(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make(xp, sp, self.dtype)\n with pytest.raises(NotImplementedError):\n m + 1\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_add_csr(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n n = _make2(xp, sp, self.dtype)\n return m + n\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_add_coo(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n n = _make2(xp, sp, self.dtype).tocoo()\n return m + n\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def 
test_add_dense(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n n = xp.arange(12).reshape(3, 4)\n return m + n\n\n # __radd__\n @testing.numpy_cupy_allclose(sp_name='sp', _check_sparse_format=False)\n def test_radd_zero(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return 0 + m\n\n def test_radd_scalar(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make(xp, sp, self.dtype)\n with pytest.raises(NotImplementedError):\n 1 + m\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_radd_dense(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n n = xp.arange(12).reshape(3, 4)\n return n + m\n\n # __sub__\n @testing.numpy_cupy_allclose(sp_name='sp', _check_sparse_format=False)\n def test_sub_zero(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return m - 0\n\n def test_sub_scalar(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make(xp, sp, self.dtype)\n with pytest.raises(NotImplementedError):\n m - 1\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sub_csr(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n n = _make2(xp, sp, self.dtype)\n return m - n\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sub_coo(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n n = _make2(xp, sp, self.dtype).tocoo()\n return m - n\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sub_dense(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n n = xp.arange(12).reshape(3, 4)\n return m - n\n\n # __rsub__\n @testing.numpy_cupy_allclose(sp_name='sp', _check_sparse_format=False)\n def test_rsub_zero(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return 0 - m\n\n def test_rsub_scalar(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make(xp, sp, self.dtype)\n with pytest.raises(NotImplementedError):\n 1 - m\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_rsub_dense(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n n = xp.arange(12).reshape(3, 4)\n return n - m\n\n # __mul__\n @testing.numpy_cupy_allclose(sp_name='sp', _check_sparse_format=False)\n def test_mul_scalar(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return m * 2.0\n\n @testing.numpy_cupy_allclose(sp_name='sp', _check_sparse_format=False)\n def test_mul_numpy_scalar(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return m * numpy.dtype(self.dtype).type(2.0)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mul_csr(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype)\n return m * x\n\n def test_mul_csr_invalid_shape(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make(xp, sp, self.dtype)\n x = sp.csr_matrix((5, 3), dtype=self.dtype)\n with pytest.raises(ValueError):\n m * x\n\n @pytest.mark.skipif(runtime.is_hip and driver.get_build_version() < 400,\n reason='no working implementation')\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mul_csc(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype).tocsc()\n return m * x\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mul_sparse(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype).tocoo()\n return m * x\n\n @testing.numpy_cupy_allclose(sp_name='sp', _check_sparse_format=False)\n def test_mul_zero_dim(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n x = xp.array(2, dtype=self.dtype)\n return m * x\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mul_dense_vector(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n x = xp.arange(4).astype(self.dtype)\n return m * x\n\n def 
test_mul_dense_vector_invalid_shape(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make(xp, sp, self.dtype)\n x = xp.arange(5).astype(self.dtype)\n with pytest.raises(ValueError):\n m * x\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mul_dense_matrix(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n x = xp.arange(8).reshape(4, 2).astype(self.dtype)\n return m * x\n\n def test_mul_dense_matrix_invalid_shape(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make(xp, sp, self.dtype)\n x = xp.arange(10).reshape(5, 2).astype(self.dtype)\n with pytest.raises(ValueError):\n m * x\n\n def test_mul_dense_ndim3(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make(xp, sp, self.dtype)\n x = xp.arange(24).reshape(4, 2, 3).astype(self.dtype)\n with pytest.raises(ValueError):\n m * x\n\n def test_mul_unsupported(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make(xp, sp, self.dtype)\n with pytest.raises(TypeError):\n m * None\n\n # __rmul__\n @testing.numpy_cupy_allclose(sp_name='sp', _check_sparse_format=False)\n def test_rmul_scalar(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return 2.0 * m\n\n @testing.numpy_cupy_allclose(sp_name='sp', _check_sparse_format=False)\n def test_rmul_numpy_scalar(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return numpy.dtype(self.dtype).type(2.0) * m\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_rmul_csr(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype)\n return x * m\n\n @pytest.mark.skipif(runtime.is_hip and driver.get_build_version() < 400,\n reason='no working implementation')\n @testing.numpy_cupy_allclose(sp_name='sp', _check_sparse_format=False)\n def test_rmul_csc(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype).tocsc()\n return x * m\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_rmul_sparse(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype).tocoo()\n return x * m\n\n @testing.numpy_cupy_allclose(sp_name='sp', _check_sparse_format=False)\n def test_rmul_zero_dim(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n x = xp.array(2, dtype=self.dtype)\n return x * m\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_rmul_dense_matrix(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n x = xp.arange(12).reshape(4, 3).astype(self.dtype)\n return x * m\n\n def test_rmul_dense_ndim3(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make(xp, sp, self.dtype)\n x = xp.arange(24).reshape(4, 2, 3).astype(self.dtype)\n with pytest.raises(ValueError):\n x * m\n\n @pytest.mark.xfail(\n numpy.lib.NumpyVersion(scipy.__version__) >= '1.8.0rc1',\n reason='See scipy/15210')\n def test_rmul_unsupported(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make(xp, sp, self.dtype)\n with pytest.raises(TypeError):\n None * m\n\n # Note: '@' operator is almost equivalent to '*' operator. 
Only test the\n # cases where '@' raises an exception and '*' does not.\n def test_matmul_scalar(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n x = 2.0\n with pytest.raises(ValueError):\n m @ x\n with pytest.raises(ValueError):\n x @ m\n\n def test_matmul_numpy_scalar(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n x = numpy.dtype(self.dtype).type(2.0)\n with pytest.raises(ValueError):\n m @ x\n with pytest.raises(ValueError):\n x @ m\n\n def test_matmul_scalar_like_array(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n x = xp.array(2.0, self.dtype)\n with pytest.raises(ValueError):\n m @ x\n with pytest.raises(ValueError):\n x @ m\n\n # __pow__\n @testing.numpy_cupy_allclose(sp_name='sp', _check_sparse_format=False)\n def test_pow_0(self, xp, sp):\n m = _make_square(xp, sp, self.dtype)\n return m ** 0\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_pow_1(self, xp, sp):\n m = _make_square(xp, sp, self.dtype)\n return m ** 1\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_pow_2(self, xp, sp):\n m = _make_square(xp, sp, self.dtype)\n return m ** 2\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_pow_3(self, xp, sp):\n m = _make_square(xp, sp, self.dtype)\n return m ** 3\n\n def test_pow_neg(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make_square(xp, sp, self.dtype)\n with pytest.raises(ValueError):\n m ** -1\n\n def test_sum_tuple_axis(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make(xp, sp, self.dtype)\n with pytest.raises(TypeError):\n m.sum(axis=(0, 1))\n\n def test_sum_float_axis(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make(xp, sp, self.dtype)\n with pytest.raises(TypeError):\n m.sum(axis=0.0)\n\n def test_sum_too_large_axis(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make(xp, sp, self.dtype)\n with pytest.raises(ValueError):\n m.sum(axis=3)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_transpose(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m.transpose()\n\n def test_transpose_axes_int(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make(xp, sp, self.dtype)\n with pytest.raises(ValueError):\n m.transpose(axes=0)\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_eliminate_zeros(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n m.eliminate_zeros()\n return m.nnz\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float32, numpy.float64],\n 'ret_dtype': [None, numpy.float32, numpy.float64],\n 'axis': [None, 0, 1, -1, -2],\n}))\[email protected]_requires('scipy')\nclass TestCooMatrixSum:\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sum(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return m.sum(axis=self.axis, dtype=self.ret_dtype)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sum_with_out(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n if self.axis is None:\n shape = ()\n else:\n shape = list(m.shape)\n shape[self.axis] = 1\n shape = tuple(shape)\n out = xp.empty(shape, dtype=self.ret_dtype)\n if xp is numpy:\n # TODO(unno): numpy.matrix is used for scipy.sparse though\n # cupy.ndarray is used for cupyx.scipy.sparse.\n out = xp.asmatrix(out)\n return m.sum(axis=self.axis, dtype=self.ret_dtype, out=out)\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float32, numpy.float64, numpy.complex64, 
numpy.complex128],\n}))\[email protected]_requires('scipy')\nclass TestCooMatrixSumDuplicates:\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sum_duplicates(self, xp, sp):\n m = _make_duplicate(xp, sp, self.dtype)\n assert not m.has_canonical_format\n m.sum_duplicates()\n assert m.has_canonical_format\n assert m.nnz == 3\n\n m.sum_duplicates()\n assert m.has_canonical_format\n return m\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sum_duplicates_canonical(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n assert not m.has_canonical_format\n m.sum_duplicates()\n assert m.has_canonical_format\n assert m.nnz == 4\n return m\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sum_duplicates_empty(self, xp, sp):\n m = _make_empty(xp, sp, self.dtype)\n assert not m.has_canonical_format\n m.sum_duplicates()\n assert m.has_canonical_format\n assert m.nnz == 0\n return m\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sum_duplicates_incompatibility(self, xp, sp):\n # See #3620 and #3624. CuPy's and SciPy's COO indices could mismatch\n # due to the order of lexsort, but the matrix is correct.\n m = _make_sum_dup(xp, sp, self.dtype)\n if xp is cupy:\n sorted_first = m.row.copy()\n else:\n sorted_first = m.col.copy()\n assert not m.has_canonical_format\n m.sum_duplicates()\n assert m.has_canonical_format\n # Here we ensure this sorting order is not altered by future PRs...\n sorted_first.sort()\n if xp is cupy:\n assert (m.row == sorted_first).all()\n else:\n assert (m.col == sorted_first).all()\n assert m.has_canonical_format\n # ...and now we make sure the dense matrix is the same\n return m\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],\n 'ufunc': [\n 'arcsin', 'arcsinh', 'arctan', 'arctanh', 'ceil', 'deg2rad', 'expm1',\n 'floor', 'log1p', 'rad2deg', 'rint', 'sign', 'sin', 'sinh', 'sqrt',\n 'tan', 'tanh', 'trunc',\n ],\n}))\[email protected]_requires('scipy')\nclass TestUfunc:\n\n @testing.numpy_cupy_allclose(sp_name='sp', atol=1e-5)\n def test_ufun(self, xp, sp):\n x = _make(xp, sp, self.dtype)\n x.data *= 0.1\n func = getattr(x, self.ufunc)\n complex_unsupported = {'ceil', 'deg2rad', 'floor', 'rad2deg', 'trunc'}\n if (numpy.dtype(self.dtype).kind == 'c' and\n self.ufunc in complex_unsupported):\n with pytest.raises(TypeError):\n func()\n return xp.array(0)\n else:\n return func()\n\n\nclass TestIsspmatrixCoo:\n\n def test_coo(self):\n x = sparse.coo_matrix(\n (cupy.array([0], 'f'),\n (cupy.array([0], 'i'), cupy.array([0], 'i'))),\n shape=(1, 1), dtype='f')\n assert sparse.isspmatrix_coo(x) is True\n\n def test_csr(self):\n x = sparse.csr_matrix(\n (cupy.array([], 'f'),\n cupy.array([], 'i'),\n cupy.array([0], 'i')),\n shape=(0, 0), dtype='f')\n assert sparse.isspmatrix_coo(x) is False\n\n\[email protected](*testing.product({\n 'shape': [(8, 5), (5, 5), (5, 8)],\n}))\[email protected]_requires('scipy>=1.5.0')\[email protected]\nclass TestCooMatrixDiagonal:\n density = 0.5\n\n def _make_matrix(self, dtype):\n a = testing.shaped_random(self.shape, numpy, dtype=dtype)\n mask = testing.shaped_random(self.shape, numpy, dtype='f', scale=1.0)\n a[mask > self.density] = 0\n scipy_a = scipy.sparse.coo_matrix(a)\n cupyx_a = sparse.coo_matrix(cupy.array(a))\n return scipy_a, cupyx_a\n\n @testing.for_dtypes('fdFD')\n def test_diagonal(self, dtype):\n scipy_a, cupyx_a = self._make_matrix(dtype)\n m, n = self.shape\n for k in range(-m, n+1):\n scipy_diag = scipy_a.diagonal(k=k)\n cupyx_diag = 
cupyx_a.diagonal(k=k)\n testing.assert_allclose(scipy_diag, cupyx_diag)\n\n def _test_setdiag(self, scipy_a, cupyx_a, x, k):\n scipy_a = scipy_a.copy()\n cupyx_a = cupyx_a.copy()\n scipy_a.setdiag(x, k=k)\n cupyx_a.setdiag(cupy.array(x), k=k)\n testing.assert_allclose(scipy_a.data, cupyx_a.data)\n testing.assert_array_equal(scipy_a.row, cupyx_a.row)\n testing.assert_array_equal(scipy_a.col, cupyx_a.col)\n\n @testing.for_dtypes('fdFD')\n def test_setdiag(self, dtype):\n scipy_a, cupyx_a = self._make_matrix(dtype)\n m, n = self.shape\n for k in range(-m+1, n):\n m_st, n_st = max(0, -k), max(0, k)\n for d in (-1, 0, 1):\n x_len = min(m - m_st, n - n_st) + d\n if x_len <= 0:\n continue\n x = numpy.ones((x_len,), dtype=dtype)\n self._test_setdiag(scipy_a, cupyx_a, x, k)\n\n @testing.for_dtypes('fdFD')\n def test_setdiag_scalar(self, dtype):\n scipy_a, cupyx_a = self._make_matrix(dtype)\n x = numpy.array(1.0, dtype=dtype)\n m, n = self.shape\n for k in range(-m+1, n):\n self._test_setdiag(scipy_a, cupyx_a, x, k)\n\n def test_setdiag_invalid(self):\n dtype = 'f'\n scipy_a, cupyx_a = self._make_matrix(dtype)\n x = numpy.array(1.0, dtype=dtype)\n m, n = self.shape\n for k in (-m, n):\n with pytest.raises(ValueError):\n scipy_a.setdiag(x, k=k)\n with pytest.raises(ValueError):\n cupyx_a.setdiag(x, k=k)\n" ]
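The record's final file above is a CuPy test suite for `cupyx.scipy.sparse.coo_matrix` construction, validation, arithmetic, and canonicalization. Below is a minimal sketch of the core construction behaviors those tests assert, assuming a working CUDA device and the public `cupy`/`cupyx` APIs; the tiny `data`/`row`/`col` triplet is illustrative, not taken from the tests.

import cupy
import cupyx.scipy.sparse as sparse

# Triplet form with a duplicate entry at (0, 0); dtypes mirror the tests
# ('f' for data, 'i' for indices).
data = cupy.array([1., 1., 2., 3.], dtype='f')
row = cupy.array([0, 0, 1, 2], dtype='i')
col = cupy.array([0, 0, 1, 2], dtype='i')

m = sparse.coo_matrix((data, (row, col)), shape=(3, 4))
assert m.nnz == 4                 # duplicates are stored until summed
m.sum_duplicates()                # merges the two (0, 0) values: 1. + 1. = 2.
assert m.has_canonical_format     # sorted by row, then column, no duplicates
assert m.nnz == 3
dense = m.toarray()               # dense round-trip, matching scipy.sparse semantics

As in `scipy.sparse`, conversions such as `m.tocsr()` and `m.tocsc()` also leave the result in canonical format, which is what the `test_tocsr`/`test_tocsc` cases above check.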
[ [ "numpy.asarray", "numpy.add.reduce", "numpy.indices", "numpy.array", "numpy.where" ], [ "numpy.errstate", "numpy.lib.NumpyVersion", "numpy.array", "numpy.dtype" ], [ "numpy.dtype" ], [ "numpy.lib.NumpyVersion", "numpy.array", "numpy.dtype", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bolcom/probability
[ "4a11efad1ecd8a1336e4c9fdb0105efbf2375ad7", "4a11efad1ecd8a1336e4c9fdb0105efbf2375ad7", "4a11efad1ecd8a1336e4c9fdb0105efbf2375ad7", "4a11efad1ecd8a1336e4c9fdb0105efbf2375ad7", "4a11efad1ecd8a1336e4c9fdb0105efbf2375ad7", "4a11efad1ecd8a1336e4c9fdb0105efbf2375ad7", "4a11efad1ecd8a1336e4c9fdb0105efbf2375ad7", "4a11efad1ecd8a1336e4c9fdb0105efbf2375ad7", "4a11efad1ecd8a1336e4c9fdb0105efbf2375ad7", "4a11efad1ecd8a1336e4c9fdb0105efbf2375ad7", "4a11efad1ecd8a1336e4c9fdb0105efbf2375ad7" ]
[ "tensorflow_probability/python/mcmc/replica_exchange_mc.py", "tensorflow_probability/python/optimizer/sgld_test.py", "tensorflow_probability/python/distributions/dirichlet_test.py", "tensorflow_probability/python/internal/test_combinations.py", "tensorflow_probability/python/distributions/mixture_same_family_test.py", "tensorflow_probability/python/positive_semidefinite_kernels/__init__.py", "tensorflow_probability/python/experimental/auto_batching/dsl_test.py", "tensorflow_probability/python/bijectors/categorical_to_discrete.py", "tensorflow_probability/python/mcmc/sample_annealed_importance.py", "tensorflow_probability/python/distributions/truncated_normal.py", "tensorflow_probability/python/internal/dtype_util_test.py" ]
[ "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Replica Exchange Monte Carlo Transition Kernel.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.mcmc import kernel as kernel_base\nfrom tensorflow_probability.python.mcmc.internal import util as mcmc_util\nfrom tensorflow_probability.python.util.seed_stream import SeedStream\n\n__all__ = [\n 'ReplicaExchangeMC',\n 'default_exchange_proposed_fn',\n]\n\nReplicaExchangeMCKernelResults = collections.namedtuple(\n 'ReplicaExchangeMCKernelResults',\n [\n # List of states for each replica. Each state may itself be a list of\n # state parts.\n 'replica_states',\n # List of KernelResults for each replica, post exchange.\n 'replica_results',\n # List of state/state-parts with pre-exchange samples from each replica.\n 'sampled_replica_states',\n # List of kernel-results with pre-exchange samples from each replica.\n 'sampled_replica_results',\n ])\n\n\ndef default_exchange_proposed_fn(prob_exchange):\n \"\"\"Default exchange proposal function, for replica exchange MC.\n\n With probability `prob_exchange` propose combinations of replica for exchange.\n When exchanging, create combinations of adjacent replicas in\n [Replica Exchange Monte Carlo](\n https://en.wikipedia.org/wiki/Parallel_tempering)\n\n ```\n exchange_fn = default_exchange_proposed_fn(prob_exchange=0.5)\n exchange_proposed = exchange_fn(num_replica=3)\n\n exchange_proposed.eval()\n ==> [[0, 1]] # 1 exchange, 0 <--> 1\n\n exchange_proposed.eval()\n ==> [] # 0 exchanges\n ```\n\n Args:\n prob_exchange: Scalar `Tensor` giving probability that any exchanges will\n be generated.\n\n Returns:\n default_exchange_proposed_fn_: Python callable which take a number of\n replicas (a Python integer), and return combinations of replicas for\n exchange as an [n, 2] integer `Tensor`, `0 <= n <= num_replica // 2`,\n with *unique* values in the set `{0, ..., num_replica}`.\n \"\"\"\n\n def default_exchange_proposed_fn_(num_replica, seed=None):\n \"\"\"Default function for `exchange_proposed_fn` of `kernel`.\"\"\"\n seed_stream = SeedStream(seed, 'default_exchange_proposed_fn')\n\n zero_start = tf.random.uniform([], seed=seed_stream()) > 0.5\n if num_replica % 2 == 0:\n\n def _exchange():\n flat_exchange = tf.range(num_replica)\n if num_replica > 2:\n start = tf.cast(~zero_start, dtype=tf.int32)\n end = num_replica - start\n flat_exchange = flat_exchange[start:end]\n return tf.reshape(flat_exchange, [tf.size(input=flat_exchange) // 2, 2])\n else:\n\n def _exchange():\n start = tf.cast(zero_start, dtype=tf.int32)\n end = num_replica - tf.cast(~zero_start, dtype=tf.int32)\n flat_exchange = tf.range(num_replica)[start:end]\n return tf.reshape(flat_exchange, 
[tf.size(input=flat_exchange) // 2, 2])\n\n def _null_exchange():\n return tf.reshape(tf.cast([], dtype=tf.int32), shape=[0, 2])\n\n return tf.cond(\n pred=tf.random.uniform([], seed=seed_stream()) < prob_exchange,\n true_fn=_exchange,\n false_fn=_null_exchange)\n\n return default_exchange_proposed_fn_\n\n\nclass ReplicaExchangeMC(kernel_base.TransitionKernel):\n \"\"\"Runs one step of the Replica Exchange Monte Carlo.\n\n [Replica Exchange Monte Carlo](\n https://en.wikipedia.org/wiki/Parallel_tempering) is a Markov chain\n Monte Carlo (MCMC) algorithm that is also known as Parallel Tempering. This\n algorithm performs multiple sampling with different temperatures in parallel,\n and exchanges those samplings according to the Metropolis-Hastings criterion.\n\n The `K` replicas are parameterized in terms of `inverse_temperature`'s,\n `(beta[0], beta[1], ..., beta[K-1])`. If the target distribution has\n probability density `p(x)`, the `kth` replica has density `p(x)**beta_k`.\n\n Typically `beta[0] = 1.0`, and `1.0 > beta[1] > beta[2] > ... > 0.0`.\n\n * `beta[0] == 1` ==> First replicas samples from the target density, `p`.\n * `beta[k] < 1`, for `k = 1, ..., K-1` ==> Other replicas sample from\n \"flattened\" versions of `p` (peak is less high, valley less low). These\n distributions are somewhat closer to a uniform on the support of `p`.\n\n Samples from adjacent replicas `i`, `i + 1` are used as proposals for each\n other in a Metropolis step. This allows the lower `beta` samples, which\n explore less dense areas of `p`, to occasionally be used to help the\n `beta == 1` chain explore new regions of the support.\n\n Samples from replica 0 are returned, and the others are discarded.\n\n #### Examples\n\n ##### Sampling from the Standard Normal Distribution.\n\n ```python\n import numpy as np\n import tensorflow as tf\n import tensorflow_probability as tfp\n tfd = tfp.distributions\n\n dtype = np.float32\n\n target = tfd.Normal(loc=dtype(0), scale=dtype(1))\n\n def make_kernel_fn(target_log_prob_fn, seed):\n return tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=target_log_prob_fn,\n seed=seed, step_size=1.0, num_leapfrog_steps=3)\n\n remc = tfp.mcmc.ReplicaExchangeMC(\n target_log_prob_fn=target.log_prob,\n inverse_temperatures=[1., 0.3, 0.1, 0.03],\n make_kernel_fn=make_kernel_fn,\n seed=42)\n\n samples, _ = tfp.mcmc.sample_chain(\n num_results=1000,\n current_state=dtype(1),\n kernel=remc,\n num_burnin_steps=500,\n parallel_iterations=1) # For determinism.\n\n sample_mean = tf.reduce_mean(samples, axis=0)\n sample_std = tf.sqrt(\n tf.reduce_mean(tf.squared_difference(samples, sample_mean),\n axis=0))\n with tf.Session() as sess:\n [sample_mean_, sample_std_] = sess.run([sample_mean, sample_std])\n\n print('Estimated mean: {}'.format(sample_mean_))\n print('Estimated standard deviation: {}'.format(sample_std_))\n ```\n\n ##### Sampling from a 2-D Mixture Normal Distribution.\n\n ```python\n import numpy as np\n import tensorflow as tf\n import tensorflow_probability as tfp\n import matplotlib.pyplot as plt\n tfd = tfp.distributions\n\n dtype = np.float32\n\n target = tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(probs=[0.5, 0.5]),\n components_distribution=tfd.MultivariateNormalDiag(\n loc=[[-1., -1], [1., 1.]],\n scale_identity_multiplier=[0.1, 0.1]))\n\n def make_kernel_fn(target_log_prob_fn, seed):\n return tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=target_log_prob_fn,\n seed=seed, step_size=0.3, num_leapfrog_steps=3)\n\n remc = 
tfp.mcmc.ReplicaExchangeMC(\n target_log_prob_fn=target.log_prob,\n inverse_temperatures=[1., 0.3, 0.1, 0.03, 0.01],\n make_kernel_fn=make_kernel_fn,\n seed=42)\n\n samples, _ = tfp.mcmc.sample_chain(\n num_results=1000,\n # Start near the [1, 1] mode. Standard HMC would get stuck there.\n current_state=np.ones(2, dtype=dtype),\n kernel=remc,\n num_burnin_steps=500,\n parallel_iterations=1) # For determinism.\n\n with tf.Session() as sess:\n samples_ = sess.run(samples)\n\n plt.figure(figsize=(8, 8))\n plt.xlim(-2, 2)\n plt.ylim(-2, 2)\n plt.plot(samples_[:, 0], samples_[:, 1], '.')\n plt.show()\n ```\n\n \"\"\"\n\n def __init__(self,\n target_log_prob_fn,\n inverse_temperatures,\n make_kernel_fn,\n exchange_proposed_fn=default_exchange_proposed_fn(1.),\n seed=None,\n name=None):\n \"\"\"Instantiates this object.\n\n Args:\n target_log_prob_fn: Python callable which takes an argument like\n `current_state` (or `*current_state` if it's a list) and returns its\n (possibly unnormalized) log-density under the target distribution.\n inverse_temperatures: `1D` `Tensor of inverse temperatures to perform\n samplings with each replica. Must have statically known `shape`.\n `inverse_temperatures[0]` produces the states returned by samplers,\n and is typically == 1.\n make_kernel_fn: Python callable which takes target_log_prob_fn and seed\n args and returns a TransitionKernel instance.\n exchange_proposed_fn: Python callable which take a number of replicas, and\n return combinations of replicas for exchange.\n seed: Python integer to seed the random number generator.\n Default value: `None` (i.e., no seed).\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., \"remc_kernel\").\n\n Raises:\n ValueError: `inverse_temperatures` doesn't have statically known 1D shape.\n \"\"\"\n inverse_temperatures = tf.convert_to_tensor(\n value=inverse_temperatures, name='inverse_temperatures')\n\n # Note these are static checks, and don't need to be embedded in the graph.\n inverse_temperatures.shape.assert_is_fully_defined()\n inverse_temperatures.shape.assert_has_rank(1)\n\n self._seed_stream = SeedStream(seed, salt=name)\n self._seeded_mcmc = seed is not None\n self._parameters = dict(\n target_log_prob_fn=target_log_prob_fn,\n inverse_temperatures=inverse_temperatures,\n num_replica=tf.compat.dimension_value(inverse_temperatures.shape[0]),\n exchange_proposed_fn=exchange_proposed_fn,\n seed=seed,\n name=name)\n self.replica_kernels = []\n for i in range(self.num_replica):\n self.replica_kernels.append(\n make_kernel_fn(\n target_log_prob_fn=_replica_log_prob_fn(inverse_temperatures[i],\n target_log_prob_fn),\n seed=self._seed_stream()))\n\n @property\n def target_log_prob_fn(self):\n return self._parameters['target_log_prob_fn']\n\n @property\n def inverse_temperatures(self):\n return self._parameters['inverse_temperatures']\n\n @property\n def num_replica(self):\n return self._parameters['num_replica']\n\n @property\n def exchange_proposed_fn(self):\n return self._parameters['exchange_proposed_fn']\n\n @property\n def seed(self):\n return self._parameters['seed']\n\n @property\n def name(self):\n return self._parameters['name']\n\n @property\n def parameters(self):\n \"\"\"Return `dict` of ``__init__`` arguments and their values.\"\"\"\n return self._parameters\n\n @property\n def is_calibrated(self):\n return True\n\n def one_step(self, current_state, previous_kernel_results):\n \"\"\"Takes one step of the TransitionKernel.\n\n Args:\n current_state: `Tensor` or 
Python `list` of `Tensor`s representing the\n current state(s) of the Markov chain(s).\n previous_kernel_results: A (possibly nested) `tuple`, `namedtuple` or\n `list` of `Tensor`s representing internal calculations made within the\n previous call to this function (or as returned by `bootstrap_results`).\n\n Returns:\n next_state: `Tensor` or Python `list` of `Tensor`s representing the\n next state(s) of the Markov chain(s).\n kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of\n `Tensor`s representing internal calculations made within this function.\n This inculdes replica states.\n \"\"\"\n # Key difficulty: The type of exchanges differs from one call to the\n # next...even the number of exchanges can differ.\n # As a result, exchanges must happen dynamically, in while loops.\n with tf1.name_scope(\n name=mcmc_util.make_name(self.name, 'remc', 'one_step'),\n values=[current_state, previous_kernel_results]):\n\n # Each replica does `one_step` to get pre-exchange states/KernelResults.\n sampled_replica_states, sampled_replica_results = zip(*[\n rk.one_step(previous_kernel_results.replica_states[i],\n previous_kernel_results.replica_results[i])\n for i, rk in enumerate(self.replica_kernels)\n ])\n sampled_replica_states = list(sampled_replica_states)\n sampled_replica_results = list(sampled_replica_results)\n\n states_are_lists = mcmc_util.is_list_like(sampled_replica_states[0])\n\n if not states_are_lists:\n sampled_replica_states = [[s] for s in sampled_replica_states]\n num_state_parts = len(sampled_replica_states[0])\n\n dtype = sampled_replica_states[0][0].dtype\n\n # Must put states into TensorArrays. Why? We will read/write states\n # dynamically with Tensor index `i`, and you cannot do this with lists.\n # old_states[k][i] is Tensor of (old) state part k, for replica i.\n # The `k` will be known statically, and `i` is a Tensor.\n old_states = [\n tf.TensorArray(\n dtype,\n size=self.num_replica,\n dynamic_size=False,\n clear_after_read=False,\n tensor_array_name='old_states',\n # State part k has same shape, regardless of replica. 
So use 0.\n element_shape=sampled_replica_states[0][k].shape)\n for k in range(num_state_parts)\n ]\n for k in range(num_state_parts):\n for i in range(self.num_replica):\n old_states[k] = old_states[k].write(i, sampled_replica_states[i][k])\n\n exchange_proposed = self.exchange_proposed_fn(\n self.num_replica, seed=self._seed_stream())\n exchange_proposed_n = tf.shape(input=exchange_proposed)[0]\n\n exchanged_states = self._get_exchanged_states(\n old_states, exchange_proposed, exchange_proposed_n,\n sampled_replica_states, sampled_replica_results)\n\n no_exchange_proposed, _ = tf1.setdiff1d(\n tf.range(self.num_replica), tf.reshape(exchange_proposed, [-1]))\n\n exchanged_states = self._insert_old_states_where_no_exchange_was_proposed(\n no_exchange_proposed, old_states, exchanged_states)\n\n next_replica_states = []\n for i in range(self.num_replica):\n next_replica_states_i = []\n for k in range(num_state_parts):\n next_replica_states_i.append(exchanged_states[k].read(i))\n next_replica_states.append(next_replica_states_i)\n\n if not states_are_lists:\n next_replica_states = [s[0] for s in next_replica_states]\n sampled_replica_states = [s[0] for s in sampled_replica_states]\n\n # Now that states are/aren't exchanged, bootstrap next kernel_results.\n # The viewpoint is that after each exchange, we are starting anew.\n next_replica_results = [\n rk.bootstrap_results(state)\n for rk, state in zip(self.replica_kernels, next_replica_states)\n ]\n\n next_state = next_replica_states[0] # Replica 0 is the returned state(s).\n\n kernel_results = ReplicaExchangeMCKernelResults(\n replica_states=next_replica_states,\n replica_results=next_replica_results,\n sampled_replica_states=sampled_replica_states,\n sampled_replica_results=sampled_replica_results,\n )\n\n return next_state, kernel_results\n\n def _get_exchanged_states(self, old_states, exchange_proposed,\n exchange_proposed_n, sampled_replica_states,\n sampled_replica_results):\n \"\"\"Get list of TensorArrays holding exchanged states, and zeros.\"\"\"\n with tf1.name_scope('get_exchanged_states'):\n\n target_log_probs = []\n for replica in range(self.num_replica):\n replica_log_prob = _get_field(sampled_replica_results[replica],\n 'target_log_prob')\n inverse_temp = self.inverse_temperatures[replica]\n target_log_probs.append(replica_log_prob / inverse_temp)\n target_log_probs = tf.stack(target_log_probs, axis=0)\n\n dtype = target_log_probs.dtype\n num_state_parts = len(sampled_replica_states[0])\n # exchanged_states[k][i] is Tensor of (new) state part k, for replica i.\n # The `k` will be known statically, and `i` is a Tensor.\n # We will insert values into indices `i` for every replica with a proposed\n # exchange.\n exchanged_states = [\n tf.TensorArray(\n dtype,\n size=self.num_replica,\n dynamic_size=False,\n tensor_array_name='exchanged_states',\n # State part k has same shape, regardless of replica. So use 0.\n element_shape=sampled_replica_states[0][k].shape)\n for k in range(num_state_parts)\n ]\n\n # Draw random variables here, to avoid sampling in the loop (and losing\n # reproducibility). 
This may mean we sample too many, but we will always\n # have enough.\n sample_shape = tf.concat(\n ([self.num_replica // 2], tf.shape(input=target_log_probs)[1:]),\n axis=0)\n log_uniforms = tf.math.log(\n tf.random.uniform(\n shape=sample_shape, dtype=dtype, seed=self._seed_stream()))\n\n def _swap(is_exchange_accepted, x, y):\n \"\"\"Swap batches of x, y where accepted.\"\"\"\n with tf1.name_scope('swap_where_exchange_accepted'):\n new_x = mcmc_util.choose(is_exchange_accepted, y, x)\n new_y = mcmc_util.choose(is_exchange_accepted, x, y)\n return new_x, new_y\n\n def cond(i, unused_exchanged_states):\n return i < exchange_proposed_n\n\n def body(i, exchanged_states):\n \"\"\"Body of while loop for exchanging states.\"\"\"\n # Propose exchange between replicas indexed by m and n.\n m, n = tf.unstack(exchange_proposed[i])\n\n # Construct log_accept_ratio: -temp_diff * target_log_prob_diff.\n # Note target_log_prob_diff = -EnergyDiff (common definition is in terms\n # of energy).\n temp_diff = self.inverse_temperatures[m] - self.inverse_temperatures[n]\n # Difference of target log probs may be +- Inf or NaN. We want the\n # product of this with the temperature difference to have \"alt value\" of\n # -Inf.\n log_accept_ratio = mcmc_util.safe_sum(\n [-temp_diff * target_log_probs[m], temp_diff * target_log_probs[n]])\n\n is_exchange_accepted = log_uniforms[i] < log_accept_ratio\n\n for k in range(num_state_parts):\n new_m, new_n = _swap(is_exchange_accepted, old_states[k].read(m),\n old_states[k].read(n))\n exchanged_states[k] = exchanged_states[k].write(m, new_m)\n exchanged_states[k] = exchanged_states[k].write(n, new_n)\n\n return i + 1, exchanged_states\n\n # At this point, exchanged_states[k] is a length num_replicas TensorArray.\n return tf.while_loop(\n cond=cond, body=body, loop_vars=[tf.constant(0),\n exchanged_states])[1] # Remove `i`\n\n def _insert_old_states_where_no_exchange_was_proposed(\n self, no_exchange_proposed, old_states, exchanged_states):\n with tf1.name_scope(\n 'insert_old_states_where_no_exchange_was_proposed'):\n\n def cond(j, unused_exchanged_states):\n return j < tf.size(input=no_exchange_proposed)\n\n def body(j, exchanged_states):\n replica = no_exchange_proposed[j]\n for k in range(len(old_states)): # k indexes state part\n exchanged_states[k] = exchanged_states[k].write(\n replica, old_states[k].read(replica))\n return j + 1, exchanged_states\n\n return tf.while_loop(\n cond=cond, body=body, loop_vars=[tf.constant(0),\n exchanged_states])[1] # Remove `j`\n\n def bootstrap_results(self, init_state):\n \"\"\"Returns an object with the same type as returned by `one_step`.\n\n Args:\n init_state: `Tensor` or Python `list` of `Tensor`s representing the\n initial state(s) of the Markov chain(s).\n\n Returns:\n kernel_results: A (possibly nested) `tuple`, `namedtuple` or `list` of\n `Tensor`s representing internal calculations made within this function.\n This inculdes replica states.\n \"\"\"\n with tf1.name_scope(\n name=mcmc_util.make_name(self.name, 'remc', 'bootstrap_results'),\n values=[init_state]):\n replica_results = [\n self.replica_kernels[i].bootstrap_results(init_state)\n for i in range(self.num_replica)\n ]\n\n init_state_parts = (\n list(init_state)\n if mcmc_util.is_list_like(init_state) else [init_state])\n\n # Convert all states parts to tensor...\n replica_states = [[\n tf.convert_to_tensor(value=s) for s in init_state_parts\n ] for i in range(self.num_replica)]\n\n if not mcmc_util.is_list_like(init_state):\n replica_states = [s[0] for s in 
replica_states]\n\n return ReplicaExchangeMCKernelResults(\n replica_states=replica_states,\n replica_results=replica_results,\n sampled_replica_states=replica_states,\n sampled_replica_results=replica_results,\n )\n\n\ndef _replica_log_prob_fn(inverse_temperature, target_log_prob_fn):\n \"\"\"Return a log probability function made considering temperature.\"\"\"\n\n def _replica_log_prob_fn_(*x):\n return inverse_temperature * target_log_prob_fn(*x)\n\n return _replica_log_prob_fn_\n\n\n# TODO(b/111801087) Use a more standardized API when available.\ndef _get_field(kernel_results, field_name):\n \"\"\"field_name from kernel_results or kernel_results.accepted_results.\"\"\"\n if hasattr(kernel_results, field_name):\n return getattr(kernel_results, field_name)\n if hasattr(kernel_results, 'accepted_results'):\n return getattr(kernel_results.accepted_results, field_name)\n raise TypeError('Cannot extract %s from %s' % (field_name, kernel_results))\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Functional test for GradientDescent.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\nfrom tensorflow_probability.python import distributions as tfd\nfrom tensorflow_probability.python.internal import test_case\nfrom tensorflow_probability.python.math import diag_jacobian\n\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass StochasticGradientLangevinDynamicsOptimizerTest(test_case.TestCase):\n\n def testBasic(self):\n if tf.executing_eagerly():\n return\n\n for dtype in [tf.half, tf.float32, tf.float64]:\n with self.cached_session():\n var0 = tf.Variable([1.1, 2.1], dtype=dtype)\n var1 = tf.Variable([3., 4.], dtype=dtype)\n grads0 = tf.constant([0.1, 0.1], dtype=dtype)\n grads1 = tf.constant([0.01, 0.01], dtype=dtype)\n decay_rate = 0.53\n sgd_optimizer = tfp.optimizer.StochasticGradientLangevinDynamics(\n 3., preconditioner_decay_rate=decay_rate)\n sgd_op = sgd_optimizer.apply_gradients(\n zip([grads0, grads1], [var0, var1]))\n\n self.evaluate(tf1.global_variables_initializer())\n # Fetch params to validate initial values\n self.assertAllCloseAccordingToType([1.1, 2.1], self.evaluate(var0))\n self.assertAllCloseAccordingToType([3., 4.], self.evaluate(var1))\n # Run 1 step of sgd\n self.evaluate(sgd_op)\n # Validate updated params\n grads_scaled = (0.5 * 0.1 /\n np.sqrt(decay_rate + (1. - decay_rate) * 0.1**2 + 1e-8))\n # Note that `tfp.math.diag_jacobian(xs=var, ys=grad)` returns zero\n # tensor\n self.assertAllCloseAccordingToType(\n [1.1 - 3. * grads_scaled, 2.1 - 3. 
* grads_scaled],\n self.evaluate(var0))\n grads_scaled = (0.5 * 0.01 / np.sqrt(\n decay_rate + (1. - decay_rate) * 0.01**2 + 1e-8))\n self.assertAllCloseAccordingToType(\n [3. - 3. * grads_scaled, 4. - 3. * grads_scaled],\n self.evaluate(var1))\n self.assertAllCloseAccordingToType(\n 1, self.evaluate(sgd_optimizer.iterations))\n\n def testBasicMultiInstance(self):\n if tf.executing_eagerly():\n return\n\n for dtype in [tf.half, tf.float32, tf.float64]:\n with self.cached_session():\n var0 = tf.Variable([1.1, 2.1], dtype=dtype)\n var1 = tf.Variable([3., 4.], dtype=dtype)\n grads0 = tf.constant([0.1, 0.1], dtype=dtype)\n grads1 = tf.constant([0.01, 0.01], dtype=dtype)\n vara = tf.Variable([1.1, 2.1], dtype=dtype)\n varb = tf.Variable([3., 4.], dtype=dtype)\n gradsa = tf.constant([0.1, 0.1], dtype=dtype)\n gradsb = tf.constant([0.01, 0.01], dtype=dtype)\n decay_rate = 0.5\n sgd_optimizer = tfp.optimizer.StochasticGradientLangevinDynamics(\n 3., preconditioner_decay_rate=decay_rate)\n sgd_op = sgd_optimizer.apply_gradients(\n zip([grads0, grads1], [var0, var1]))\n sgd_optimizer2 = tfp.optimizer.StochasticGradientLangevinDynamics(\n 3., preconditioner_decay_rate=decay_rate)\n sgd_op2 = sgd_optimizer2.apply_gradients(\n zip([gradsa, gradsb], [vara, varb]))\n self.evaluate(tf1.global_variables_initializer())\n # Fetch params to validate initial values\n self.assertAllCloseAccordingToType([1.1, 2.1], self.evaluate(var0))\n self.assertAllCloseAccordingToType([3., 4.], self.evaluate(var1))\n self.assertAllCloseAccordingToType([1.1, 2.1], self.evaluate(vara))\n self.assertAllCloseAccordingToType([3., 4.], self.evaluate(varb))\n\n # Run 1 step of sgd\n self.evaluate(sgd_op)\n self.evaluate(sgd_op2)\n\n # Validate updated params\n grads_scaled = (0.5 * 0.1 /\n np.sqrt(decay_rate + (1. - decay_rate) * 0.1**2 + 1e-8))\n self.assertAllCloseAccordingToType(\n [1.1 - 3. * grads_scaled, 2.1 - 3. * grads_scaled],\n self.evaluate(var0))\n self.assertAllCloseAccordingToType(\n [1.1 - 3. * grads_scaled, 2.1 - 3. * grads_scaled],\n self.evaluate(vara))\n\n grads_scaled = (0.5 * 0.01 / np.sqrt(\n decay_rate + (1 - decay_rate) * 0.01**2 + 1e-8))\n self.assertAllCloseAccordingToType(\n [3. - 3. * grads_scaled, 4. - 3. * grads_scaled],\n self.evaluate(var1))\n self.assertAllCloseAccordingToType(\n [3. - 3. * grads_scaled, 4. - 3. * grads_scaled],\n self.evaluate(varb))\n self.assertAllCloseAccordingToType(\n 1, self.evaluate(sgd_optimizer.iterations))\n self.assertAllCloseAccordingToType(\n 1, self.evaluate(sgd_optimizer2.iterations))\n\n def testTensorLearningRate(self):\n if tf.executing_eagerly():\n return\n\n for dtype in [tf.half, tf.float32, tf.float64]:\n with self.cached_session():\n var0 = tf.Variable([1.1, 2.1], dtype=dtype)\n var1 = tf.Variable([3., 4.], dtype=dtype)\n grads0 = tf.constant([0.1, 0.1], dtype=dtype)\n grads1 = tf.constant([0.01, 0.01], dtype=dtype)\n lrate = tf.constant(3.0)\n decay_rate = 0.5\n sgd_op = tfp.optimizer.StochasticGradientLangevinDynamics(\n lrate, preconditioner_decay_rate=tf.constant(\n decay_rate)).apply_gradients(\n zip([grads0, grads1], [var0, var1]))\n self.evaluate(tf1.global_variables_initializer())\n # Fetch params to validate initial values\n self.assertAllCloseAccordingToType([1.1, 2.1], self.evaluate(var0))\n self.assertAllCloseAccordingToType([3., 4.], self.evaluate(var1))\n # Run 1 step of sgd\n self.evaluate(sgd_op)\n # Validate updated params\n grads_scaled = (0.5 * 0.1 /\n np.sqrt(decay_rate + (1. 
- decay_rate) * 0.1**2 + 1e-8))\n # Note that `tfp.math.diag_jacobian(xs=var, ys=grad)` returns zero\n # tensor\n self.assertAllCloseAccordingToType(\n [1.1 - 3. * grads_scaled, 2.1 - 3. * grads_scaled],\n self.evaluate(var0))\n grads_scaled = (0.5 * 0.01 / np.sqrt(\n decay_rate + (1. - decay_rate) * 0.01**2 + 1e-8))\n self.assertAllCloseAccordingToType(\n [3. - 3. * grads_scaled, 4. - 3. * grads_scaled],\n self.evaluate(var1))\n\n @test_util.run_deprecated_v1\n def testGradWrtRef(self):\n if tf.executing_eagerly():\n return\n\n for dtype in [tf.half, tf.float32, tf.float64]:\n with self.cached_session():\n opt = tfp.optimizer.StochasticGradientLangevinDynamics(3.0)\n values = [1., 3.]\n vars_ = [tf.Variable([v], dtype=dtype) for v in values]\n loss = lambda: vars_[0] + vars_[1] # pylint: disable=cell-var-from-loop\n grads_and_vars = opt._compute_gradients(loss, vars_)\n self.evaluate(tf1.global_variables_initializer())\n for grad, _ in grads_and_vars:\n self.assertAllCloseAccordingToType([1.], self.evaluate(grad))\n\n def testBurnin(self):\n if tf.executing_eagerly():\n return\n\n for burnin_dtype in [tf.int8, tf.int16, tf.int32, tf.int64]:\n with self.cached_session():\n var0 = tf.Variable([1.1, 2.1], dtype=tf.float32)\n grads0 = tf.constant([0.1, 0.1], dtype=tf.float32)\n decay_rate = 0.53\n sgd_optimizer = tfp.optimizer.StochasticGradientLangevinDynamics(\n 3.,\n preconditioner_decay_rate=decay_rate,\n burnin=tf.constant(10, dtype=burnin_dtype))\n sgd_op = sgd_optimizer.apply_gradients([(grads0, var0)])\n\n self.evaluate(tf1.global_variables_initializer())\n # Validate that iterations is initialized to 0.\n self.assertAllCloseAccordingToType(\n 0, self.evaluate(sgd_optimizer.iterations))\n # Run 1 step of sgd\n self.evaluate(sgd_op)\n # Validate that iterations is incremented.\n self.assertAllCloseAccordingToType(\n 1, self.evaluate(sgd_optimizer.iterations))\n\n def testWithGlobalStep(self):\n if tf.executing_eagerly():\n return\n\n for dtype in [tf.float32, tf.float64]:\n with self.cached_session():\n step = tf.Variable(0, dtype=tf.int64)\n\n var0 = tf.Variable([1.1, 2.1], dtype=dtype)\n var1 = tf.Variable([3., 4.], dtype=dtype)\n grads0 = tf.constant([0.1, 0.1], dtype=dtype)\n grads1 = tf.constant([0.01, 0.01], dtype=dtype)\n decay_rate = 0.1\n\n sgd_opt = tfp.optimizer.StochasticGradientLangevinDynamics(\n 3., preconditioner_decay_rate=decay_rate)\n sgd_opt.iterations = step\n sgd_op = sgd_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n\n self.evaluate(tf1.global_variables_initializer())\n\n # Fetch params to validate initial values\n self.assertAllCloseAccordingToType([1.1, 2.1], self.evaluate(var0))\n self.assertAllCloseAccordingToType([3., 4.], self.evaluate(var1))\n # Run 1 step of sgd\n self.evaluate(sgd_op)\n\n # Validate updated params and step\n grads_scaled = (0.5 * 0.1 /\n np.sqrt(decay_rate + (1. - decay_rate) * 0.1**2 + 1e-8))\n # Note that `tfp.math.diag_jacobian(xs=var, ys=grad)` returns zero\n # tensor\n self.assertAllCloseAccordingToType(\n [1.1 - 3. * grads_scaled, 2.1 - 3. * grads_scaled],\n self.evaluate(var0))\n grads_scaled = (0.5 * 0.01 / np.sqrt(\n decay_rate + (1. - decay_rate) * 0.01**2 + 1e-8))\n self.assertAllCloseAccordingToType(\n [3. - 3. * grads_scaled, 4. - 3. 
* grads_scaled],\n self.evaluate(var1))\n self.assertAllCloseAccordingToType(1, self.evaluate(step))\n\n def testSparseBasic(self):\n if tf.executing_eagerly():\n return\n\n for dtype in [tf.half, tf.float32, tf.float64]:\n with self.cached_session():\n var0 = tf.Variable([[1.1], [2.1]], dtype=dtype)\n var1 = tf.Variable([[3.], [4.]], dtype=dtype)\n grads0 = tf.IndexedSlices(\n tf.constant([0.1], shape=[1, 1], dtype=dtype),\n tf.constant([0]), tf.constant([2, 1]))\n grads1 = tf.IndexedSlices(\n tf.constant([0.01], shape=[1, 1], dtype=dtype),\n tf.constant([1]), tf.constant([2, 1]))\n decay_rate = 0.9\n sgd_op = tfp.optimizer.StochasticGradientLangevinDynamics(\n 3., preconditioner_decay_rate=decay_rate).apply_gradients(\n zip([grads0, grads1], [var0, var1]))\n self.evaluate(tf1.global_variables_initializer())\n # Fetch params to validate initial values\n self.assertAllCloseAccordingToType([[1.1], [2.1]], self.evaluate(var0))\n self.assertAllCloseAccordingToType([[3.], [4.]], self.evaluate(var1))\n # Run 1 step of sgd\n self.evaluate(sgd_op)\n # Validate updated params\n grads_scaled = (0.5 * 0.1 /\n np.sqrt(decay_rate + (1. - decay_rate) * 0.1**2 + 1e-8))\n # Note that `tfp.math.diag_jacobian(xs=var, ys=grad)` returns zero\n # tensor\n self.assertAllCloseAccordingToType([[1.1 - 3. * grads_scaled], [2.1]],\n self.evaluate(var0))\n grads_scaled = (0.5 * 0.01 / np.sqrt(\n decay_rate + (1. - decay_rate) * 0.01**2 + 1e-8))\n self.assertAllCloseAccordingToType(\n [[3. - 3. * 0], [4. - 3. * grads_scaled]], self.evaluate(var1))\n\n def testPreconditionerComputedCorrectly(self):\n \"\"\"Test that SGLD step is computed correctly for a 3D Gaussian energy.\"\"\"\n if tf.executing_eagerly():\n return\n\n with self.cached_session():\n dtype = np.float32\n # Target function is the energy function of normal distribution\n true_mean = dtype([0, 0, 0])\n true_cov = dtype([[1, 0.25, 0.25], [0.25, 1, 0.25], [0.25, 0.25, 1]])\n # Target distribution is defined through the Cholesky decomposition\n chol = tf.linalg.cholesky(true_cov)\n target = tfd.MultivariateNormalTriL(loc=true_mean, scale_tril=chol)\n var_1 = tf.Variable(name='var_1', initial_value=[1., 1.])\n var_2 = tf.Variable(name='var_2', initial_value=[1.])\n\n var = [var_1, var_2]\n\n # Set up the learning rate and the optimizer\n learning_rate = .5\n optimizer_kernel = tfp.optimizer.StochasticGradientLangevinDynamics(\n learning_rate=learning_rate, burnin=1)\n\n # Target function\n def target_fn(x, y):\n # Stack the input tensors together\n z = tf.concat([x, y], axis=-1) - true_mean\n return -target.log_prob(z)\n\n grads = tf.gradients(ys=target_fn(*var), xs=var)\n\n # Update value of `var` with one iteration of the SGLD (without the\n # normal perturbation, since `burnin > 0`)\n step = optimizer_kernel.apply_gradients(zip(grads, var))\n\n # True theoretical value of `var` after one iteration\n decay_tensor = tf.cast(optimizer_kernel._decay_tensor, var[0].dtype)\n diagonal_bias = tf.cast(optimizer_kernel._diagonal_bias, var[0].dtype)\n learning_rate = tf.cast(optimizer_kernel._learning_rate, var[0].dtype)\n velocity = [(decay_tensor * tf.ones_like(v)\n + (1 - decay_tensor) * tf.square(g))\n for v, g in zip(var, grads)]\n preconditioner = [tf.math.rsqrt(vel + diagonal_bias) for vel in velocity]\n # Compute second order gradients\n _, grad_grads = diag_jacobian(\n xs=var,\n ys=grads)\n # Compute gradient of the preconditioner (compute the gradient manually)\n preconditioner_grads = [-(g * g_g * (1. 
- decay_tensor) * p**3.)\n for g, g_g, p in zip(grads, grad_grads,\n preconditioner)]\n\n # True theoretical value of `var` after one iteration\n var_true = [v - learning_rate * 0.5 * (p * g - p_g)\n for v, p, g, p_g in zip(var, preconditioner, grads,\n preconditioner_grads)]\n self.evaluate(tf1.global_variables_initializer())\n var_true_ = self.evaluate(var_true)\n self.evaluate(step)\n var_ = self.evaluate(var) # new `var` after one SGLD step\n self.assertAllClose(var_true_,\n var_, atol=0.001, rtol=0.001)\n\n def testDiffusionBehavesCorrectly(self):\n \"\"\"Test that for the SGLD finds minimum of the 3D Gaussian energy.\"\"\"\n if tf.executing_eagerly():\n return\n\n with self.cached_session():\n # Set up random seed for the optimizer\n tf1.set_random_seed(42)\n dtype = np.float32\n true_mean = dtype([0, 0, 0])\n true_cov = dtype([[1, 0.25, 0.25], [0.25, 1, 0.25], [0.25, 0.25, 1]])\n # Loss is defined through the Cholesky decomposition\n chol = tf.linalg.cholesky(true_cov)\n var_1 = tf.Variable(name='var_1', initial_value=[1., 1.])\n var_2 = tf.Variable(name='var_2', initial_value=[1.])\n\n # Loss function\n def loss_fn():\n var = tf.concat([var_1, var_2], axis=-1)\n loss_part = tf.linalg.cholesky_solve(chol, tf.expand_dims(var, -1))\n return tf.linalg.matvec(loss_part, var, transpose_a=True)\n\n # Set up the learning rate with a polynomial decay\n global_step = tf1.train.get_or_create_global_step()\n starter_learning_rate = .3\n end_learning_rate = 1e-4\n decay_steps = 1e4\n learning_rate = tf1.train.polynomial_decay(\n starter_learning_rate,\n global_step,\n decay_steps,\n end_learning_rate,\n power=1.)\n\n # Set up the optimizer\n optimizer_kernel = tfp.optimizer.StochasticGradientLangevinDynamics(\n learning_rate=learning_rate, preconditioner_decay_rate=0.99)\n optimizer_kernel.iterations = global_step\n optimizer = optimizer_kernel.minimize(loss_fn, var_list=[var_1, var_2])\n\n # Number of training steps\n training_steps = 5000\n # Record the steps as and treat them as samples\n samples = [np.zeros([training_steps, 2]), np.zeros([training_steps, 1])]\n self.evaluate(tf1.global_variables_initializer())\n for step in range(training_steps):\n self.evaluate(optimizer)\n sample = [self.evaluate(var_1), self.evaluate(var_2)]\n samples[0][step, :] = sample[0]\n samples[1][step, :] = sample[1]\n\n samples_ = np.concatenate(samples, axis=-1)\n sample_mean = np.mean(samples_, 0)\n self.assertAllClose(sample_mean, true_mean, atol=0.1, rtol=0.1)\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\n# Dependency imports\nimport numpy as np\nfrom scipy import special as sp_special\nfrom scipy import stats as sp_stats\n\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\n\nfrom 
tensorflow_probability.python.internal import test_case\nfrom tensorflow_probability.python.internal import test_util as tfp_test_util\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import\n\ntfb = tfp.bijectors\ntfd = tfp.distributions\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass DirichletTest(test_case.TestCase):\n\n def testSimpleShapes(self):\n alpha = np.random.rand(3)\n dist = tfd.Dirichlet(alpha)\n self.assertEqual(3, self.evaluate(dist.event_shape_tensor()))\n self.assertAllEqual([], self.evaluate(dist.batch_shape_tensor()))\n self.assertEqual(tf.TensorShape([3]), dist.event_shape)\n self.assertEqual(tf.TensorShape([]), dist.batch_shape)\n\n def testComplexShapes(self):\n alpha = np.random.rand(3, 2, 2)\n dist = tfd.Dirichlet(alpha)\n self.assertEqual(2, self.evaluate(dist.event_shape_tensor()))\n self.assertAllEqual([3, 2], self.evaluate(dist.batch_shape_tensor()))\n self.assertEqual(tf.TensorShape([2]), dist.event_shape)\n self.assertEqual(tf.TensorShape([3, 2]), dist.batch_shape)\n\n def testConcentrationProperty(self):\n alpha = [[1., 2, 3]]\n dist = tfd.Dirichlet(alpha)\n self.assertEqual([1, 3], dist.concentration.shape)\n self.assertAllClose(alpha, self.evaluate(dist.concentration))\n\n def testPdfXProper(self):\n alpha = [[1., 2, 3]]\n dist = tfd.Dirichlet(alpha, validate_args=True)\n self.evaluate(dist.prob([.1, .3, .6]))\n self.evaluate(dist.prob([.2, .3, .5]))\n # Either condition can trigger.\n with self.assertRaisesOpError('samples must be positive'):\n self.evaluate(dist.prob([-1., 1.5, 0.5]))\n with self.assertRaisesOpError('samples must be positive'):\n self.evaluate(dist.prob([0., .1, .9]))\n with self.assertRaisesOpError('sample last-dimension must sum to `1`'):\n self.evaluate(dist.prob([.1, .2, .8]))\n\n def testLogPdfOnBoundaryIsFiniteWhenAlphaIsOne(self):\n # Test concentration = 1. for each dimension.\n concentration = 3 * np.ones((10, 10)).astype(np.float32)\n concentration[range(10), range(10)] = 1.\n x = 1 / 9. * np.ones((10, 10)).astype(np.float32)\n x[range(10), range(10)] = 0.\n dist = tfd.Dirichlet(concentration)\n log_prob = self.evaluate(dist.log_prob(x))\n self.assertAllEqual(\n np.ones_like(log_prob, dtype=np.bool), np.isfinite(log_prob))\n\n # Test when concentration[k] = 1., and x is zero at various dimensions.\n dist = tfd.Dirichlet(10 * [1.])\n log_prob = self.evaluate(dist.log_prob(x))\n self.assertAllEqual(\n np.ones_like(log_prob, dtype=np.bool), np.isfinite(log_prob))\n\n def testPdfZeroBatches(self):\n alpha = [1., 2]\n x = [.5, .5]\n dist = tfd.Dirichlet(alpha)\n pdf = dist.prob(x)\n self.assertAllClose(1., self.evaluate(pdf))\n self.assertEqual((), pdf.shape)\n\n def testPdfZeroBatchesNontrivialX(self):\n alpha = [1., 2]\n x = [.3, .7]\n dist = tfd.Dirichlet(alpha)\n pdf = dist.prob(x)\n self.assertAllClose(7. / 5, self.evaluate(pdf))\n self.assertEqual((), pdf.shape)\n\n def testPdfUniformZeroBatches(self):\n # Corresponds to a uniform distribution\n alpha = [1., 1, 1]\n x = [[.2, .5, .3], [.3, .4, .3]]\n dist = tfd.Dirichlet(alpha)\n pdf = dist.prob(x)\n self.assertAllClose([2., 2.], self.evaluate(pdf))\n self.assertEqual((2), pdf.shape)\n\n def testPdfAlphaStretchedInBroadcastWhenSameRank(self):\n alpha = [[1., 2]]\n x = [[.5, .5], [.3, .7]]\n dist = tfd.Dirichlet(alpha)\n pdf = dist.prob(x)\n self.assertAllClose([1., 7. 
/ 5], self.evaluate(pdf))\n self.assertEqual((2), pdf.shape)\n\n def testPdfAlphaStretchedInBroadcastWhenLowerRank(self):\n alpha = [1., 2]\n x = [[.5, .5], [.2, .8]]\n pdf = tfd.Dirichlet(alpha).prob(x)\n self.assertAllClose([1., 8. / 5], self.evaluate(pdf))\n self.assertEqual((2), pdf.shape)\n\n def testPdfXStretchedInBroadcastWhenSameRank(self):\n alpha = [[1., 2], [2., 3]]\n x = [[.5, .5]]\n pdf = tfd.Dirichlet(alpha).prob(x)\n self.assertAllClose([1., 3. / 2], self.evaluate(pdf))\n self.assertEqual((2), pdf.shape)\n\n def testPdfXStretchedInBroadcastWhenLowerRank(self):\n alpha = [[1., 2], [2., 3]]\n x = [.5, .5]\n pdf = tfd.Dirichlet(alpha).prob(x)\n self.assertAllClose([1., 3. / 2], self.evaluate(pdf))\n self.assertEqual((2), pdf.shape)\n\n def testMean(self):\n alpha = [1., 2, 3]\n dirichlet = tfd.Dirichlet(concentration=alpha)\n self.assertEqual(dirichlet.mean().shape, [3])\n expected_mean = sp_stats.dirichlet.mean(alpha)\n self.assertAllClose(self.evaluate(dirichlet.mean()), expected_mean)\n\n def testCovarianceFromSampling(self):\n alpha = np.array([[1., 2, 3],\n [2.5, 4, 0.01]], dtype=np.float32)\n dist = tfd.Dirichlet(alpha) # batch_shape=[2], event_shape=[3]\n x = dist.sample(int(250e3), seed=tfp_test_util.test_seed())\n sample_mean = tf.reduce_mean(x, axis=0)\n x_centered = x - sample_mean[None, ...]\n sample_cov = tf.reduce_mean(\n tf.matmul(x_centered[..., None], x_centered[..., None, :]), axis=0)\n sample_var = tf.linalg.diag_part(sample_cov)\n sample_stddev = tf.sqrt(sample_var)\n\n [\n sample_mean_,\n sample_cov_,\n sample_var_,\n sample_stddev_,\n analytic_mean,\n analytic_cov,\n analytic_var,\n analytic_stddev,\n ] = self.evaluate([\n sample_mean,\n sample_cov,\n sample_var,\n sample_stddev,\n dist.mean(),\n dist.covariance(),\n dist.variance(),\n dist.stddev(),\n ])\n\n self.assertAllClose(sample_mean_, analytic_mean, atol=0.04, rtol=0.)\n self.assertAllClose(sample_cov_, analytic_cov, atol=0.06, rtol=0.)\n self.assertAllClose(sample_var_, analytic_var, atol=0.03, rtol=0.)\n self.assertAllClose(sample_stddev_, analytic_stddev, atol=0.02, rtol=0.)\n\n def testVariance(self):\n alpha = [1., 2, 3]\n denominator = np.sum(alpha)**2 * (np.sum(alpha) + 1)\n dirichlet = tfd.Dirichlet(concentration=alpha)\n self.assertEqual(dirichlet.covariance().shape, (3, 3))\n expected_covariance = np.diag(sp_stats.dirichlet.var(alpha))\n expected_covariance += [[0., -2, -3], [-2, 0, -6], [-3, -6, 0]\n ] / denominator\n self.assertAllClose(\n self.evaluate(dirichlet.covariance()), expected_covariance)\n\n def testMode(self):\n alpha = np.array([1.1, 2, 3])\n expected_mode = (alpha - 1) / (np.sum(alpha) - 3)\n dirichlet = tfd.Dirichlet(concentration=alpha)\n self.assertEqual(dirichlet.mode().shape, [3])\n self.assertAllClose(self.evaluate(dirichlet.mode()), expected_mode)\n\n def testModeInvalid(self):\n alpha = np.array([1., 2, 3])\n dirichlet = tfd.Dirichlet(\n concentration=alpha, allow_nan_stats=False)\n with self.assertRaisesOpError('Condition x < y.*'):\n self.evaluate(dirichlet.mode())\n\n def testModeEnableAllowNanStats(self):\n alpha = np.array([1., 2, 3])\n dirichlet = tfd.Dirichlet(\n concentration=alpha, allow_nan_stats=True)\n expected_mode = np.zeros_like(alpha) + np.nan\n\n self.assertEqual(dirichlet.mode().shape, [3])\n self.assertAllClose(self.evaluate(dirichlet.mode()), expected_mode)\n\n def testEntropy(self):\n alpha = [1., 2, 3]\n dirichlet = tfd.Dirichlet(concentration=alpha)\n self.assertEqual(dirichlet.entropy().shape, ())\n expected_entropy = 
sp_stats.dirichlet.entropy(alpha)\n self.assertAllClose(self.evaluate(dirichlet.entropy()), expected_entropy)\n\n def testSample(self):\n alpha = [1., 2]\n dirichlet = tfd.Dirichlet(alpha)\n n = tf.constant(100000)\n samples = dirichlet.sample(n)\n sample_values = self.evaluate(samples)\n self.assertEqual(sample_values.shape, (100000, 2))\n self.assertTrue(np.all(sample_values > 0.0))\n self.assertLess(\n sp_stats.kstest(\n # Beta is a univariate distribution.\n sample_values[:, 0],\n sp_stats.beta(a=1., b=2.).cdf)[0],\n 0.01)\n\n def testDirichletFullyReparameterized(self):\n alpha = tf.constant([1.0, 2.0, 3.0])\n _, grad_alpha = tfp.math.value_and_gradient(\n lambda a: tfd.Dirichlet(a).sample(100), alpha)\n self.assertIsNotNone(grad_alpha)\n\n def testDirichletDirichletKL(self):\n conc1 = np.array([[1., 2., 3., 1.5, 2.5, 3.5],\n [1.5, 2.5, 3.5, 4.5, 5.5, 6.5]])\n conc2 = np.array([[0.5, 1., 1.5, 2., 2.5, 3.]])\n\n d1 = tfd.Dirichlet(conc1)\n d2 = tfd.Dirichlet(conc2)\n x = d1.sample(int(1e4), seed=tfp_test_util.test_seed())\n kl_sample = tf.reduce_mean(d1.log_prob(x) - d2.log_prob(x), axis=0)\n kl_actual = tfd.kl_divergence(d1, d2)\n\n kl_sample_val = self.evaluate(kl_sample)\n kl_actual_val = self.evaluate(kl_actual)\n\n self.assertEqual(conc1.shape[:-1], kl_actual.shape)\n\n kl_expected = (\n sp_special.gammaln(np.sum(conc1, -1))\n - sp_special.gammaln(np.sum(conc2, -1))\n - np.sum(sp_special.gammaln(conc1) - sp_special.gammaln(conc2), -1)\n + np.sum((conc1 - conc2) * (sp_special.digamma(conc1) -\n sp_special.digamma(\n np.sum(conc1, -1, keepdims=True))), -1))\n\n self.assertAllClose(kl_expected, kl_actual_val, atol=0., rtol=1e-5)\n self.assertAllClose(kl_sample_val, kl_actual_val, atol=0., rtol=1e-1)\n\n # Make sure KL(d1||d1) is 0\n kl_same = self.evaluate(tfd.kl_divergence(d1, d1))\n self.assertAllClose(kl_same, np.zeros_like(kl_expected))\n\n def testDegenerateAlignedStridedSlice(self):\n # Corresponds to the TF fix in tensorflow/tensorflow#d9b3db0\n d = tfd.Dirichlet(tf.math.softplus(tf.zeros([2, 2, 2])))\n batch_shape = [2, 2]\n self.assertAllEqual(batch_shape, d.batch_shape)\n self.assertAllEqual(np.zeros(batch_shape)[1:0].shape,\n d[1:0].batch_shape)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass DirichletFromVariableTest(test_case.TestCase):\n\n def testGradients(self):\n x = tf.Variable([1., 1.1, 1.2])\n d = tfd.Dirichlet(concentration=x, validate_args=True)\n with tf.GradientTape() as tape:\n loss = -d.log_prob([0.1, 0.2, 0.7])\n g = tape.gradient(loss, d.trainable_variables)\n self.assertLen(g, 1)\n self.assertAllNotNone(g)\n\n def testAssertions(self):\n x = tfp.util.TransformedVariable(0.3679, tfb.Exp(), shape=None)\n with self.assertRaisesRegexp(\n ValueError, 'Argument `concentration` must have rank at least 1.'):\n d = tfd.Dirichlet(concentration=x, validate_args=True)\n self.evaluate([v.initializer for v in d.variables])\n self.evaluate(d.entropy())\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2019 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing 
permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Decorators for testing TFP code under combinations of TF features.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow.python.eager import def_function # pylint: disable=g-direct-tensorflow-import\nfrom tensorflow.python.framework import combinations # pylint: disable=g-direct-tensorflow-import\nfrom tensorflow.python.framework import test_combinations # pylint: disable=g-direct-tensorflow-import\n\n__all__ = [\n 'test_all_tf_execution_regimes',\n 'test_graph_and_eager_modes',\n]\n\n\[email protected]\ndef _tf_function_mode_context(tf_function_mode):\n \"\"\"Context manager controlling `tf.function` behavior (enabled/disabled).\n\n Before activating, the previously set mode is stored. Then the mode is changed\n to the given `tf_function_mode` and control is yielded back to the caller. Upon\n exiting the context, the mode is returned to its original state.\n\n Args:\n tf_function_mode: a Python `str`, either 'disabled' or 'enabled'. If\n 'enabled', `@tf.function`-decorated code behaves as usual (i.e., a background\n graph is created). If 'disabled', `@tf.function`-decorated code will behave\n as if it had not been `@tf.function`-decorated. Since users will be able to\n do this (e.g., to debug library code that has been\n `@tf.function`-decorated), we need to ensure our tests cover the behavior\n when this is the case.\n\n Yields:\n None\n \"\"\"\n if tf_function_mode not in ['enabled', 'disabled']:\n raise ValueError(\n 'Only allowable values for tf_function_mode_context are `enabled` and '\n '`disabled`; but got `{}`'.format(tf_function_mode))\n original_mode = def_function.RUN_FUNCTIONS_EAGERLY\n try:\n tf.config.experimental_run_functions_eagerly(tf_function_mode == 'disabled')\n yield\n finally:\n tf.config.experimental_run_functions_eagerly(original_mode)\n\n\nclass ExecuteFunctionsEagerlyCombination(test_combinations.TestCombination):\n \"\"\"A `TestCombination` for enabling/disabling `tf.function` execution modes.\n\n For more on `TestCombination`, check out\n 'tensorflow/python/framework/test_combinations.py' in the TensorFlow code\n base.\n\n This `TestCombination` supports two values for the `tf_function`\n combination argument: 'disabled' and 'enabled'. The mode switching is\n performed using `tf.config.experimental_run_functions_eagerly(mode)`.\n \"\"\"\n\n def context_managers(self, kwargs):\n mode = kwargs.pop('tf_function', 'enabled')\n return [_tf_function_mode_context(mode)]\n\n def parameter_modifiers(self):\n return [test_combinations.OptionalParameter('tf_function')]\n\n\ndef test_all_tf_execution_regimes(test_class_or_method=None):\n \"\"\"Decorator for generating a collection of tests in various contexts.\n\n Must be applied to subclasses of `parameterized.TestCase` (from\n `absl/testing`), or a method of such a subclass.\n\n When applied to a test method, this decorator results in the replacement of\n that method with a collection of new test methods, each executed under a\n different set of context managers that control some aspect of the execution\n model. This decorator generates three test scenario combinations:\n\n 1. Eager mode with `tf.function` decorations enabled\n 2. Eager mode with `tf.function` decorations disabled\n 3. 
Graph mode (everything)\n\n When applied to a test class, all the methods in the class are affected.\n\n Args:\n test_class_or_method: the `TestCase` class or method to decorate.\n\n Returns:\n decorator: A generated TF `test_combinations` decorator, or if\n `test_class_or_method` is not `None`, the generated decorator applied to\n that function.\n \"\"\"\n decorator = test_combinations.generate(\n (test_combinations.combine(mode='graph',\n tf_function='enabled') +\n test_combinations.combine(mode='eager',\n tf_function=['enabled', 'disabled'])),\n test_combinations=[\n combinations.EagerGraphCombination(),\n ExecuteFunctionsEagerlyCombination(),\n ])\n\n if test_class_or_method:\n return decorator(test_class_or_method)\n return decorator\n\n\ndef test_graph_and_eager_modes(test_class_or_method=None):\n \"\"\"Decorator for generating graph and eager mode tests from a single test.\n\n Must be applied to subclasses of `parameterized.TestCase` (from\n absl/testing), or a method of such a subclass.\n\n When applied to a test method, this decorator results in the replacement of\n that method with two new test methods, one executed in graph mode and the\n other in eager mode.\n\n When applied to a test class, all the methods in the class are affected.\n\n Args:\n test_class_or_method: the `TestCase` class or method to decorate.\n\n Returns:\n decorator: A generated TF `test_combinations` decorator, or if\n `test_class_or_method` is not `None`, the generated decorator applied to\n that function.\n \"\"\"\n decorator = test_combinations.generate(\n test_combinations.combine(mode=['graph', 'eager']),\n test_combinations=[combinations.EagerGraphCombination()])\n\n if test_class_or_method:\n return decorator(test_class_or_method)\n return decorator\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for MixtureSameFamily distribution.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\n\nfrom tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps\nfrom tensorflow_probability.python.internal import tensorshape_util\nfrom tensorflow_probability.python.internal import test_case\nfrom tensorflow_probability.python.internal import test_util as tfp_test_util\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import\n\ntfd = tfp.distributions\n\n\nclass _MixtureSameFamilyTest(tfp_test_util.VectorDistributionTestHelpers):\n\n def testSampleAndLogProbUnivariateShapes(self):\n gm = tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(\n probs=self._build_tensor([0.3, 0.7])),\n components_distribution=tfd.Normal(\n loc=self._build_tensor([-1., 1]),\n scale=self._build_tensor([0.1, 
0.5])))\n x = gm.sample([4, 5], seed=tfp_test_util.test_seed())\n log_prob_x = gm.log_prob(x)\n self.assertAllEqual([4, 5], self._shape(x))\n self.assertAllEqual([4, 5], self._shape(log_prob_x))\n\n def testSampleAndLogProbBatch(self):\n gm = tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(\n probs=self._build_tensor([[0.3, 0.7]])),\n components_distribution=tfd.Normal(\n loc=self._build_tensor([[-1., 1]]),\n scale=self._build_tensor([[0.1, 0.5]])))\n x = gm.sample([4, 5], seed=tfp_test_util.test_seed())\n log_prob_x = gm.log_prob(x)\n self.assertAllEqual([4, 5, 1], self._shape(x))\n self.assertAllEqual([4, 5, 1], self._shape(log_prob_x))\n\n def testSampleAndLogProbShapesBroadcastMix(self):\n mix_probs = self._build_tensor([.3, .7])\n bern_probs = self._build_tensor([[.4, .6], [.25, .75]])\n bm = tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(probs=mix_probs),\n components_distribution=tfd.Bernoulli(probs=bern_probs))\n x = bm.sample([4, 5], seed=tfp_test_util.test_seed())\n log_prob_x = bm.log_prob(x)\n x_ = self.evaluate(x)\n self.assertAllEqual([4, 5, 2], self._shape(x))\n self.assertAllEqual([4, 5, 2], self._shape(log_prob_x))\n self.assertAllEqual(\n np.ones_like(x_, dtype=np.bool), np.logical_or(x_ == 0., x_ == 1.))\n\n def testSampleAndLogProbMultivariateShapes(self):\n gm = self._build_mvndiag_mixture(\n probs=[0.3, 0.7],\n loc=[[-1., 1], [1, -1]],\n scale_identity_multiplier=[1., 0.5])\n x = gm.sample([4, 5], seed=tfp_test_util.test_seed())\n log_prob_x = gm.log_prob(x)\n self.assertAllEqual([4, 5, 2], self._shape(x))\n self.assertAllEqual([4, 5], self._shape(log_prob_x))\n\n def testSampleAndLogProbBatchMultivariateShapes(self):\n gm = self._build_mvndiag_mixture(\n probs=[0.3, 0.7],\n loc=[[[-1., 1], [1, -1]], [[0., 1], [1, 0]]],\n scale_identity_multiplier=[1., 0.5])\n x = gm.sample([4, 5], seed=tfp_test_util.test_seed())\n log_prob_x = gm.log_prob(x)\n self.assertAllEqual([4, 5, 2, 2], self._shape(x))\n self.assertAllEqual([4, 5, 2], self._shape(log_prob_x))\n\n def testSampleConsistentLogProb(self):\n gm = self._build_mvndiag_mixture(\n probs=[0.3, 0.7],\n loc=[[-1., 1], [1, -1]],\n scale_identity_multiplier=[1., 0.5])\n # Ball centered at component0's mean.\n self.run_test_sample_consistent_log_prob(\n self.evaluate, gm, radius=1., center=[-1., 1], rtol=0.02)\n # Larger ball centered at component1's mean.\n self.run_test_sample_consistent_log_prob(\n self.evaluate, gm, radius=1., center=[1., -1], rtol=0.02)\n\n def testLogCdf(self):\n gm = tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(\n probs=self._build_tensor([0.3, 0.7])),\n components_distribution=tfd.Normal(\n loc=self._build_tensor([-1., 1]),\n scale=self._build_tensor([0.1, 0.5])))\n x = gm.sample(10, seed=tfp_test_util.test_seed())\n actual_log_cdf = gm.log_cdf(x)\n expected_log_cdf = tf.reduce_logsumexp(\n (gm.mixture_distribution.logits_parameter() +\n gm.components_distribution.log_cdf(x[..., tf.newaxis])),\n axis=1)\n actual_log_cdf_, expected_log_cdf_ = self.evaluate(\n [actual_log_cdf, expected_log_cdf])\n self.assertAllClose(actual_log_cdf_, expected_log_cdf_, rtol=1e-6, atol=0.0)\n\n def testCovarianceWithBatch(self):\n d = self._build_mvndiag_mixture(\n probs=[0.2, 0.3, 0.5],\n loc=np.zeros((2, 1, 5, 3, 4)),\n scale_identity_multiplier=[1., 0.75, 0.5])\n self.assertAllEqual((2, 1, 5, 4, 4), self.evaluate(d.covariance()).shape)\n\n def testSampleConsistentMeanCovariance(self):\n gm = self._build_mvndiag_mixture(\n probs=[0.3, 0.7],\n loc=[[-1., 1], [1, -1]],\n 
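# Comment added for clarity: two diagonal-covariance Gaussian components,\n # centered at [-1., 1] and [1, -1]; sample moments are then checked\n # against the analytic mean and covariance.\n 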
scale_identity_multiplier=[1., 0.5])\n self.run_test_sample_consistent_mean_covariance(self.evaluate, gm)\n\n def testVarianceConsistentCovariance(self):\n gm = self._build_mvndiag_mixture(\n probs=[0.3, 0.7],\n loc=[[-1., 1], [1, -1]],\n scale_identity_multiplier=[1., 0.5])\n cov_, var_ = self.evaluate([gm.covariance(), gm.variance()])\n self.assertAllClose(cov_.diagonal(), var_, atol=0.)\n\n def testReparameterizationOfNonReparameterizedComponents(self):\n with self.assertRaises(ValueError):\n tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(\n logits=self._build_tensor([-0.3, 0.4])),\n components_distribution=tfd.Bernoulli(\n logits=self._build_tensor([0.1, -0.1])),\n reparameterize=True)\n\n def testSecondGradientIsDisabled(self):\n if not self.use_static_shape:\n return\n\n # Testing using GradientTape in both eager and graph modes.\n # GradientTape does not support some control flow ops in graph mode, which\n # is not a problem here as this code does not use any control flow.\n logits = self._build_tensor([[0.1, 0.5]])\n with tf.GradientTape() as g:\n g.watch(logits)\n with tf.GradientTape() as gg:\n gg.watch(logits)\n mixture = tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(\n logits=logits),\n components_distribution=tfd.Normal(\n loc=self._build_tensor([[0.4, 0.25]]),\n scale=self._build_tensor([[0.1, 0.5]])),\n reparameterize=True)\n\n sample = mixture.sample()\n grad = gg.gradient(sample, logits)\n\n with self.assertRaises(LookupError):\n g.gradient(grad, logits)\n\n def _testMixtureReparameterizationGradients(\n self, mixture_func, parameters, function, num_samples):\n assert function in ['mean', 'variance']\n\n if not self.use_static_shape:\n return\n\n def sample_estimate(*parameters):\n mixture = mixture_func(*parameters)\n values = mixture.sample(num_samples, seed=tfp_test_util.test_seed())\n if function == 'variance':\n values = tf.math.squared_difference(values, mixture.mean())\n return tf.reduce_mean(values, axis=0)\n\n def exact(*parameters):\n mixture = mixture_func(*parameters)\n # Normal mean does not depend on the scale, so add 0 * variance\n # to avoid None gradients. 
Also do the same for variance, just in case.\n if function == 'variance':\n return mixture.variance() + 0 * mixture.mean()\n elif function == 'mean':\n return mixture.mean() + 0 * mixture.variance()\n\n _, actual = tfp.math.value_and_gradient(sample_estimate, parameters)\n _, expected = tfp.math.value_and_gradient(exact, parameters)\n self.assertAllClose(actual, expected, atol=0.1, rtol=0.2)\n\n def testReparameterizationGradientsNormalScalarComponents(self):\n def mixture_func(logits, loc, scale):\n return tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(logits=logits),\n components_distribution=tfd.Normal(loc=loc, scale=scale),\n reparameterize=True)\n\n for function in ['mean', 'variance']:\n self._testMixtureReparameterizationGradients(\n mixture_func,\n [self._build_tensor([[0.1, 0.5]]), # logits\n self._build_tensor([[0.4, 0.25]]), # loc\n self._build_tensor([[0.1, 0.5]])], # scale\n function,\n num_samples=10000)\n\n def testReparameterizationGradientsNormalVectorComponents(self):\n def mixture_func(logits, loc, scale):\n return tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(logits=logits),\n components_distribution=tfd.Independent(\n tfd.Normal(loc=loc, scale=scale), reinterpreted_batch_ndims=1),\n reparameterize=True)\n\n for function in ['mean', 'variance']:\n self._testMixtureReparameterizationGradients(\n mixture_func,\n [self._build_tensor([0.5, -0.2, 0.1]), # logits\n self._build_tensor([[-1., 1], [0.5, -1], [-1., 0.5]]), # mean\n self._build_tensor([[0.1, 0.5], [0.3, 0.5], [0.2, 0.3]])], # scale\n function,\n num_samples=20000)\n\n def testReparameterizationGradientsNormalMatrixComponents(self):\n def mixture_func(logits, loc, scale):\n return tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(logits=logits),\n components_distribution=tfd.Independent(\n tfd.Normal(loc=loc, scale=scale), reinterpreted_batch_ndims=2),\n reparameterize=True)\n\n for function in ['mean', 'variance']:\n self._testMixtureReparameterizationGradients(\n mixture_func,\n [self._build_tensor([0.7, 0.2, 0.1]), # logits\n self._build_tensor([[[-1., 1]], [[0.5, -1]], [[-1., 0.5]]]), # mean\n # scale\n self._build_tensor([[[0.1, 0.5]], [[0.3, 0.5]], [[0.2, 0.3]]])],\n function,\n num_samples=50000)\n\n def testReparameterizationGradientsExponentialScalarComponents(self):\n def mixture_func(logits, rate):\n return tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(logits=logits),\n components_distribution=tfd.Exponential(rate=rate),\n reparameterize=True)\n\n for function in ['mean', 'variance']:\n self._testMixtureReparameterizationGradients(\n mixture_func,\n [self._build_tensor([0.7, 0.2, 0.1]), # logits\n self._build_tensor([1., 0.5, 1.])], # rate\n function,\n num_samples=10000)\n\n def testDeterministicSampling(self):\n seed = tfp_test_util.test_seed()\n tf1.set_random_seed(seed)\n dist = tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(logits=[0., 0.]),\n components_distribution=tfd.Normal(loc=[0., 200.], scale=[1., 1.]))\n sample_1 = self.evaluate(dist.sample([100], seed=seed))\n tf1.set_random_seed(seed)\n sample_2 = self.evaluate(dist.sample([100], seed=seed))\n self.assertAllClose(sample_1, sample_2)\n\n def testGradientsThroughParams(self):\n logits = self._build_variable([1., 2., 3.])\n loc = self._build_variable([0., 0., 0])\n scale = self._build_variable(1.)\n dist = tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(logits=logits),\n components_distribution=tfd.Logistic(loc=loc, scale=scale),\n validate_args=True)\n 
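# Comment added for clarity: differentiate the negative log-likelihood of a\n # two-point batch with respect to all three variables (logits, loc, scale)\n # and check that every gradient is defined.\n 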
with tf.GradientTape() as tape:\n loss = -dist.log_prob([5., 4.])\n grad = tape.gradient(loss, dist.trainable_variables)\n self.assertLen(grad, 3)\n self.assertAllNotNone(grad)\n\n logits = self._build_variable(np.zeros((4, 4, 5)))\n loc = self._build_variable(np.zeros((4, 4, 5, 2, 3)))\n scale = self._build_variable(1.)\n dist = tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(logits=logits),\n components_distribution=tfd.Independent(\n tfd.Logistic(loc=loc, scale=scale),\n reinterpreted_batch_ndims=self._build_tensor(2, dtype=np.int32)),\n validate_args=True)\n with tf.GradientTape() as tape:\n loss = -dist.log_prob(np.zeros((4, 4, 2, 3)))\n grad = tape.gradient(loss, dist.trainable_variables)\n self.assertLen(grad, 3)\n self.assertAllNotNone(grad)\n\n def testExcessiveConcretizationOfParams(self):\n logits = tfp_hps.defer_and_count_usage(\n self._build_variable(np.zeros((4, 4, 5)), name='logits'))\n concentration = tfp_hps.defer_and_count_usage(\n self._build_variable(np.zeros((4, 4, 5, 3)), name='concentration'))\n dist = tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(logits=logits),\n components_distribution=tfd.Dirichlet(concentration=concentration),\n validate_args=True)\n\n # Many methods use mixture_distribution and components_distribution at most\n # once, and thus incur no extra reads/concretizations of parameters.\n\n for method in ('batch_shape_tensor', 'event_shape_tensor',\n 'mean', 'sample'):\n with tfp_hps.assert_no_excessive_var_usage(method, max_permissible=2):\n getattr(dist, method)()\n\n for method in ('log_prob', 'prob'):\n with tfp_hps.assert_no_excessive_var_usage(method, max_permissible=2):\n getattr(dist, method)(np.ones((4, 4, 3)) / 3.)\n\n # TODO(b/140579567): The `variance()` and `covariance()` methods require\n # calling both:\n # - `self.components_distribution.mean()`\n # - `self.components_distribution.variance()` or `.covariance()`\n # Thus, these methods incur an additional concretization (or two if\n # `validate_args=True` for `self.components_distribution`).\n\n for method in ('variance', 'covariance'):\n with tfp_hps.assert_no_excessive_var_usage(method, max_permissible=3):\n getattr(dist, method)()\n\n # TODO(b/140579567): When event ndims is not known statically, several\n # methods call `self.components_distribution.event_shape_tensor()` to\n # determine the number of event dimensions. Depending on the underlying\n # distribution, this would likely incur additional concretizations of the\n # parameters of `self.components_distribution`. 
The methods are:\n # - `log_cdf` and `cdf`\n # - `log_prob` and `prob`\n # - `mean` and `variance`\n # - `sample`\n #\n # NOTE: `Distribution.survival_function` and `log_survival_function` will\n # call `Distribution.cdf` and `Distribution.log_cdf`, resulting in one\n # additional call to `_parameter_control_dependencies`, and thus an\n # additional concretizations of the underlying distribution parameters.\n\n def testExcessiveConcretizationOfParamsWithReparameterization(self):\n logits = tfp_hps.defer_and_count_usage(self._build_variable(\n np.zeros(5), name='logits', static_rank=True))\n loc = tfp_hps.defer_and_count_usage(self._build_variable(\n np.zeros((4, 4, 5)), name='loc', static_rank=True))\n scale = tfp_hps.defer_and_count_usage(self._build_variable(\n 1., name='scale', static_rank=True))\n dist = tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(logits=logits),\n components_distribution=tfd.Logistic(loc=loc, scale=scale),\n reparameterize=True, validate_args=True)\n\n # TODO(b/140579567): With reparameterization, there are additional reads of\n # the parameters of the underlying mixture and components distributions when\n # sampling, from calls in `_distributional_transform` to:\n #\n # - `self.mixture_distribution.logits_parameter`\n # - `self.components_distribution.log_prob`\n # - `self.components_distribution.cdf`\n #\n # NOTE: In the unlikely case that samples have a statically-known rank but\n # the rank of `self.components_distribution.event_shape` is not known\n # statically, there can be additional reads in `_distributional_transform`\n # from calling `self.components_distribution.is_scalar_event`.\n\n with tfp_hps.assert_no_excessive_var_usage('sample', max_permissible=4):\n dist.sample()\n\n def testSampleGradientsThroughParams(self):\n logits = self._build_variable(np.zeros(5), static_rank=True)\n loc = self._build_variable(np.zeros((4, 5, 2, 3)), static_rank=True)\n scale = self._build_variable(1., static_rank=True)\n dist = tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(logits=logits),\n components_distribution=tfd.Independent(\n tfd.Logistic(loc=loc, scale=scale), reinterpreted_batch_ndims=2),\n reparameterize=True, validate_args=True)\n with tf.GradientTape() as tape:\n loss = tf.reduce_sum(dist.sample(2))\n grad = tape.gradient(loss, dist.trainable_variables)\n self.assertLen(grad, 3)\n self.assertAllNotNone(grad)\n\n def _shape(self, x):\n if self.use_static_shape:\n return tensorshape_util.as_list(x.shape)\n else:\n return self.evaluate(tf.shape(x))\n\n def _build_mvndiag_mixture(self, probs, loc, scale_identity_multiplier):\n components_distribution = tfd.MultivariateNormalDiag(\n loc=self._build_tensor(loc),\n scale_identity_multiplier=self._build_tensor(\n scale_identity_multiplier))\n\n # Use a no-op `Independent` wrapper to possibly create dynamic ndims.\n wrapped_components_distribution = tfd.Independent(\n components_distribution,\n reinterpreted_batch_ndims=self._build_tensor(0, dtype=np.int32))\n # Lambda ensures that the covariance fn sees `self=components_distribution`.\n wrapped_components_distribution._covariance = (\n lambda: components_distribution.covariance()) # pylint: disable=unnecessary-lambda\n\n gm = tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(\n probs=self._build_tensor(probs)),\n components_distribution=wrapped_components_distribution,\n validate_args=True)\n return gm\n\n def _build_tensor(self, ndarray, dtype=None):\n # Enforce parameterized dtype and static/dynamic testing.\n ndarray = 
np.asarray(ndarray).astype(\n dtype if dtype is not None else self.dtype)\n if self.use_static_shape:\n return tf.convert_to_tensor(ndarray)\n else:\n return tf1.placeholder_with_default(ndarray, shape=None)\n\n def _build_variable(self, ndarray, name=None, dtype=None, static_rank=False):\n if dtype is None:\n dtype = self.dtype\n ndarray = np.asarray(ndarray).astype(dtype)\n if self.use_static_shape:\n return tf.Variable(ndarray, name=name, dtype=dtype)\n elif static_rank:\n return tf.Variable(ndarray, name=name, dtype=dtype,\n shape=tf.TensorShape([None] * len(ndarray.shape)))\n else:\n return tf.Variable(ndarray, name=name, dtype=dtype,\n shape=tf.TensorShape(None))\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass MixtureSameFamilyTestStatic32(\n _MixtureSameFamilyTest,\n test_case.TestCase):\n use_static_shape = True\n dtype = np.float32\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass MixtureSameFamilyTestDynamic32(\n _MixtureSameFamilyTest,\n test_case.TestCase):\n use_static_shape = False\n dtype = np.float32\n\n def testMatchingComponentsSizeAssertions(self):\n logits = self._build_variable(np.zeros(5))\n loc = self._build_variable(np.zeros((4, 5, 2, 3)), static_rank=True)\n scale = self._build_variable(1.)\n dist = tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(logits=logits),\n components_distribution=tfd.Independent(\n tfd.Logistic(loc=loc, scale=scale), reinterpreted_batch_ndims=2),\n validate_args=True)\n\n self.evaluate([v.initializer for v in [logits, loc, scale]])\n self.evaluate(dist.mean())\n\n msg = ('`mixture_distribution` components.* does not equal '\n r'`components_distribution.batch_shape\\[-1\\]`')\n with self.assertRaisesRegex(Exception, msg):\n with tf.control_dependencies([loc.assign(np.zeros((4, 7, 2, 3)))]):\n self.evaluate(dist.mean())\n\n def testMatchingBatchShapeAssertions(self):\n logits = self._build_variable(np.zeros(5))\n loc = self._build_variable(np.zeros((4, 5, 2, 3)), static_rank=True)\n scale = self._build_variable(1.)\n dist = tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(logits=logits),\n components_distribution=tfd.Independent(\n tfd.Logistic(loc=loc, scale=scale), reinterpreted_batch_ndims=2),\n validate_args=True)\n\n self.evaluate([v.initializer for v in [logits, loc, scale]])\n self.evaluate(dist.sample())\n\n msg = ('`mixture_distribution.batch_shape`.* is not compatible with '\n '`components_distribution.batch_shape')\n with self.assertRaisesRegex(Exception, msg):\n with tf.control_dependencies([logits.assign(np.zeros((4, 3, 5)))]):\n self.evaluate(dist.sample())\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass MixtureSameFamilyTestStatic64(\n _MixtureSameFamilyTest,\n test_case.TestCase):\n use_static_shape = True\n dtype = np.float64\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# 
============================================================================\n\"\"\"Positive-semidefinite kernels package.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport warnings\n\nfrom tensorflow_probability.python.math.psd_kernels.exp_sin_squared import ExpSinSquared\nfrom tensorflow_probability.python.math.psd_kernels.exponentiated_quadratic import ExponentiatedQuadratic\nfrom tensorflow_probability.python.math.psd_kernels.feature_scaled import FeatureScaled\nfrom tensorflow_probability.python.math.psd_kernels.feature_transformed import FeatureTransformed\nfrom tensorflow_probability.python.math.psd_kernels.kumaraswamy_transformed import KumaraswamyTransformed\nfrom tensorflow_probability.python.math.psd_kernels.matern import MaternFiveHalves\nfrom tensorflow_probability.python.math.psd_kernels.matern import MaternOneHalf\nfrom tensorflow_probability.python.math.psd_kernels.matern import MaternThreeHalves\nfrom tensorflow_probability.python.math.psd_kernels.polynomial import Linear\nfrom tensorflow_probability.python.math.psd_kernels.polynomial import Polynomial\nfrom tensorflow_probability.python.math.psd_kernels.positive_semidefinite_kernel import PositiveSemidefiniteKernel\nfrom tensorflow_probability.python.math.psd_kernels.rational_quadratic import RationalQuadratic\nfrom tensorflow_probability.python.math.psd_kernels.schur_complement import SchurComplement\n\n\nfrom tensorflow.python.util.all_util import remove_undocumented\n\nwarnings.warn(\n 'tfp.positive_semidefinite_kernels module has been moved to '\n 'tfp.math.psd_kernels. This alias will be deleted on 2019-12-01',\n stacklevel=5)\n\n_allowed_symbols = [\n 'ExponentiatedQuadratic',\n 'ExpSinSquared',\n 'FeatureScaled',\n 'FeatureTransformed',\n 'KumaraswamyTransformed',\n 'Linear',\n 'MaternFiveHalves',\n 'MaternOneHalf',\n 'MaternThreeHalves',\n 'Polynomial',\n 'PositiveSemidefiniteKernel',\n 'RationalQuadratic',\n 'SchurComplement',\n]\n\nremove_undocumented(__name__, _allowed_symbols)\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.experimental.auto_batching import allocation_strategy\nfrom tensorflow_probability.python.experimental.auto_batching import dsl\nfrom tensorflow_probability.python.experimental.auto_batching import instructions\nfrom tensorflow_probability.python.experimental.auto_batching import lowering\nfrom tensorflow_probability.python.experimental.auto_batching import numpy_backend\nfrom tensorflow_probability.python.experimental.auto_batching import tf_backend\nfrom tensorflow_probability.python.experimental.auto_batching import type_inference\nfrom 
tensorflow_probability.python.experimental.auto_batching import virtual_machine as vm\nfrom tensorflow_probability.python.internal import test_case\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import\n\nTF_BACKEND = tf_backend.TensorFlowBackend()\n\nNP_BACKEND = numpy_backend.NumpyBackend()\n\n\ndef _execute(prog, inputs, stack_depth, backend):\n return vm.execute(\n prog, [inputs], max_stack_depth=stack_depth, backend=backend)\n\n\ndef fibonacci_program():\n ab = dsl.ProgramBuilder()\n\n def fib_type(arg_types):\n return arg_types[0]\n\n with ab.function('fibonacci', type_inference=fib_type) as fibonacci:\n ab.param('n')\n ab.var.cond = ab.primop(lambda n: n > 1)\n with ab.if_(ab.var.cond, then_name='recur'):\n ab.var.nm1 = ab.primop(lambda n: n - 1)\n ab.var.fibm1 = ab.call(fibonacci, [ab.var.nm1])\n ab.var.nm2 = ab.primop(lambda n: n - 2)\n ab.var.fibm2 = ab.call(fibonacci, [ab.var.nm2])\n ab.var.ans = ab.primop(lambda fibm1, fibm2: fibm1 + fibm2)\n with ab.else_(else_name='base-case', continue_name='finish'):\n ab.var.ans = ab.const(1)\n ab.return_(ab.var.ans)\n\n prog = ab.program(main=fibonacci)\n return prog\n\n\ndef even_odd_program():\n ab = dsl.ProgramBuilder()\n\n def pred_type(_):\n return instructions.TensorType(np.bool, ())\n\n odd = ab.declare_function('odd', type_inference=pred_type)\n\n with ab.function('even', type_inference=pred_type) as even:\n ab.param('n')\n ab.var.cond = ab.primop(lambda n: n <= 0)\n with ab.if_(ab.var.cond, then_name='base-case'):\n ab.var.ans = ab.const(True)\n with ab.else_(else_name='recur', continue_name='finish'):\n ab.var.nm1 = ab.primop(lambda n: n - 1)\n ab.var.ans = ab.call(odd, [ab.var.nm1])\n ab.return_(ab.var.ans)\n\n with ab.define_function(odd):\n ab.param('n')\n ab.var.cond = ab.primop(lambda n: n <= 0)\n with ab.if_(ab.var.cond, then_name='base-case'):\n ab.var.ans = ab.const(False)\n with ab.else_(else_name='recur', continue_name='finish'):\n ab.var.nm1 = ab.primop(lambda n: n - 1)\n ab.var.ans = ab.call(even, [ab.var.nm1])\n ab.return_(ab.var.ans)\n\n prog = ab.program(main=even)\n return prog\n\n\ndef synthetic_pattern_program():\n ab = dsl.ProgramBuilder()\n def my_type(_):\n int_ = instructions.TensorType(np.int64, ())\n return ((int_, int_), int_, (int_, int_))\n\n with ab.function('synthetic', type_inference=my_type) as syn:\n ab.param('batch_size_index')\n one, three, five = ab.locals_(3)\n ab((one, (five, three))).pattern = ab.primop(lambda: (1, (2, 3)))\n ab(((ab.var.four, five), ab.var.six)).pattern = ab.primop(\n lambda: ((4, 5), 6))\n ab.return_(((one, three), ab.var.four, (five, ab.var.six)))\n\n prog = ab.program(main=syn)\n return prog\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass AutoBatchingTest(test_case.TestCase):\n\n def testAutoBatchingFibonacciNumpy(self):\n for inputs, outputs in ([5], [8]), ([5, 6, 8, 9], [8, 13, 34, 55]):\n # This test doesn't pass with int32 input types, because (apparently)\n # numpy can't tell the difference between an ndarray of shape () and known\n # dtype, and a scalar (literal) whose dtype needs to be inferred.\n # To wit:\n # (np.zeros((), dtype=np.int32) - 1).dtype == np.int64\n # because that's somehow the best numpy can do, even though\n # (np.zeros([6], dtype=np.int32) - 1).dtype == np.int32\n # Needless to say, this messes up type inference for programs like\n # Fibonacci whose unbatched input shape is scalar.\n inputs = np.array(inputs, dtype=np.int64)\n outputs = np.array(outputs, dtype=np.int64)\n prog = 
fibonacci_program()\n # print(prog)\n typed = type_inference.infer_types(prog, [inputs], NP_BACKEND)\n # print(typed)\n alloc = allocation_strategy.optimize(typed)\n lowered = lowering.lower_function_calls(alloc)\n # print(lowered)\n self.assertAllEqual(outputs, _execute(lowered, inputs, 15, NP_BACKEND))\n\n def testAutoBatchingFibonacciTF(self):\n for inputs, outputs in ([5], [8]), ([5, 6, 8, 9], [8, 13, 34, 55]):\n inputs = np.array(inputs, dtype=np.int32)\n outputs = np.array(outputs, dtype=np.int32)\n prog = fibonacci_program()\n # print(prog)\n inputs_t = tf.constant(inputs, dtype=np.int32)\n typed = type_inference.infer_types(prog, [inputs_t], TF_BACKEND)\n # print(typed)\n alloc = allocation_strategy.optimize(typed)\n lowered = lowering.lower_function_calls(alloc)\n # print(lowered)\n self.assertAllEqual(\n outputs, self.evaluate(_execute(lowered, inputs_t, 15, TF_BACKEND)))\n\n def testAutoBatchingEvenOddNumpy(self):\n for inputs, outputs in ([5], [False]), ([5, 6, 8, 9],\n [False, True, True, False]):\n inputs = np.array(inputs, dtype=np.int64)\n outputs = np.array(outputs, dtype=np.bool)\n prog = even_odd_program()\n # print(prog)\n typed = type_inference.infer_types(prog, [inputs], NP_BACKEND)\n # print(typed)\n alloc = allocation_strategy.optimize(typed)\n lowered = lowering.lower_function_calls(alloc)\n # print(lowered)\n self.assertAllEqual(outputs, _execute(lowered, inputs, 15, NP_BACKEND))\n\n def testAutoBatchingEvenOddTF(self):\n for inputs, outputs in ([5], [False]), ([5, 6, 8, 9],\n [False, True, True, False]):\n inputs = np.array(inputs, dtype=np.int32)\n outputs = np.array(outputs, dtype=np.int32)\n prog = even_odd_program()\n # print(prog)\n inputs_t = tf.constant(inputs, dtype=np.int32)\n typed = type_inference.infer_types(prog, [inputs_t], TF_BACKEND)\n # print(typed)\n alloc = allocation_strategy.optimize(typed)\n lowered = lowering.lower_function_calls(alloc)\n # print(lowered)\n self.assertAllEqual(\n outputs, self.evaluate(_execute(lowered, inputs_t, 15, TF_BACKEND)))\n\n def testAutoBatchingMultivalueTF(self):\n input_ = np.array([1, 1, 1], dtype=np.int64)\n output = ((np.array([1, 1, 1], dtype=np.int64),\n np.array([3, 3, 3], dtype=np.int64)),\n np.array([4, 4, 4], dtype=np.int64),\n (np.array([5, 5, 5], dtype=np.int64),\n np.array([6, 6, 6], dtype=np.int64)))\n prog = synthetic_pattern_program()\n # print(prog)\n input_t = tf.constant(input_, dtype=np.int64)\n typed = type_inference.infer_types(prog, [input_t], TF_BACKEND)\n # print(typed)\n alloc = allocation_strategy.optimize(typed)\n lowered = lowering.lower_function_calls(alloc)\n # print(lowered)\n for expected, obtained in instructions.pattern_zip(\n output, self.evaluate(_execute(lowered, input_t, 15, TF_BACKEND))):\n self.assertAllEqual(expected, obtained)\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# 
============================================================================\n\"\"\"CategoricalToDiscrete bijector.\n\nThis bijector is hidden from public API for now because it is only valid for\ncategorical distribution.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.bijectors import bijector\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import tensor_util\nfrom tensorflow_probability.python.internal import tensorshape_util\n\n__all__ = [\n 'CategoricalToDiscrete',\n]\n\n\nclass CategoricalToDiscrete(bijector.Bijector):\n \"\"\"Bijector which computes `Y = g(X) = values[X]`.\n\n Example Usage:\n\n ```python\n bijector = CategoricalToDiscrete(map_values=[0.01, 0.1, 1., 10.])\n bijector.forward([1, 3, 2, 1, 0]) = [1., 10., 1., 0.1, 0.01]\n bijector.inverse([1., 10., 1., 0.1, 0.01]) = [1, 3, 2, 1, 0]\n ```\n\n \"\"\"\n\n def __init__(self,\n map_values,\n validate_args=False,\n name='categorical_to_discrete'):\n \"\"\"Instantiates `CategoricalToDiscrete` bijector.\n\n Args:\n map_values: 1D numerical tensor of discrete values to map to, sorted in\n strictly increasing order.\n validate_args: Python `bool` indicating whether arguments should be\n checked for correctness.\n name: Python `str` name given to ops managed by this object.\n \"\"\"\n with tf.name_scope(name):\n dtype = dtype_util.common_dtype([map_values], tf.float32)\n self._map_values = tensor_util.convert_nonref_to_tensor(\n map_values, name='map_values', dtype=dtype)\n super(CategoricalToDiscrete, self).__init__(\n forward_min_event_ndims=0,\n is_constant_jacobian=True,\n validate_args=validate_args,\n name=name)\n\n def _forward(self, x):\n map_values = tf.convert_to_tensor(self.map_values)\n if self.validate_args:\n with tf.control_dependencies([\n assert_util.assert_equal(\n (0 <= x) & (x < tf.size(map_values)),\n True,\n message='indices out of bound')\n ]):\n x = tf.identity(x)\n # If we want batch dims in self.map_values, we can (after broadcasting),\n # use:\n # tf.gather(self.map_values, x, batch_dims=-1, axis=-1)\n return tf.gather(map_values, indices=x)\n\n def _inverse(self, y):\n map_values = tf.convert_to_tensor(self.map_values)\n flat_y = tf.reshape(y, shape=[-1])\n # Search for the indices of map_values that are closest to flat_y.\n # Since map_values is strictly increasing, the closest is either the\n # first one that is strictly greater than flat_y, or the one before it.\n upper_candidates = tf.minimum(\n tf.size(map_values) - 1,\n tf.searchsorted(map_values, values=flat_y, side='right'))\n lower_candidates = tf.maximum(0, upper_candidates - 1)\n candidates = tf.stack([lower_candidates, upper_candidates], axis=-1)\n lower_cand_diff = tf.abs(flat_y - self._forward(lower_candidates))\n upper_cand_diff = tf.abs(flat_y - self._forward(upper_candidates))\n if self.validate_args:\n with tf.control_dependencies([\n assert_util.assert_near(\n tf.minimum(lower_cand_diff, upper_cand_diff),\n 0,\n message='inverse value not found')\n ]):\n candidates = tf.identity(candidates)\n candidate_selector = tf.stack([\n tf.range(tf.size(flat_y), dtype=tf.int32),\n tf.argmin([lower_cand_diff, upper_cand_diff], output_type=tf.int32)\n ],\n axis=-1)\n return tf.reshape(\n tf.gather_nd(candidates, candidate_selector), shape=y.shape)\n\n def 
_inverse_log_det_jacobian(self, y):\n return tf.constant(0., dtype=y.dtype)\n\n @property\n def map_values(self):\n return self._map_values\n\n def _parameter_control_dependencies(self, is_init):\n return _maybe_check_valid_map_values(self.map_values, self.validate_args)\n\n\ndef _maybe_check_valid_map_values(map_values, validate_args):\n \"\"\"Validate `map_values` if `validate_args`==True.\"\"\"\n assertions = []\n\n message = 'Rank of map_values must be 1.'\n if tensorshape_util.rank(map_values.shape) is not None:\n if tensorshape_util.rank(map_values.shape) != 1:\n raise ValueError(message)\n elif validate_args:\n assertions.append(assert_util.assert_rank(map_values, 1, message=message))\n\n message = 'Size of map_values must be greater than 0.'\n if tensorshape_util.num_elements(map_values.shape) is not None:\n if tensorshape_util.num_elements(map_values.shape) == 0:\n raise ValueError(message)\n elif validate_args:\n assertions.append(\n assert_util.assert_greater(tf.size(map_values), 0, message=message))\n\n if validate_args:\n assertions.append(\n assert_util.assert_equal(\n tf.math.is_strictly_increasing(map_values),\n True,\n message='map_values is not strictly increasing.'))\n\n return assertions\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Markov chain Monte Carlo driver, `sample_chain_annealed_importance_chain`.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n# Dependency imports\nimport numpy as np\n\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python.mcmc.internal import util as mcmc_util\n\n\n__all__ = [\n \"sample_annealed_importance_chain\",\n]\n\n\nAISResults = collections.namedtuple(\n \"AISResults\",\n [\n \"proposal_log_prob\",\n \"target_log_prob\",\n \"inner_results\",\n ])\n\n\ndef sample_annealed_importance_chain(\n num_steps,\n proposal_log_prob_fn,\n target_log_prob_fn,\n current_state,\n make_kernel_fn,\n parallel_iterations=10,\n name=None):\n \"\"\"Runs annealed importance sampling (AIS) to estimate normalizing constants.\n\n This function uses an MCMC transition operator (e.g., Hamiltonian Monte Carlo)\n to sample from a series of distributions that slowly interpolates between\n an initial \"proposal\" distribution:\n\n `exp(proposal_log_prob_fn(x) - proposal_log_normalizer)`\n\n and the target distribution:\n\n `exp(target_log_prob_fn(x) - target_log_normalizer)`,\n\n accumulating importance weights along the way. 
The product of these\n importance weights gives an unbiased estimate of the ratio of the\n normalizing constants of the initial distribution and the target\n distribution:\n\n `E[exp(ais_weights)] = exp(target_log_normalizer - proposal_log_normalizer)`.\n\n Note: When running in graph mode, `proposal_log_prob_fn` and\n `target_log_prob_fn` are called exactly three times (although this may be\n reduced to two times in the future).\n\n Args:\n num_steps: Integer number of Markov chain updates to run. More\n iterations means more expense, but smoother annealing between q\n and p, which in turn means exponentially lower variance for the\n normalizing constant estimator.\n proposal_log_prob_fn: Python callable that returns the log density of the\n initial distribution.\n target_log_prob_fn: Python callable which takes an argument like\n `current_state` (or `*current_state` if it's a list) and returns its\n (possibly unnormalized) log-density under the target distribution.\n current_state: `Tensor` or Python `list` of `Tensor`s representing the\n current state(s) of the Markov chain(s). The first `r` dimensions index\n independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.\n make_kernel_fn: Python `callable` which returns a `TransitionKernel`-like\n object. Must take one argument representing the `TransitionKernel`'s\n `target_log_prob_fn`. The `target_log_prob_fn` argument represents the\n `TransitionKernel`'s target log distribution. Note:\n `sample_annealed_importance_chain` creates a new `target_log_prob_fn`\n which is an interpolation between the supplied `target_log_prob_fn` and\n `proposal_log_prob_fn`; it is this interpolated function which is used as\n an argument to `make_kernel_fn`.\n parallel_iterations: The number of iterations allowed to run in parallel.\n It must be a positive integer. See `tf.while_loop` for more details.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., \"sample_annealed_importance_chain\").\n\n Returns:\n next_state: `Tensor` or Python list of `Tensor`s representing the\n state(s) of the Markov chain(s) at the final iteration. Has same shape as\n input `current_state`.\n ais_weights: Tensor with the estimated weight(s). Has shape matching\n `target_log_prob_fn(current_state)`.\n kernel_results: `collections.namedtuple` of internal calculations used to\n advance the chain.\n\n #### Examples\n\n ##### Estimate the normalizing constant of a log-gamma distribution.\n\n ```python\n tfd = tfp.distributions\n\n # Run 100 AIS chains in parallel\n num_chains = 100\n dims = 20\n dtype = np.float32\n\n proposal = tfd.MultivariateNormalDiag(\n loc=tf.zeros([dims], dtype=dtype))\n\n target = tfd.TransformedDistribution(\n distribution=tfd.Gamma(concentration=dtype(2),\n rate=dtype(3)),\n bijector=tfp.bijectors.Invert(tfp.bijectors.Exp()),\n event_shape=[dims])\n\n chains_state, ais_weights, kernels_results = (\n tfp.mcmc.sample_annealed_importance_chain(\n num_steps=1000,\n proposal_log_prob_fn=proposal.log_prob,\n target_log_prob_fn=target.log_prob,\n current_state=proposal.sample(num_chains),\n make_kernel_fn=lambda tlp_fn: tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=tlp_fn,\n step_size=0.2,\n num_leapfrog_steps=2)))\n\n log_estimated_normalizer = (tf.reduce_logsumexp(ais_weights)\n - np.log(num_chains))\n log_true_normalizer = tf.lgamma(2.) - 2. 
* tf.log(3.)\n ```\n\n ##### Estimate marginal likelihood of a Bayesian regression model.\n\n ```python\n tfd = tfp.distributions\n\n def make_prior(dims, dtype):\n return tfd.MultivariateNormalDiag(\n loc=tf.zeros(dims, dtype))\n\n def make_likelihood(weights, x):\n return tfd.MultivariateNormalDiag(\n loc=tf.tensordot(weights, x, axes=[[0], [-1]]))\n\n # Run 100 AIS chains in parallel\n num_chains = 100\n dims = 10\n dtype = np.float32\n\n # Make training data.\n x = np.random.randn(num_chains, dims).astype(dtype)\n true_weights = np.random.randn(dims).astype(dtype)\n y = np.dot(x, true_weights) + np.random.randn(num_chains)\n\n # Setup model.\n prior = make_prior(dims, dtype)\n def target_log_prob_fn(weights):\n return prior.log_prob(weights) + make_likelihood(weights, x).log_prob(y)\n\n proposal = tfd.MultivariateNormalDiag(\n loc=tf.zeros(dims, dtype))\n\n weight_samples, ais_weights, kernel_results = (\n tfp.mcmc.sample_annealed_importance_chain(\n num_steps=1000,\n proposal_log_prob_fn=proposal.log_prob,\n target_log_prob_fn=target_log_prob_fn,\n current_state=tf.zeros([num_chains, dims], dtype),\n make_kernel_fn=lambda tlp_fn: tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=tlp_fn,\n step_size=0.1,\n num_leapfrog_steps=2)))\n log_normalizer_estimate = (tf.reduce_logsumexp(ais_weights)\n - np.log(num_chains))\n ```\n\n \"\"\"\n with tf1.name_scope(name, \"sample_annealed_importance_chain\",\n [num_steps, current_state]):\n num_steps = tf.convert_to_tensor(\n value=num_steps, dtype=tf.int32, name=\"num_steps\")\n if mcmc_util.is_list_like(current_state):\n current_state = [\n tf.convert_to_tensor(value=s, name=\"current_state\")\n for s in current_state\n ]\n else:\n current_state = tf.convert_to_tensor(\n value=current_state, name=\"current_state\")\n\n def _make_convex_combined_log_prob_fn(iter_):\n def _fn(*args):\n p = tf.identity(proposal_log_prob_fn(*args), name=\"proposal_log_prob\")\n t = tf.identity(target_log_prob_fn(*args), name=\"target_log_prob\")\n dtype = p.dtype.base_dtype\n beta = tf.cast(iter_ + 1, dtype) / tf.cast(num_steps, dtype)\n return tf.identity(beta * t + (1. 
- beta) * p,\n name=\"convex_combined_log_prob\")\n return _fn\n\n def _loop_body(iter_, ais_weights, current_state, kernel_results):\n \"\"\"Closure which implements `tf.while_loop` body.\"\"\"\n x = (current_state if mcmc_util.is_list_like(current_state)\n else [current_state])\n proposal_log_prob = proposal_log_prob_fn(*x)\n target_log_prob = target_log_prob_fn(*x)\n ais_weights += ((target_log_prob - proposal_log_prob) /\n tf.cast(num_steps, ais_weights.dtype))\n kernel = make_kernel_fn(_make_convex_combined_log_prob_fn(iter_))\n next_state, inner_results = kernel.one_step(\n current_state, kernel_results.inner_results)\n kernel_results = AISResults(\n proposal_log_prob=proposal_log_prob,\n target_log_prob=target_log_prob,\n inner_results=inner_results,\n )\n return [iter_ + 1, ais_weights, next_state, kernel_results]\n\n def _bootstrap_results(init_state):\n \"\"\"Creates first version of `previous_kernel_results`.\"\"\"\n kernel = make_kernel_fn(_make_convex_combined_log_prob_fn(iter_=0))\n inner_results = kernel.bootstrap_results(init_state)\n\n convex_combined_log_prob = inner_results.accepted_results.target_log_prob\n dtype = convex_combined_log_prob.dtype.as_numpy_dtype\n shape = tf.shape(input=convex_combined_log_prob)\n proposal_log_prob = tf.fill(shape, dtype(np.nan),\n name=\"bootstrap_proposal_log_prob\")\n target_log_prob = tf.fill(shape, dtype(np.nan),\n name=\"target_target_log_prob\")\n\n return AISResults(\n proposal_log_prob=proposal_log_prob,\n target_log_prob=target_log_prob,\n inner_results=inner_results,\n )\n\n previous_kernel_results = _bootstrap_results(current_state)\n inner_results = previous_kernel_results.inner_results\n\n ais_weights = tf.zeros(\n shape=tf.broadcast_dynamic_shape(\n tf.shape(input=inner_results.proposed_results.target_log_prob),\n tf.shape(input=inner_results.accepted_results.target_log_prob)),\n dtype=inner_results.proposed_results.target_log_prob.dtype.base_dtype)\n\n [_, ais_weights, current_state, kernel_results] = tf.while_loop(\n cond=lambda iter_, *args: iter_ < num_steps,\n body=_loop_body,\n loop_vars=[\n np.int32(0), # iter_\n ais_weights,\n current_state,\n previous_kernel_results,\n ],\n parallel_iterations=parallel_iterations)\n\n return [current_state, ais_weights, kernel_results]\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The Truncated Normal distribution class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.distributions import distribution\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import 
reparameterization\nfrom tensorflow_probability.python.internal import special_math\nfrom tensorflow.python.ops import random_ops # pylint: disable=g-direct-tensorflow-import\n\n\n__all__ = [\n \"TruncatedNormal\",\n]\n\n\nclass TruncatedNormal(distribution.Distribution):\n \"\"\"The Truncated Normal distribution.\n\n #### Mathematical details\n\n The truncated normal is a normal distribution bounded between `low`\n and `high` (the pdf is 0 outside these bounds and renormalized).\n\n Samples from this distribution are differentiable with respect to `loc`,\n `scale` as well as the bounds, `low` and `high`, i.e., this\n implementation is fully reparameterizable.\n\n For more details, see [here](\n https://en.wikipedia.org/wiki/Truncated_normal_distribution).\n\n #### Probability density function\n\n The probability density function (pdf) of this distribution is:\n ```none\n pdf(x; loc, scale, low, high) =\n { (2 pi)**(-0.5) exp(-0.5 y**2) / (scale * z) for low <= x <= high\n { 0 otherwise\n y = (x - loc)/scale\n z = NormalCDF((high - loc) / scale) - NormalCDF((low - loc) / scale)\n ```\n\n where:\n\n * `NormalCDF` is the cumulative distribution function of the Normal distribution\n with 0 mean and unit variance.\n\n This is a scalar distribution so the event shape is always scalar and the\n dimensions of the parameters define the batch_shape.\n\n #### Examples\n ```python\n\n tfd = tfp.distributions\n # Define a batch of two scalar TruncatedNormals with modes at 0. and 1.\n dist = tfd.TruncatedNormal(loc=[0., 1.], scale=1.0,\n low=[-1., 0.],\n high=[1., 1.])\n\n # Evaluate the pdf of the distributions at 0.5 and 0.8 respectively returning\n # a 2-vector tensor.\n dist.prob([0.5, 0.8])\n\n # Get 3 samples, returning a 3 x 2 tensor.\n dist.sample([3])\n ```\n \"\"\"\n\n def __init__(self,\n loc,\n scale,\n low,\n high,\n validate_args=False,\n allow_nan_stats=True,\n name=\"TruncatedNormal\"):\n \"\"\"Construct TruncatedNormal.\n\n All parameters of the distribution will be broadcast to the same shape,\n so the resulting distribution will have a batch_shape of the broadcast\n shape of all parameters.\n\n Args:\n loc: Floating point tensor; the mean of the normal distribution(s) (\n note that the mean of the resulting distribution will be different\n since it is modified by the bounds).\n scale: Floating point tensor; the standard deviation of the normal\n distribution(s).\n low: `float` `Tensor` representing lower bound of the distribution's\n support. Must be such that `low < high`.\n high: `float` `Tensor` representing upper bound of the distribution's\n support. Must be such that `low < high`.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked at run-time.\n allow_nan_stats: Python `bool`, default `True`. When `True`,\n statistics (e.g., mean, mode, variance) use the value \"`NaN`\" to\n indicate the result is undefined. 
When `False`, an exception is raised\n if one or more of the statistic's batch members are undefined.\n name: Python `str` name prefixed to Ops created by this class.\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name) as name:\n dtype = dtype_util.common_dtype([loc, scale, low, high], tf.float32)\n loc = tf.convert_to_tensor(loc, name=\"loc\", dtype=dtype)\n scale = tf.convert_to_tensor(scale, name=\"scale\", dtype=dtype)\n low = tf.convert_to_tensor(low, name=\"low\", dtype=dtype)\n high = tf.convert_to_tensor(high, name=\"high\", dtype=dtype)\n dtype_util.assert_same_float_dtype([loc, scale, low, high])\n\n self._broadcast_batch_shape = distribution_util.get_broadcast_shape(\n loc, scale, low, high)\n\n # Broadcast all parameters to the same shape\n broadcast_ones = tf.ones(shape=self._broadcast_batch_shape,\n dtype=scale.dtype)\n self._scale = scale * broadcast_ones\n self._loc = loc * broadcast_ones\n self._low = low * broadcast_ones\n self._high = high * broadcast_ones\n\n with tf.control_dependencies([self._validate()] if validate_args else []):\n self._loc = tf.identity(self._loc)\n\n super(TruncatedNormal, self).__init__(\n dtype=dtype,\n # This distribution is fully reparameterized. loc, scale have straight\n # through gradients. The gradients for the bounds are implemented using\n # custom derived expressions based on implicit gradients.\n # For the special case of lower bound zero and a positive upper bound\n # an equivalent expression can also be found in Sec 9.1.1.\n # of https://arxiv.org/pdf/1806.01851.pdf. The implementation here\n # handles arbitrary bounds.\n reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n name=name)\n\n def _validate(self):\n vops = [\n assert_util.assert_positive(self._scale),\n assert_util.assert_positive(self._high - self._low),\n assert_util.assert_finite(self._low, message=\"Lower bound not finite\"),\n assert_util.assert_finite(self._high, message=\"Upper bound not finite\"),\n assert_util.assert_finite(self._loc, message=\"Loc not finite\"),\n assert_util.assert_finite(self._scale, message=\"scale not finite\"),\n ]\n return tf.group(*vops, name=\"ValidationOps\")\n\n @property\n def _standardized_low(self):\n return (self._low - self._loc) / self._scale\n\n @property\n def _standardized_high(self):\n return (self._high - self._loc) / self._scale\n\n @property\n def _normalizer(self):\n return (special_math.ndtr(self._standardized_high) -\n special_math.ndtr(self._standardized_low))\n\n def _normal_pdf(self, x):\n return 1. 
/ np.sqrt(2 * np.pi) * tf.exp(-0.5 * tf.square(x))\n\n @staticmethod\n def _param_shapes(sample_shape):\n # All parameters are of the same shape\n shape = tf.convert_to_tensor(sample_shape, dtype=tf.int32)\n return {\"loc\": shape,\n \"scale\": shape,\n \"high\": shape,\n \"low\": shape}\n\n @classmethod\n def _params_event_ndims(cls):\n return dict(loc=0, scale=0, low=0, high=0)\n\n @property\n def loc(self):\n return self._loc\n\n @property\n def scale(self):\n \"\"\"Distribution parameter for the scale.\"\"\"\n return self._scale\n\n @property\n def low(self):\n return self._low\n\n @property\n def high(self):\n return self._high\n\n def _batch_shape_tensor(self):\n # All the parameters are broadcast the same shape during construction.\n return tf.shape(self.loc)\n\n def _batch_shape(self):\n # All the parameters are broadcast the same shape during construction.\n return self.loc.shape\n\n def _event_shape_tensor(self):\n return tf.constant([], dtype=tf.int32)\n\n def _event_shape(self):\n return tf.TensorShape([])\n\n def _sample_n(self, n, seed=None):\n sample_and_batch_shape = tf.concat([[n], self.batch_shape_tensor()], 0)\n flat_batch_and_sample_shape = tf.stack(\n [tf.reduce_prod(self.batch_shape_tensor()), n])\n\n # In order to be reparameterizable we sample on the truncated_normal of\n # unit variance and mean and scale (but with the standardized\n # truncation bounds).\n\n @tf.custom_gradient\n def _std_samples_with_gradients(lower, upper):\n \"\"\"Standard truncated Normal with gradient support for low, high.\"\"\"\n # Note: Unlike the convention in tf_probability,\n # parameterized_truncated_normal returns a tensor with the final dimension\n # being the sample dimension.\n std_samples = random_ops.parameterized_truncated_normal(\n shape=flat_batch_and_sample_shape,\n means=0.0,\n stddevs=1.0,\n minvals=lower,\n maxvals=upper,\n dtype=self.dtype,\n seed=seed)\n\n def grad(dy):\n \"\"\"Computes a derivative for the min and max parameters.\n\n This function implements the derivative wrt the truncation bounds, which\n get blocked by the sampler. 
We use a custom expression for numerical\n stability instead of automatic differentiation on CDF for implicit\n gradients.\n\n Args:\n dy: output gradients\n\n Returns:\n The standard normal samples and the gradients wrt the upper\n bound and lower bound.\n \"\"\"\n # std_samples has an extra dimension (the sample dimension), expand\n # lower and upper so they broadcast along this dimension.\n # See note above regarding parameterized_truncated_normal, the sample\n # dimension is the final dimension.\n lower_broadcast = lower[..., tf.newaxis]\n upper_broadcast = upper[..., tf.newaxis]\n\n cdf_samples = ((special_math.ndtr(std_samples) -\n special_math.ndtr(lower_broadcast)) /\n (special_math.ndtr(upper_broadcast)\n - special_math.ndtr(lower_broadcast)))\n\n # tiny, eps are tolerance parameters to ensure we stay away from giving\n # a zero arg to the log CDF expression.\n\n tiny = np.finfo(dtype_util.as_numpy_dtype(self.dtype)).tiny\n eps = np.finfo(dtype_util.as_numpy_dtype(self.dtype)).eps\n cdf_samples = tf.clip_by_value(cdf_samples, tiny, 1 - eps)\n\n du = tf.exp(0.5 * (std_samples**2 - upper_broadcast**2) +\n tf.math.log(cdf_samples))\n dl = tf.exp(0.5 * (std_samples**2 - lower_broadcast**2) +\n tf.math.log1p(-cdf_samples))\n\n # Reduce the gradient across the samples\n grad_u = tf.reduce_sum(dy * du, axis=-1)\n grad_l = tf.reduce_sum(dy * dl, axis=-1)\n return [grad_l, grad_u]\n\n return std_samples, grad\n\n std_samples = _std_samples_with_gradients(\n tf.reshape(self._standardized_low, [-1]),\n tf.reshape(self._standardized_high, [-1]))\n\n # The returned shape is [flat_batch x n]\n std_samples = tf.transpose(a=std_samples, perm=[1, 0])\n\n std_samples = tf.reshape(std_samples, sample_and_batch_shape)\n samples = (std_samples * tf.expand_dims(self._scale, axis=0) +\n tf.expand_dims(self._loc, axis=0))\n\n return samples\n\n def _log_prob(self, x):\n log_prob = -(0.5 *\n ((x - self.loc) / self.scale)**2 + 0.5 * np.log(2. * np.pi) +\n tf.math.log(self.scale * self._normalizer))\n # p(x) is 0 outside the bounds.\n bounded_log_prob = tf.where(\n (x > self._high) | (x < self._low),\n dtype_util.as_numpy_dtype(x.dtype)(-np.inf),\n log_prob)\n return bounded_log_prob\n\n def _cdf(self, x):\n cdf_in_support = ((special_math.ndtr((x - self.loc) / self.scale)\n - special_math.ndtr(self._standardized_low))\n / self._normalizer)\n return tf.clip_by_value(cdf_in_support, 0., 1.)\n\n def _entropy(self):\n return (\n tf.math.log(\n np.sqrt(2. * np.pi * np.e) * self.scale * self._normalizer) +\n (self._standardized_low * self._normal_pdf(self._standardized_low) -\n self._standardized_high * self._normal_pdf(self._standardized_high)) /\n (2. * self._normalizer))\n\n def _mean(self):\n return (self.loc +\n self._scale * ((self._normal_pdf(self._standardized_low) -\n self._normal_pdf(self._standardized_high))\n / self._normalizer))\n\n def _mode(self):\n # mode = { loc: for low <= loc <= high\n # low: for loc < low\n # high: for loc > high\n # }\n return tf.clip_by_value(self.loc, self.low, self.high)\n\n def _variance(self):\n var = (tf.square(self.scale) *\n (1. 
+ (self._standardized_low * self._normal_pdf(\n self._standardized_low) -\n self._standardized_high * self._normal_pdf(\n self._standardized_high)) / self._normalizer -\n tf.square((self._normal_pdf(self._standardized_low) -\n self._normal_pdf(self._standardized_high))\n / self._normalizer)))\n return var\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for dtype_util.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\n# Dependency imports\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\n\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import test_case\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import\n\n\nclass DtypeUtilTest(test_case.TestCase):\n\n def testIsInteger(self):\n self.assertFalse(dtype_util.is_integer(np.float64))\n\n def testNoModifyArgsList(self):\n x = tf.ones(3, tf.float32)\n y = tf.zeros(4, tf.float32)\n lst = [x, y]\n self.assertEqual(tf.float32, dtype_util.common_dtype(lst))\n self.assertLen(lst, 2)\n\n def testCommonDtypeAcceptsNone(self):\n self.assertEqual(\n tf.float16, dtype_util.common_dtype(\n [None], dtype_hint=tf.float16))\n\n x = tf.ones(3, tf.float16)\n self.assertEqual(\n tf.float16, dtype_util.common_dtype(\n [x, None], dtype_hint=tf.float32))\n\n fake_tensor = collections.namedtuple('fake_tensor', ['dtype'])\n self.assertEqual(\n tf.float16, dtype_util.common_dtype(\n [fake_tensor(dtype=None), None, x], dtype_hint=tf.float32))\n\n def testCommonDtypeFromLinop(self):\n x = tf.linalg.LinearOperatorDiag(tf.ones(3, tf.float16))\n self.assertEqual(\n tf.float16, dtype_util.common_dtype([x], dtype_hint=tf.float32))\n\n def testCommonDtypeFromEdRV(self):\n # As in tensorflow_probability github issue #221\n ed = tfp.edward2\n x = ed.Dirichlet(np.ones(3, dtype='float64'))\n self.assertEqual(\n tf.float64, dtype_util.common_dtype([x], dtype_hint=tf.float32))\n\n\nclass FloatDTypeTest(test_case.TestCase):\n\n @test_util.run_in_graph_and_eager_modes\n def test_assert_same_float_dtype(self):\n self.assertIs(tf.float32, dtype_util.assert_same_float_dtype(None, None))\n self.assertIs(tf.float32, dtype_util.assert_same_float_dtype([], None))\n self.assertIs(\n tf.float32, dtype_util.assert_same_float_dtype([], tf.float32))\n self.assertIs(\n tf.float32, dtype_util.assert_same_float_dtype(None, tf.float32))\n self.assertIs(\n tf.float32, dtype_util.assert_same_float_dtype([None, None], None))\n self.assertIs(\n tf.float32,\n dtype_util.assert_same_float_dtype([None, None], tf.float32))\n\n const_float = tf.constant(3.0, dtype=tf.float32)\n self.assertIs(\n tf.float32,\n dtype_util.assert_same_float_dtype([const_float], tf.float32))\n self.assertRaises(ValueError, 
dtype_util.assert_same_float_dtype,\n [const_float], tf.int32)\n\n sparse_float = tf.SparseTensor(\n tf.constant([[111], [232]], tf.int64),\n tf.constant([23.4, -43.2], tf.float32),\n tf.constant([500], tf.int64))\n self.assertIs(\n tf.float32,\n dtype_util.assert_same_float_dtype([sparse_float], tf.float32))\n self.assertRaises(ValueError, dtype_util.assert_same_float_dtype,\n [sparse_float], tf.int32)\n self.assertRaises(ValueError, dtype_util.assert_same_float_dtype,\n [const_float, None, sparse_float], tf.float64)\n\n self.assertIs(\n tf.float32,\n dtype_util.assert_same_float_dtype([const_float, sparse_float]))\n self.assertIs(\n tf.float32,\n dtype_util.assert_same_float_dtype(\n [const_float, sparse_float], tf.float32))\n\n const_int = tf.constant(3, dtype=tf.int32)\n self.assertRaises(ValueError, dtype_util.assert_same_float_dtype,\n [sparse_float, const_int])\n self.assertRaises(ValueError, dtype_util.assert_same_float_dtype,\n [sparse_float, const_int], tf.int32)\n self.assertRaises(ValueError, dtype_util.assert_same_float_dtype,\n [sparse_float, const_int], tf.float32)\n self.assertRaises(ValueError, dtype_util.assert_same_float_dtype,\n [const_int])\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.compat.v2.TensorArray", "tensorflow.compat.v2.unstack", "tensorflow.compat.v2.size", "tensorflow.compat.v2.constant", "tensorflow.compat.v2.cast", "tensorflow.compat.v2.stack", "tensorflow.compat.v2.convert_to_tensor", "tensorflow.compat.v2.range", "tensorflow.compat.v2.shape", "tensorflow.compat.v2.reshape", "tensorflow.compat.v2.compat.dimension_value", "tensorflow.compat.v1.name_scope" ], [ "tensorflow.compat.v2.executing_eagerly", "numpy.sqrt", "tensorflow.compat.v2.linalg.cholesky", "numpy.concatenate", "numpy.mean", "tensorflow.compat.v1.train.get_or_create_global_step", "tensorflow.compat.v2.expand_dims", "tensorflow.compat.v1.set_random_seed", "numpy.zeros", "tensorflow.compat.v2.math.rsqrt", "tensorflow.compat.v1.train.polynomial_decay", "tensorflow.compat.v2.Variable", "tensorflow.compat.v2.test.main", "tensorflow.compat.v2.square", "tensorflow.compat.v2.constant", "tensorflow.compat.v2.ones_like", "tensorflow.compat.v2.concat", "tensorflow.compat.v2.cast", "tensorflow.compat.v1.global_variables_initializer", "tensorflow.compat.v2.linalg.matvec" ], [ "tensorflow.compat.v2.sqrt", "numpy.all", "numpy.zeros_like", "tensorflow.compat.v2.TensorShape", "scipy.stats.dirichlet.entropy", "tensorflow.compat.v2.linalg.diag_part", "numpy.ones_like", "scipy.special.digamma", "tensorflow.compat.v2.zeros", "numpy.zeros", "tensorflow.compat.v2.Variable", "tensorflow.compat.v2.test.main", "tensorflow.compat.v2.reduce_mean", "scipy.stats.beta", "numpy.random.rand", "tensorflow.compat.v2.matmul", "scipy.special.gammaln", "tensorflow.compat.v2.constant", "numpy.array", "numpy.sum", "numpy.isfinite", "tensorflow.compat.v2.GradientTape", "scipy.stats.dirichlet.var", "numpy.ones", "scipy.stats.dirichlet.mean" ], [ "tensorflow.python.framework.test_combinations.combine", "tensorflow.compat.v2.config.experimental_run_functions_eagerly", "tensorflow.python.framework.combinations.EagerGraphCombination", "tensorflow.python.framework.test_combinations.OptionalParameter" ], [ "tensorflow.compat.v2.Variable", "numpy.ones_like", "tensorflow.compat.v2.test.main", "tensorflow.compat.v1.placeholder_with_default", "numpy.asarray", "tensorflow.compat.v2.GradientTape", "tensorflow.compat.v2.reduce_mean", "numpy.ones", "numpy.logical_or", "tensorflow.compat.v2.convert_to_tensor", "tensorflow.compat.v2.shape", "tensorflow.compat.v2.TensorShape", "tensorflow.compat.v1.set_random_seed", "numpy.zeros" ], [ "tensorflow.python.util.all_util.remove_undocumented" ], [ "numpy.array", "tensorflow.compat.v2.test.main", "tensorflow.compat.v2.constant" ], [ "tensorflow.compat.v2.math.is_strictly_increasing", "tensorflow.compat.v2.searchsorted", "tensorflow.compat.v2.size", "tensorflow.compat.v2.maximum", "tensorflow.compat.v2.name_scope", "tensorflow.compat.v2.minimum", "tensorflow.compat.v2.reshape", "tensorflow.compat.v2.convert_to_tensor", "tensorflow.compat.v2.stack", "tensorflow.compat.v2.identity", "tensorflow.compat.v2.gather", "tensorflow.compat.v2.gather_nd", "tensorflow.compat.v2.constant", "tensorflow.compat.v2.argmin" ], [ "numpy.int32", "tensorflow.compat.v2.cast", "tensorflow.compat.v2.shape", "tensorflow.compat.v2.convert_to_tensor", "tensorflow.compat.v2.identity", "tensorflow.compat.v1.name_scope" ], [ "tensorflow.compat.v2.transpose", "numpy.sqrt", "tensorflow.compat.v2.clip_by_value", "tensorflow.compat.v2.shape", "tensorflow.compat.v2.convert_to_tensor", "tensorflow.compat.v2.ones", "tensorflow.compat.v2.identity", "tensorflow.compat.v2.reduce_sum", "tensorflow.compat.v2.TensorShape", 
"tensorflow.compat.v2.name_scope", "tensorflow.compat.v2.reshape", "tensorflow.compat.v2.expand_dims", "tensorflow.compat.v2.math.log", "tensorflow.compat.v2.group", "numpy.log", "tensorflow.compat.v2.square", "tensorflow.python.ops.random_ops.parameterized_truncated_normal", "tensorflow.compat.v2.constant", "tensorflow.compat.v2.math.log1p" ], [ "tensorflow.compat.v2.test.main", "numpy.ones", "tensorflow.compat.v2.ones", "tensorflow.compat.v2.zeros", "tensorflow.compat.v2.constant" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.6", "2.4", "2.3", "2.9", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
TencentYoutuResearch/PersonReID-ACT
[ "264b1b43f9424c297638ebf6f8f8ace09512ed29" ]
[ "selftrainingCT.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function, absolute_import\nimport argparse\nimport time\nimport os.path as osp\nimport os\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import init\nfrom torch.backends import cudnn\nfrom torch.utils.data import DataLoader\nfrom reid import datasets\nfrom reid import models\nfrom reid.dist_metric import DistanceMetric\nfrom reid.loss import TripletLoss\nfrom reid.trainers import CoTeaching\nfrom reid.evaluators import Evaluator, extract_features\nfrom reid.utils.data import transforms as T\nimport torch.nn.functional as F\nfrom reid.utils.data.preprocessor import Preprocessor\nfrom reid.utils.data.sampler import RandomIdentitySampler\nfrom reid.utils.serialization import load_checkpoint, save_checkpoint\n\nfrom sklearn.cluster import DBSCAN\nfrom reid.rerank import re_ranking\n\n\ndef calScores(clusters, labels):\n \"\"\"\n compute pair-wise precision pair-wise recall\n \"\"\"\n from scipy.special import comb\n if len(clusters) == 0:\n return 0, 0\n else:\n curCluster = []\n for curClus in clusters.values():\n curCluster.append(labels[curClus])\n TPandFP = sum([comb(len(val), 2) for val in curCluster])\n TP = 0\n for clusterVal in curCluster:\n for setMember in set(clusterVal):\n if sum(clusterVal == setMember) < 2: continue\n TP += comb(sum(clusterVal == setMember), 2)\n FP = TPandFP - TP\n # FN and TN\n TPandFN = sum([comb(labels.tolist().count(val), 2) for val in set(labels)])\n FN = TPandFN - TP\n # cal precision and recall\n precision, recall = TP / (TP + FP), TP / (TP + FN)\n fScore = 2 * precision * recall / (precision + recall)\n return precision, recall, fScore\n\n\ndef get_data(name, data_dir, height, width, batch_size,\n workers):\n root = osp.join(data_dir, name)\n\n dataset = datasets.create(name, root, num_val=0.1)\n\n normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n # use all training and validation images in target dataset\n train_set = dataset.trainval\n num_classes = dataset.num_trainval_ids\n\n transformer = T.Compose([\n T.Resize((height, width)),\n T.ToTensor(),\n normalizer,\n ])\n\n extfeat_loader = DataLoader(\n Preprocessor(train_set, root=dataset.images_dir,\n transform=transformer),\n batch_size=batch_size, num_workers=workers,\n shuffle=False, pin_memory=True)\n\n test_loader = DataLoader(\n Preprocessor(list(set(dataset.query) | set(dataset.gallery)),\n root=dataset.images_dir, transform=transformer),\n batch_size=batch_size, num_workers=workers,\n shuffle=False, pin_memory=True)\n\n return dataset, num_classes, extfeat_loader, test_loader\n\n\ndef get_source_data(name, data_dir, height, width, batch_size,\n workers):\n root = osp.join(data_dir, name)\n\n dataset = datasets.create(name, root, num_val=0.1)\n\n normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n # use all training images on source dataset\n train_set = dataset.train\n num_classes = dataset.num_train_ids\n\n transformer = T.Compose([\n T.Resize((height, width)),\n T.ToTensor(),\n normalizer,\n ])\n\n extfeat_loader = DataLoader(\n Preprocessor(train_set, root=dataset.images_dir,\n transform=transformer),\n batch_size=batch_size, num_workers=workers,\n shuffle=False, pin_memory=True)\n\n return dataset, extfeat_loader\n\n\ndef calDis(qFeature, gFeature): # 246s\n x, y = F.normalize(qFeature), F.normalize(gFeature)\n # x, y = qFeature, gFeature\n m, n = x.shape[0], y.shape[0]\n disMat = torch.pow(x, 2).sum(dim=1, 
keepdim=True).expand(m, n) + \\\n torch.pow(y, 2).sum(dim=1, keepdim=True).expand(n, m).t()\n disMat.addmm_(1, -2, x, y.t())\n return disMat.clamp_(min=1e-5)\n\n\ndef labelUnknown(knownFeat, allLab, unknownFeat):\n # allLab--label from known\n disMat = calDis(knownFeat, unknownFeat)\n labLoc = disMat.argmin(dim=0)\n return allLab[labLoc]\n\n\ndef labelNoise(feature, labels):\n # features and labels with -1\n noiseFeat, pureFeat = feature[labels == -1, :], feature[labels != -1, :]\n pureLabs = labels[labels != -1] # no outliers\n unLab = labelUnknown(pureFeat, pureLabs, noiseFeat)\n labels[labels == -1] = unLab\n return labels.numpy()\n\n\ndef getCenter(features, labels):\n allCenter = {}\n features = features[labels != -1, :]\n labels = labels[labels != -1]\n for pid in set(labels):\n allCenter[pid] = torch.from_numpy(features[labels == pid, :].mean(axis=0)).unsqueeze(0)\n return torch.cat(list(allCenter.values()))\n\n\ndef main(args):\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n cudnn.benchmark = True\n\n # Create data loaders\n assert args.num_instances > 1, \"num_instances should be greater than 1\"\n assert args.batch_size % args.num_instances == 0, \\\n 'num_instances should divide batch_size'\n if args.height is None or args.width is None:\n args.height, args.width = (144, 56) if args.arch == 'inception' else \\\n (256, 128)\n\n # get source data\n src_dataset, src_extfeat_loader = \\\n get_source_data(args.src_dataset, args.data_dir, args.height,\n args.width, args.batch_size, args.workers)\n # get target data\n tgt_dataset, num_classes, tgt_extfeat_loader, test_loader = \\\n get_data(args.tgt_dataset, args.data_dir, args.height,\n args.width, args.batch_size, args.workers)\n\n # Create model\n # Hacking here to let the classifier be the number of source ids\n if args.src_dataset == 'dukemtmc':\n model = models.create(args.arch, num_classes=632, pretrained=False)\n coModel = models.create(args.arch, num_classes=632, pretrained=False)\n elif args.src_dataset == 'market1501':\n model = models.create(args.arch, num_classes=676, pretrained=False)\n coModel = models.create(args.arch, num_classes=676, pretrained=False)\n elif args.src_dataset == 'msmt17':\n model = models.create(args.arch, num_classes=1041, pretrained=False)\n coModel = models.create(args.arch, num_classes=1041, pretrained=False)\n elif args.src_dataset == 'cuhk03':\n model = models.create(args.arch, num_classes=1230, pretrained=False)\n coModel = models.create(args.arch, num_classes=1230, pretrained=False)\n else:\n raise RuntimeError('Please specify the number of classes (ids) of the network.')\n\n # Load from checkpoint\n if args.resume:\n print('Resuming checkpoints from finetuned model on another dataset...\\n')\n checkpoint = load_checkpoint(args.resume)\n model.load_state_dict(checkpoint['state_dict'], strict=False)\n coModel.load_state_dict(checkpoint['state_dict'], strict=False)\n else:\n raise RuntimeWarning('Not using a pre-trained model.')\n model = nn.DataParallel(model).cuda()\n coModel = nn.DataParallel(coModel).cuda()\n\n # Criterion\n criterion = [\n TripletLoss(args.margin, args.num_instances, isAvg=False, use_semi=False).cuda(),\n TripletLoss(args.margin, args.num_instances, isAvg=False, use_semi=False).cuda()\n ]\n optimizer = torch.optim.Adam(\n model.parameters(), lr=args.lr\n )\n coOptimizer = torch.optim.Adam(\n coModel.parameters(), lr=args.lr\n )\n\n optims = [optimizer, coOptimizer]\n\n # training stage transformer on input images\n normalizer = T.Normalize(mean=[0.485, 0.456, 
0.406], std=[0.229, 0.224, 0.225])\n train_transformer = T.Compose([\n T.Resize((args.height, args.width)),\n T.RandomHorizontalFlip(),\n T.ToTensor(), normalizer,\n T.RandomErasing(probability=0.5, sh=0.2, r1=0.3)\n ])\n\n # # Start training\n for iter_n in range(args.iteration):\n if args.lambda_value == 0:\n source_features = 0\n else:\n # get source datas' feature\n source_features, _ = extract_features(model, src_extfeat_loader, print_freq=args.print_freq, numStripe=None)\n # synchronization feature order with src_dataset.train\n source_features = torch.cat([source_features[f].unsqueeze(0) for f, _, _ in src_dataset.train], 0)\n\n # extract training images' features\n print('Iteration {}: Extracting Target Dataset Features...'.format(iter_n + 1))\n target_features, _ = extract_features(model, tgt_extfeat_loader, print_freq=args.print_freq, numStripe=None)\n # synchronization feature order with dataset.train\n target_features = torch.cat([target_features[f].unsqueeze(0) for f, _, _ in tgt_dataset.trainval], 0)\n # calculate distance and rerank result\n print('Calculating feature distances...')\n target_features = target_features.numpy()\n rerank_dist = re_ranking(source_features, target_features, lambda_value=args.lambda_value)\n if iter_n == 0:\n # DBSCAN cluster\n tri_mat = np.triu(rerank_dist, 1) # tri_mat.dim=2\n tri_mat = tri_mat[np.nonzero(tri_mat)] # tri_mat.dim=1\n tri_mat = np.sort(tri_mat, axis=None)\n top_num = np.round(args.rho * tri_mat.size).astype(int)\n eps = tri_mat[:top_num].mean()\n print('eps in cluster: {:.3f}'.format(eps))\n cluster = DBSCAN(eps=eps, min_samples=4, metric='precomputed', n_jobs=8)\n # select & cluster images as training set of this epochs\n print('Clustering and labeling...')\n labels = cluster.fit_predict(rerank_dist)\n num_ids = len(set(labels)) - 1\n print('Iteration {} have {} training ids'.format(iter_n + 1, num_ids))\n # generate new dataset\n new_dataset = []\n # assign label for target ones\n newLab = labelNoise(torch.from_numpy(target_features), torch.from_numpy(labels))\n # unknownFeats = target_features[labels==-1,:]\n counter = 0\n from collections import defaultdict\n realIDs, fakeIDs = defaultdict(list), []\n for (fname, realID, cam), label in zip(tgt_dataset.trainval, newLab):\n # dont need to change codes in trainer.py _parsing_input function and sampler function after add 0\n new_dataset.append((fname, label, cam))\n realIDs[realID].append(counter)\n fakeIDs.append(label)\n counter += 1\n precision, recall, fscore = calScores(realIDs, np.asarray(fakeIDs))\n print('Iteration {} have {} training images'.format(iter_n + 1, len(new_dataset)))\n print(f'precision:{precision * 100}, recall:{100 * recall}, fscore:{100 * fscore}')\n train_loader = DataLoader(\n Preprocessor(new_dataset, root=tgt_dataset.images_dir, transform=train_transformer),\n batch_size=args.batch_size, num_workers=4,\n sampler=RandomIdentitySampler(new_dataset, args.num_instances),\n pin_memory=True, drop_last=True\n )\n trainer = CoTeaching(\n model, coModel, train_loader, criterion, optims\n )\n\n # Start training\n for epoch in range(args.epochs):\n trainer.train(epoch, remRate=0.2 + (0.8 / args.iteration) * (1 + iter_n)) # to at most 80%\n # test only\n evaluator = Evaluator(model, print_freq=args.print_freq)\n rank_score = evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)\n\n # Evaluate\n rank_score = evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)\n save_checkpoint({\n 'state_dict': model.module.state_dict(),\n 
'epoch': epoch + 1, 'best_top1': rank_score.market1501[0],\n }, True, fpath=osp.join(args.logs_dir, 'adapted.pth.tar'))\n return rank_score.map, rank_score.market1501[0]\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Triplet loss classification\")\n # data\n parser.add_argument('--src_dataset', type=str, default='dukemtmc',\n choices=datasets.names())\n parser.add_argument('--tgt_dataset', type=str, default='market1501',\n choices=datasets.names())\n parser.add_argument('--batch_size', type=int, default=64)\n parser.add_argument('--workers', type=int, default=4)\n parser.add_argument('--split', type=int, default=0)\n parser.add_argument('--noiseLam', type=float, default=0.5)\n parser.add_argument('--height', type=int,\n help=\"input height, default: 256 for resnet*, \"\n \"144 for inception\")\n parser.add_argument('--width', type=int,\n help=\"input width, default: 128 for resnet*, \"\n \"56 for inception\")\n parser.add_argument('--combine-trainval', action='store_true',\n help=\"train and val sets together for training, \"\n \"val set alone for validation\")\n parser.add_argument('--num_instances', type=int, default=4,\n help=\"each minibatch consist of \"\n \"(batch_size // num_instances) identities, and \"\n \"each identity has num_instances instances, \"\n \"default: 4\")\n # model\n parser.add_argument('--arch', type=str, default='resnet50',\n choices=models.names())\n # loss\n parser.add_argument('--margin', type=float, default=0.5,\n help=\"margin of the triplet loss, default: 0.5\")\n parser.add_argument('--lambda_value', type=float, default=0.1,\n help=\"balancing parameter, default: 0.1\")\n parser.add_argument('--rho', type=float, default=1.6e-3,\n help=\"rho percentage, default: 1.6e-3\")\n # optimizer\n parser.add_argument('--lr', type=float, default=6e-5,\n help=\"learning rate of all parameters\")\n # training configs\n parser.add_argument('--resume', type=str, metavar='PATH',\n default='')\n parser.add_argument('--evaluate', type=int, default=0,\n help=\"evaluation only\")\n parser.add_argument('--seed', type=int, default=1)\n parser.add_argument('--print_freq', type=int, default=1)\n parser.add_argument('--iteration', type=int, default=10)\n parser.add_argument('--epochs', type=int, default=30)\n # metric learning\n parser.add_argument('--dist_metric', type=str, default='euclidean',\n choices=['euclidean', 'kissme'])\n # misc\n parser.add_argument('--data_dir', type=str, metavar='PATH',\n default='')\n parser.add_argument('--logs_dir', type=str, metavar='PATH',\n default='')\n\n args = parser.parse_args()\n mean_ap, rank1 = main(args)\n" ]
[ [ "torch.nn.functional.normalize", "numpy.random.seed", "numpy.nonzero", "numpy.asarray", "torch.manual_seed", "torch.from_numpy", "numpy.sort", "sklearn.cluster.DBSCAN", "numpy.round", "numpy.triu", "torch.nn.DataParallel", "torch.pow" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Diego-II/Datadriven-GPVAD
[ "ef033eb317553dc60464e07d8b5ba4f67bcf4a8d" ]
[ "utils.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport collections\nimport sys\nfrom loguru import logger\nfrom pprint import pformat\nfrom typing import List\n\nimport numpy as np\nimport pandas as pd\nimport scipy\nimport six\nimport sklearn.preprocessing as pre\nimport torch\nimport tqdm\nimport yaml\n\nimport augment\nimport dataset\n\n# Some defaults for non-specified arguments in yaml\nDEFAULT_ARGS = {\n 'outputpath': 'experiments',\n 'loss': 'BCELoss',\n 'batch_size': 64,\n 'num_workers': 4,\n 'epochs': 100,\n 'transforms': [],\n 'label_type':'soft',\n 'scheduler_args': {\n 'patience': 3,\n 'factor': 0.1,\n },\n 'early_stop': 7,\n 'optimizer': 'Adam',\n 'optimizer_args': {\n 'lr': 0.001,\n },\n 'threshold': None, #Default threshold for postprocessing function\n 'postprocessing': 'double',\n}\n\n\ndef parse_config_or_kwargs(config_file, **kwargs):\n \"\"\"parse_config_or_kwargs\n\n :param config_file: Config file that has parameters, yaml format\n :param **kwargs: Other alternative parameters or overwrites for config\n \"\"\"\n with open(config_file) as con_read:\n yaml_config = yaml.load(con_read, Loader=yaml.FullLoader)\n # values from config file are all possible params\n arguments = dict(yaml_config, **kwargs)\n # In case some arguments were not passed, replace with default ones\n for key, value in DEFAULT_ARGS.items():\n arguments.setdefault(key, value)\n return arguments\n\n\ndef find_contiguous_regions(activity_array):\n \"\"\"Find contiguous regions from bool valued numpy.array.\n Copy of https://dcase-repo.github.io/dcase_util/_modules/dcase_util/data/decisions.html#DecisionEncoder\n\n Reason is:\n 1. This does not belong to a class necessarily\n 2. Import DecisionEncoder requires sndfile over some other imports..which causes some problems on clusters\n\n \"\"\"\n\n # Find the changes in the activity_array\n change_indices = np.logical_xor(activity_array[1:],\n activity_array[:-1]).nonzero()[0]\n\n # Shift change_index with one, focus on frame after the change.\n change_indices += 1\n\n if activity_array[0]:\n # If the first element of activity_array is True add 0 at the beginning\n change_indices = np.r_[0, change_indices]\n\n if activity_array[-1]:\n # If the last element of activity_array is True, add the length of the array\n change_indices = np.r_[change_indices, activity_array.size]\n\n # Reshape the result into two columns\n return change_indices.reshape((-1, 2))\n\n\ndef split_train_cv(input_data, frac: float = 0.9, **kwargs):\n \"\"\"split_train_cv\n\n :param data_frame:\n :param frac:\n :type frac: float\n \"\"\"\n if isinstance(input_data, list):\n N = len(input_data)\n indicies = np.random.permutation(N)\n train_size = round(N * frac)\n cv_size = N - train_size\n train_idxs, cv_idxs = indicies[:train_size], indicies[cv_size:]\n input_data = np.array(input_data)\n return input_data[train_idxs].tolist(), input_data[cv_idxs].tolist()\n elif isinstance(input_data, pd.DataFrame):\n train_df = input_data.sample(frac=frac)\n cv_df = input_data[~input_data.index.isin(train_df.index)]\n return train_df, cv_df\n\n\ndef parse_transforms(transform_list):\n \"\"\"parse_transforms\n parses the config files transformation strings to coresponding methods\n\n :param transform_list: String list\n \"\"\"\n transforms = []\n for trans in transform_list:\n if trans == 'noise':\n transforms.append(augment.GaussianNoise(snr=25))\n elif trans == 'roll':\n transforms.append(augment.Roll(0, 10))\n elif trans == 'freqmask':\n transforms.append(augment.FreqMask(2, 8))\n elif trans == 
'timemask':\n transforms.append(augment.TimeMask(2, 60))\n elif trans == 'crop':\n transforms.append(augment.RandomCrop(200))\n elif trans == 'randompad':\n transforms.append(augment.RandomPad(value=0., padding=25))\n elif trans == 'flipsign':\n transforms.append(augment.FlipSign())\n elif trans == 'shift':\n transforms.append(augment.Shift())\n return torch.nn.Sequential(*transforms)\n\n\ndef pprint_dict(in_dict, outputfun=sys.stdout.write, formatter='yaml'):\n \"\"\"pprint_dict\n\n :param outputfun: function to use, defaults to sys.stdout\n :param in_dict: dict to print\n \"\"\"\n if formatter == 'yaml':\n format_fun = yaml.dump\n elif formatter == 'pretty':\n format_fun = pformat\n for line in format_fun(in_dict).split('\\n'):\n outputfun(line)\n\n\ndef getfile_outlogger(outputfile):\n log_format = \"[<green>{time:YYYY-MM-DD HH:mm:ss}</green>] {message}\"\n logger.configure(handlers=[{\"sink\": sys.stderr, \"format\": log_format}])\n if outputfile:\n logger.add(outputfile, enqueue=True, format=log_format)\n return logger\n\n\ndef train_labelencoder(labels: pd.Series, sparse=True):\n \"\"\"encode_labels\n\n Encodes labels\n\n :param labels: pd.Series representing the raw labels e.g., Speech, Water\n :param encoder (optional): Encoder already fitted \n returns encoded labels (many hot) and the encoder\n \"\"\"\n assert isinstance(labels, pd.Series), \"Labels need to be series\"\n if isinstance(labels[0], six.string_types):\n # In case of using non processed strings, e.g., Vaccum, Speech\n label_array = labels.str.split(',').values.tolist()\n elif isinstance(labels[0], np.ndarray):\n # Encoder does not like to see numpy array\n label_array = [lab.tolist() for lab in labels]\n elif isinstance(labels[0], collections.Iterable):\n label_array = labels\n encoder = pre.MultiLabelBinarizer(sparse_output=sparse)\n encoder.fit(label_array)\n return encoder\n\n\ndef encode_labels(labels: pd.Series, encoder=None, sparse=True):\n \"\"\"encode_labels\n\n Encodes labels\n\n :param labels: pd.Series representing the raw labels e.g., Speech, Water\n :param encoder (optional): Encoder already fitted \n returns encoded labels (many hot) and the encoder\n \"\"\"\n assert isinstance(labels, pd.Series), \"Labels need to be series\"\n instance = labels.iloc[0]\n if isinstance(instance, six.string_types):\n # In case of using non processed strings, e.g., Vaccum, Speech\n label_array = labels.str.split(',').values.tolist()\n elif isinstance(instance, np.ndarray):\n # Encoder does not like to see numpy array\n label_array = [lab.tolist() for lab in labels]\n elif isinstance(instance, collections.Iterable):\n label_array = labels\n if not encoder:\n encoder = pre.MultiLabelBinarizer(sparse_output=sparse)\n encoder.fit(label_array)\n labels_encoded = encoder.transform(label_array)\n return labels_encoded, encoder\n\n # return pd.arrays.SparseArray(\n # [row.toarray().ravel() for row in labels_encoded]), encoder\n\n\ndef decode_with_timestamps(encoder: pre.MultiLabelBinarizer, labels: np.array):\n \"\"\"decode_with_timestamps\n Decodes the predicted label array (2d) into a list of\n [(Labelname, onset, offset), ...]\n\n :param encoder: Encoder during training\n :type encoder: pre.MultiLabelBinarizer\n :param labels: n-dim array\n :type labels: np.array\n \"\"\"\n if labels.ndim == 3:\n return [_decode_with_timestamps(encoder, lab) for lab in labels]\n else:\n return _decode_with_timestamps(encoder, labels)\n\n\ndef sma_filter(x, window_size, axis=1):\n \"\"\"sma_filter\n\n :param x: Input numpy array,\n :param 
window_size: size of the moving average window\n :param axis: axis (usually time) over which to apply the filter\n \"\"\"\n # 1 is time axis\n kernel = np.ones((window_size, )) / window_size\n\n def moving_average(arr):\n return np.convolve(arr, kernel, 'same')\n\n return np.apply_along_axis(moving_average, axis, x)\n\n\ndef median_filter(x, window_size, threshold=0.5):\n \"\"\"median_filter\n\n :param x: input prediction array of shape (B, T, C) or (B, T).\n Input is a sequence of probabilities 0 <= x <= 1\n :param window_size: median filter kernel size along the time axis\n :param threshold: Binary thresholding threshold\n \"\"\"\n x = binarize(x, threshold=threshold)\n if x.ndim == 3:\n size = (1, window_size, 1)\n elif x.ndim == 2 and x.shape[0] == 1:\n # Assume input is class-specific median filtering\n # E.g., Batch x Time [1, 501]\n size = (1, window_size)\n elif x.ndim == 2 and x.shape[0] > 1:\n # Assume input is standard median pooling, class-independent\n # E.g., Time x Class [501, 10]\n size = (window_size, 1)\n return scipy.ndimage.median_filter(x, size=size)\n\n\ndef _decode_with_timestamps(encoder, labels):\n result_labels = []\n for i, label_column in enumerate(labels.T):\n change_indices = find_contiguous_regions(label_column)\n # append [onset, offset] in the result list\n for row in change_indices:\n result_labels.append((encoder.classes_[i], row[0], row[1]))\n return result_labels\n\n\ndef inverse_transform_labels(encoder, pred):\n if pred.ndim == 3:\n return [encoder.inverse_transform(x) for x in pred]\n else:\n return encoder.inverse_transform(pred)\n\n\ndef binarize(pred, threshold=0.5):\n # Batch_wise\n if pred.ndim == 3:\n return np.array(\n [pre.binarize(sub, threshold=threshold) for sub in pred])\n else:\n return pre.binarize(pred, threshold=threshold)\n\n\ndef double_threshold(x, high_thres, low_thres, n_connect=1):\n \"\"\"double_threshold\n Helper function to calculate double threshold for n-dim arrays\n\n :param x: input array\n :param high_thres: high threshold value\n :param low_thres: Low threshold value\n :param n_connect: Distance of <= n clusters will be merged\n \"\"\"\n assert x.ndim <= 3, \"Whoops something went wrong with the input ({}), check if it's <= 3 dims\".format(\n x.shape)\n if x.ndim == 3:\n apply_dim = 1\n elif x.ndim < 3:\n apply_dim = 0\n # x is assumed to be 3d: (batch, time, dim)\n # Assumed to be 2d : (time, dim)\n # Assumed to be 1d : (time)\n # time axis is therefore at 1 for 3d and 0 for 2d\n return np.apply_along_axis(lambda x: _double_threshold(\n x, high_thres, low_thres, n_connect=n_connect),\n axis=apply_dim,\n arr=x)\n\n\ndef _double_threshold(x, high_thres, low_thres, n_connect=1, return_arr=True):\n \"\"\"_double_threshold\n Computes a double threshold over the input array\n\n :param x: input array, needs to be 1d\n :param high_thres: High threshold over the array\n :param low_thres: Low threshold over the array\n :param n_connect: Postprocessing, maximal distance between clusters to connect\n :param return_arr: If True (the default), returns an array of the same size as x filled with ones and zeros; if False, returns the filtered (onset, offset) index pairs.\n \"\"\"\n assert x.ndim == 1, \"Input needs to be 1d\"\n high_locations = np.where(x > high_thres)[0]\n locations = x > low_thres\n encoded_pairs = find_contiguous_regions(locations)\n\n filtered_list = list(\n filter(\n lambda pair:\n ((pair[0] <= high_locations) & (high_locations <= pair[1])).any(),\n encoded_pairs))\n\n filtered_list = connect_(filtered_list, n_connect)\n if return_arr:\n zero_one_arr = np.zeros_like(x, 
dtype=int)\n for sl in filtered_list:\n zero_one_arr[sl[0]:sl[1]] = 1\n return zero_one_arr\n return filtered_list\n\n\ndef connect_clusters(x, n=1):\n if x.ndim == 1:\n return connect_clusters_(x, n)\n if x.ndim >= 2:\n return np.apply_along_axis(lambda a: connect_clusters_(a, n=n), -2, x)\n\n\ndef connect_clusters_(x, n=1):\n \"\"\"connect_clusters_\n Connects clustered predictions (0,1) in x with range n\n\n :param x: Input array. zero-one format\n :param n: Number of frames to skip until connection can be made\n \"\"\"\n assert x.ndim == 1, \"input needs to be 1d\"\n reg = find_contiguous_regions(x)\n start_end = connect_(reg, n=n)\n zero_one_arr = np.zeros_like(x, dtype=int)\n for sl in start_end:\n zero_one_arr[sl[0]:sl[1]] = 1\n return zero_one_arr\n\n\ndef connect_(pairs, n=1):\n \"\"\"connect_\n Connects two adjacent clusters if their distance is <= n\n\n :param pairs: Clusters of iterables e.g., [(1,5),(7,10)]\n :param n: distance between two clusters\n \"\"\"\n if len(pairs) == 0:\n return []\n start_, end_ = pairs[0]\n new_pairs = []\n for next_item, cur_item in zip(pairs[1:], pairs[:-1]):\n end_ = next_item[1]\n if next_item[0] - cur_item[1] <= n:\n pass\n else:\n new_pairs.append((start_, cur_item[1]))\n start_ = next_item[0]\n new_pairs.append((start_, end_))\n return new_pairs\n\n\ndef predictions_to_time(df, ratio):\n df.onset = df.onset * ratio\n df.offset = df.offset * ratio\n return df\n\n\ndef estimate_scaler(dataloader, **scaler_args):\n\n scaler = pre.StandardScaler(**scaler_args)\n with tqdm.tqdm(total=len(dataloader),\n unit='batch',\n leave=False,\n desc='Estimating Scaler') as pbar:\n for batch in dataloader:\n feature = batch[0]\n # Flatten time and batch dim to one\n feature = feature.reshape(-1, feature.shape[-1])\n pbar.set_postfix(feature=feature.shape)\n pbar.update()\n scaler.partial_fit(feature)\n return scaler\n\n\ndef rescale_0_1(x):\n if x.ndim == 2:\n return pre.minmax_scale(x, axis=0)\n else:\n\n def min_max_scale(a):\n return pre.minmax_scale(a, axis=0)\n\n # The original code defined min_max_scale but never returned a result;\n # scale each 1-d slice along the time axis (axis 1 for (B, T, C) input,\n # axis 0 for 1-d input) -- assumed intent.\n return np.apply_along_axis(min_max_scale, 0 if x.ndim == 1 else 1, x)\n\n\ndef df_to_dict(df, index='filename', value='hdf5path'):\n return dict(zip(df[index], df[value]))\n" ]
[ [ "numpy.convolve", "torch.nn.Sequential", "numpy.logical_xor", "sklearn.preprocessing.minmax_scale", "scipy.ndimage.median_filter", "sklearn.preprocessing.MultiLabelBinarizer", "numpy.ones", "numpy.apply_along_axis", "numpy.zeros_like", "numpy.random.permutation", "sklearn.preprocessing.binarize", "sklearn.preprocessing.StandardScaler", "numpy.array", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
benJephunneh/opticspy
[ "a0b841f60f7c053b05444c0e8886cd4a99c4d082" ]
[ "opticspy/test/PSF.py" ]
[ "import numpy as np\nfrom numpy import sqrt as sqrt\nfrom numpy import cos as cos\nfrom numpy import sin as sin\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm as cm\nfrom matplotlib.ticker import LinearLocator as LinearLocator\nfrom matplotlib.ticker import FormatStrFormatter as FormatStrFormatter\nfrom numpy.fft import fftshift as fftshift\nfrom numpy.fft import ifftshift as ifftshift\nfrom numpy.fft import fft2 as fft2\n\ndef apershow(obj):\n\tobj = -abs(obj)\n\tplt.imshow(obj)\n\tplt.set_cmap('Greys')\n\tplt.show()\n\nl1 = 100\n#Generate test surface matrix from a detector\nx = np.linspace(-1, 1, l1)\ny = np.linspace(-1, 1, l1)\n[X,Y] = np.meshgrid(x,y)\nr = sqrt(X**2+Y**2)\nZ = sqrt(14)*(8*X**4-8*X**2*r**2+r**4)*(6*r**2-5)\nfor i in range(len(Z)):\n\tfor j in range(len(Z)):\n\t\tif x[i]**2+y[j]**2>1:\n\t\t\tZ[i][j]=0\n\nfig = plt.figure(1)\nax = fig.gca(projection='3d')\nsurf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.RdYlGn,\n linewidth=0, antialiased=False, alpha = 0.6)\n\nv = max(abs(Z.max()),abs(Z.min()))\nax.set_zlim(-v*5, v*5)\ncset = ax.contourf(X, Y, Z, zdir='z', offset=-v*5, cmap=cm.RdYlGn)\nax.zaxis.set_major_locator(LinearLocator(10))\nax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\nfig.colorbar(surf, shrink=1, aspect=30)\nplt.show()\n\nd = 400\nA = np.zeros([d,d])\nA[d/2-49:d/2+51,d/2-49:d/2+51] = Z\nplt.imshow(A)\nplt.show()\n\nabbe = np.exp(1j*2*np.pi*A)\nfor i in range(len(abbe)):\n\tfor j in range(len(abbe)):\n\t\tif abbe[i][j]==1:\n\t\t\tabbe[i][j]=0\nfig = plt.figure(2)\nAP = abs(fftshift(fft2(fftshift(abbe))))**2\nAP = AP/AP.max()\nplt.imshow(AP)\nplt.show()" ]
[ [ "matplotlib.pyplot.imshow", "numpy.sqrt", "numpy.meshgrid", "numpy.linspace", "numpy.fft.fftshift", "matplotlib.pyplot.set_cmap", "matplotlib.ticker.LinearLocator", "numpy.exp", "matplotlib.ticker.FormatStrFormatter", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yangzhou95/learn-tensorflow
[ "6def45659ebf7aa9f74f1efe1ff6465b8db5ee93" ]
[ "read_csv.py" ]
[ "# read csv to model\nimport tensorflow as tf\nimport numpy as np\nimport os\n\n\ndef read_csv(batch_size, file_name, record_defaults=1):\n fileName_queue=tf.train.string_input_producer(os.path.dirname(__file__)+\"/\"+file_name)\n reader = tf.TextLineReader(skip_header_lines=1)\n key, value=reader.read(fileName_queue,name='read_op')\n\n # decode_csv will convert a Tensor from type string (the text line) in\n # a tuple of tensor columns with the specified defaults, which also\n # sets teh data type for each column\n decoded=tf.decode_csv(records=value)\n\n # batch actually reads the file and loads \"batch size\" rows in a single tensor\n return tf.train.shuffle_batch(decoded, batch_size=batch_size, capacity=batch_size* 50, min_after_dequeue=batch_size)\n\n\ndef inputs():\n passenger_id, survived, pclass, name, sex, age, sibsp, parch, ticket, fare, cabin, embarked =\\\n read_csv(100,\"./data/train.csv\",)" ]
[ [ "tensorflow.TextLineReader", "tensorflow.train.shuffle_batch", "tensorflow.decode_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
HusseinLezzaik/Consensus-Algorithm-for-2-Mobile-Robots
[ "ecdb28793cc1f5fa6cded752908105ec37e9bfc7", "ecdb28793cc1f5fa6cded752908105ec37e9bfc7", "ecdb28793cc1f5fa6cded752908105ec37e9bfc7", "ecdb28793cc1f5fa6cded752908105ec37e9bfc7" ]
[ "Real Topology Graph/GNN Model 2/Fully Connected Graph/test_n3_cyclic.py", "Real Topology Graph/GNN Model 1/Fully Connected Graph/Main_MLP_line.py", "Real Topology Graph/GNN Model 2/Cyclic Graph/test_n2_robot3.py", "Real Topology Graph/GNN Model 4/Fully Connected Graph/test_n2_robots34.py" ]
[ "\"\"\"\n\nConsensus Algorithm for 3 Mobile robots using MLP Model Cyclic Graph Implementation\n\nScene: Robot 1, Robot 2, Robot 3\n\nInputs: Mx, My, Phix, Phiy\nOutputs: Ux, Uy\n\n\"\"\"\nimport torch\nimport MLP_Model\nimport math\nimport numpy as np\nimport rclpy\nfrom rclpy.node import Node\nfrom tf2_msgs.msg import TFMessage\nfrom std_msgs.msg import Float32\nimport time\n\nL = 1\nd = 0.5\n\n# load model using dict\nFILE = \"model.pth\"\nloaded_model = MLP_Model.MLP()\nloaded_model.load_state_dict(torch.load(FILE))\nloaded_model.eval()\n\ndef euler_from_quaternion(x, y, z, w):\n \n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n \n return yaw_z # in radians\n \n \nclass MinimalPublisher(Node):\n\n def __init__(self):\n super().__init__('minimal_publisher1')\n self.publisher_l1 = self.create_publisher(Float32, '/leftMotorSpeedrobot1', 0) #Change according to topic in child script,String to Float32\n self.publisher_r1 = self.create_publisher(Float32, '/rightMotorSpeedrobot1',0) #Change according to topic in child script,String to Float32\n self.publisher_l2 = self.create_publisher(Float32, '/leftMotorSpeedrobot2', 0) #Change according to topic in child script,String to Float32\n self.publisher_r2 = self.create_publisher(Float32, '/rightMotorSpeedrobot2',0) #Change according to topic in child script,String to Float32\n self.publisher_l3 = self.create_publisher(Float32, '/leftMotorSpeedrobot3', 0) #Change according to topic in child script,String to Float32\n self.publisher_r3 = self.create_publisher(Float32, '/rightMotorSpeedrobot3',0) #Change according to topic in child script,String to Float32 \n self.subscription = self.create_subscription(\n TFMessage,\n '/tf',\n self.listener_callback,\n 0)\n\n \" Timer Callback \"\n \n #self.publisher_ = self.create_publisher(Float32(), 'topic', 10)\n timer_period = 0.01 # seconds\n self.timer = self.create_timer(timer_period, self.timer_callback)\n self.i = 0\n \n \" Parameters \"\n self.t = 0 # Just to intialized Phix's and Phiy's\n \n \" Initialize Phi's\"\n self.Phix1 = 0 # 1x1\n self.Phiy1 = 0 # 1x1\n self.Phix2 = 0 # 1x1\n self.Phiy2 = 0 # 1x1\n self.Phix3 = 0 # 1x1\n self.Phiy3 = 0 # 1x1 \n \n \" Mobile Robot 1 Parameters \"\n self.x1 = 0\n self.y1 = 0\n self.Theta1 = 0\n self.v1 = 0\n self.w1 = 0\n self.vL1 = 0\n self.vR1 = 0\n \n \" Mobile Robot 2 Parameters \"\n self.x2 = 0\n self.y2 = 0\n self.Theta2 = 0\n self.v2 = 0\n self.w2 = 0\n self.vL2 = 0 \n self.vR2 = 0\n\n \" Mobile Robot 3 Parameters \"\n self.x3 = 0\n self.y3 = 0\n self.Theta3 = 0\n self.v3 = 0\n self.w3 = 0\n self.vL3 =0 \n self.vR3 = 0\n \n def timer_callback(self):\n \n \" Calculate Mx1, My1, ...... 
Mx6, My6 \" \n # Initialize Phi's\n if self.t ==0:\n self.Phix1 = 0 # 1x1\n self.Phiy1 = 0 # 1x1\n self.Phix2 = 0 # 1x1\n self.Phiy2 = 0 # 1x1\n self.Phix3 = 0 # 1x1\n self.Phiy3 = 0 # 1x1\n self.t += 1 \n \n Mx1 = ( (self.x2 - self.x1) + (self.x3 - self.x1) )/2\n My1 = ( (self.y2 - self.y1) + (self.y3 - self.y1) )/2\n \n Mx2 = ( (self.x1 - self.x2) + (self.x3 - self.x2) )/2\n My2 = ( (self.y1 - self.y2) + (self.y3 - self.y2) )/2 \n\n Mx3 = ( (self.x2 - self.x3) + (self.x1 - self.x3) )/2\n My3 = ( (self.y2 - self.y3) + (self.y1 - self.y3) )/2 \n \n \" Use MLP to Predict control inputs \"\n \n relative_pose_1 = [ Mx1, My1, self.Phix1, self.Phiy1 ] # tensor data for MLP model\n relative_pose_2 = [ Mx2, My2, self.Phix2, self.Phiy2 ] # tensor data for MLP model\n relative_pose_3 = [ Mx3, My3, self.Phix3, self.Phiy3 ] # tensor data for MLP model\n\n\n u1_predicted = MLP_Model.predict(relative_pose_1, loaded_model) # predict control input u1, tensor\n u2_predicted = MLP_Model.predict(relative_pose_2, loaded_model) # predict control input u2, tensor\n u3_predicted = MLP_Model.predict(relative_pose_3, loaded_model) # predict control input u3, tensor \n\n self.Phix1 = ( u2_predicted[0][0] + u3_predicted[0][0] )/2 # 1x1\n self.Phiy1 = ( u2_predicted[0][1] + u3_predicted[0][1] )/2 # 1x1\n \n self.Phix2 = ( u1_predicted[0][0] + u3_predicted[0][0] )/2 # 1x1\n self.Phiy2 = ( u1_predicted[0][1] + u3_predicted[0][1] )/2 # 1x1\n \n self.Phix3 = ( u2_predicted[0][0] + u1_predicted[0][0] )/2 # 1x1\n self.Phiy3 = ( u2_predicted[0][1] + u1_predicted[0][1] )/2 # 1x1 \n \n u1_predicted_np = np.array([[ u1_predicted[0][0] ], [ u1_predicted[0][1] ]]) # from tensor to numpy array for calculation\n u2_predicted_np = np.array([[ u2_predicted[0][0] ], [ u2_predicted[0][1] ]]) # from tensor to numpy array for calculation\n u3_predicted_np = np.array([[ u3_predicted[0][0] ], [ u3_predicted[0][1] ]]) # from tensor to numpy array for calculation\n\n \" Calculate V1/W1, V2/W2, V3/W3, V4/W4, V5/W5, V6/W6 \"\n \n S1 = np.array([[self.v1], [self.w1]]) #2x1\n G1 = np.array([[1,0], [0,1/L]]) #2x2\n R1 = np.array([[math.cos(self.Theta1),math.sin(self.Theta1)],[-math.sin(self.Theta1),math.cos(self.Theta1)]]) #2x2\n S1 = np.dot(np.dot(G1, R1), u1_predicted_np) #2x1\n \n S2 = np.array([[self.v2], [self.w2]]) #2x1\n G2 = np.array([[1,0], [0,1/L]]) #2x2\n R2 = np.array([[math.cos(self.Theta2),math.sin(self.Theta2)],[-math.sin(self.Theta2),math.cos(self.Theta2)]]) #2x2\n S2 = np.dot(np.dot(G2, R2), u2_predicted_np) # 2x1\n \n S3 = np.array([[self.v3], [self.w3]]) #2x1\n G3 = np.array([[1,0], [0,1/L]]) #2x2\n R3 = np.array([[math.cos(self.Theta3),math.sin(self.Theta3)],[-math.sin(self.Theta3),math.cos(self.Theta3)]]) #2x2\n S3 = np.dot(np.dot(G3, R3), u3_predicted_np) # 2x1 \n \n \" Calculate VL1/VR1, VL2/VR2, VL3/VR3, VL4/VR4, VL5/VR5, VL6/VR6 \"\n \n D = np.array([[1/2,1/2],[-1/(2*d),1/(2*d)]]) #2x2\n Di = np.linalg.inv(D) #2x2\n\n Speed_L1 = np.array([[self.vL1], [self.vR1]]) # Vector 2x1 for Speed of Robot 1\n Speed_L2 = np.array([[self.vL2], [self.vR2]]) # Vector 2x1 for Speed of Robot 2\n Speed_L3 = np.array([[self.vL3], [self.vR3]]) # Vector 2x1 for Speed of Robot 3\n\n M1 = np.array([[S1[0]],[S1[1]]]).reshape(2,1) #2x1\n M2 = np.array([[S2[0]],[S2[1]]]).reshape(2,1) #2x1\n M3 = np.array([[S3[0]],[S3[1]]]).reshape(2,1) #2x1\n\n Speed_L1 = np.dot(Di, M1) # 2x1 (VL1, VR1)\n Speed_L2 = np.dot(Di, M2) # 2x1 (VL2, VR2)\n Speed_L3 = np.dot(Di, M3) # 2x1 (VL1, VR1)\n\n VL1 = float(Speed_L1[0])\n VR1 = float(Speed_L1[1])\n VL2 = 
float(Speed_L2[0])\n VR2 = float(Speed_L2[1])\n VL3 = float(Speed_L3[0])\n VR3 = float(Speed_L3[1]) \n \n \" Publish Speed Commands to Robot 1 \"\n \n msgl1 = Float32() \n msgr1 = Float32()\n msgl1.data = VL1\n msgr1.data = VR1\n self.publisher_l1.publish(msgl1)\n self.publisher_r1.publish(msgr1)\n\n \" Publish Speed Commands to Robot 2 \"\n \n msgl2 = Float32()\n msgr2 = Float32()\n msgl2.data = VL2\n msgr2.data = VR2\n self.publisher_l2.publish(msgl2)\n self.publisher_r2.publish(msgr2) \n \n \" Publish Speed Commands to Robot 3 \"\n \n msgl3 = Float32()\n msgr3 = Float32()\n msgl3.data = VL3\n msgr3.data = VR3\n self.publisher_l3.publish(msgl3)\n self.publisher_r3.publish(msgr3) \n \n self.i += 1\n \n def listener_callback(self, msg):\n \n if msg.transforms[0].child_frame_id == 'robot1' : \n self.x1 = msg.transforms[0].transform.translation.x\n self.y1 = msg.transforms[0].transform.translation.y\n self.xr1 = msg.transforms[0].transform.rotation.x\n self.yr1 = msg.transforms[0].transform.rotation.y\n self.zr1 = msg.transforms[0].transform.rotation.z\n self.wr1 = msg.transforms[0].transform.rotation.w\n self.Theta1 = euler_from_quaternion(self.xr1,self.yr1,self.zr1,self.wr1)\n \n if msg.transforms[0].child_frame_id == 'robot2' :\n self.x2 = msg.transforms[0].transform.translation.x\n self.y2 = msg.transforms[0].transform.translation.y\n self.xr2 = msg.transforms[0].transform.rotation.x\n self.yr2 = msg.transforms[0].transform.rotation.y\n self.zr2 = msg.transforms[0].transform.rotation.z\n self.wr2 = msg.transforms[0].transform.rotation.w\n self.Theta2 = euler_from_quaternion(self.xr2,self.yr2,self.zr2,self.wr2)\n\n if msg.transforms[0].child_frame_id == 'robot3' : \n self.x3 = msg.transforms[0].transform.translation.x\n self.y3 = msg.transforms[0].transform.translation.y\n self.xr3 = msg.transforms[0].transform.rotation.x\n self.yr3 = msg.transforms[0].transform.rotation.y\n self.zr3 = msg.transforms[0].transform.rotation.z\n self.wr3 = msg.transforms[0].transform.rotation.w\n self.Theta3 = euler_from_quaternion(self.xr3,self.yr3,self.zr3,self.wr3) \n \n \ndef main(args=None):\n rclpy.init(args=args)\n minimal_publisher = MinimalPublisher()\n time.sleep(5)\n rclpy.spin(minimal_publisher)\n minimal_publisher.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()\n \n\n", "\"\"\"\n\nConsensus Algorithm for 6 Mobile robots using MLP Model for Line Graph Implementation\n\nInputs: Mx, My\nOutputs: Ux, Uy\n\n\"\"\"\nimport torch\nimport MLP_Model\nimport math\nimport numpy as np\nimport rclpy\nfrom rclpy.node import Node\nfrom tf2_msgs.msg import TFMessage\nfrom std_msgs.msg import Float32\nimport time\n\nL = 1\nd = 0.5\n#distance = 2\n\nA = np.ones(6) - np.identity(6) # Adjacency Matrix fully connected case 6x6\n\nux = np.zeros((6,1)) # 6x1 controller vector\nuy = np.zeros((6,1)) # 6x1 controller vector\n\n# load model using dict\nFILE = \"model.pth\"\nloaded_model = MLP_Model.MLP()\nloaded_model.load_state_dict(torch.load(FILE))\nloaded_model.eval()\n\n\ndef euler_from_quaternion(x, y, z, w):\n \n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n \n return yaw_z # in radians\n\n\nclass MinimalPublisher(Node):\n\n def __init__(self):\n super().__init__('minimal_publisher1')\n self.publisher_l1 = self.create_publisher(Float32, '/leftMotorSpeedrobot1', 0) #Change according to topic in child script,String to Float32\n self.publisher_r1 = self.create_publisher(Float32, '/rightMotorSpeedrobot1', 0) #Change according to topic in child 
script,String to Float32\n self.publisher_l2 = self.create_publisher(Float32, '/leftMotorSpeedrobot2', 0) #Change according to topic in child script,String to Float32\n self.publisher_r2 = self.create_publisher(Float32, '/rightMotorSpeedrobot2', 0) #Change according to topic in child script,String to Float32\n self.publisher_l3 = self.create_publisher(Float32, '/leftMotorSpeedrobot3', 0) #Change according to topic in child script,String to Float32\n self.publisher_r3 = self.create_publisher(Float32, '/rightMotorSpeedrobot3', 0) #Change according to topic in child script,String to Float32\n self.publisher_l4 = self.create_publisher(Float32, '/leftMotorSpeedrobot4', 0) #Change according to topic in child script,String to Float32\n self.publisher_r4 = self.create_publisher(Float32, '/rightMotorSpeedrobot4', 0) #Change according to topic in child script,String to Float32\n self.publisher_l5 = self.create_publisher(Float32, '/leftMotorSpeedrobot5', 0) #Change according to topic in child script,String to Float32\n self.publisher_r5 = self.create_publisher(Float32, '/rightMotorSpeedrobot5', 0) #Change according to topic in child script,String to Float32\n self.publisher_l6 = self.create_publisher(Float32, '/leftMotorSpeedrobot6', 0) #Change according to topic in child script,String to Float32\n self.publisher_r6 = self.create_publisher(Float32, '/rightMotorSpeedrobot6', 0) #Change according to topic in child script,String to Float32 \n self.subscription = self.create_subscription(\n TFMessage,\n '/tf',\n self.listener_callback,\n 0)\n\n \" Timer Callback \"\n #self.publisher_ = self.create_publisher(Float32(), 'topic', 10)\n timer_period = 0.01 # seconds\n self.timer = self.create_timer(timer_period, self.timer_callback)\n self.i = 0 \n\n \"Parameters \"\n self.k = 1 # Control Gain\n self.scene = 0 # Nb of scene iteration\n \n \" Mobile Robot 1 Parameters \"\n self.x1 = 0\n self.y1 = 0\n self.Theta1 = 0\n self.v1 = 0\n self.w1 = 0\n self.vL1 = 0\n self.vR1 = 0\n \n \" Mobile Robot 2 Parameters \"\n self.x2 = 0\n self.y2 = 0\n self.Theta2 = 0\n self.v2 = 0\n self.w2 = 0\n self.vL2 = 0 \n self.vR2 = 0\n \n \" Mobile Robot 3 Parameters \"\n self.x3 = 0\n self.y3 = 0\n self.Theta3 = 0\n self.v3 = 0\n self.w3 = 0\n self.vL3 = 0\n self.vR3 = 0 \n \n \" Mobile Robot 4 Parameters \"\n self.x4 = 0\n self.y4 = 0\n self.Theta4 = 0\n self.v4 = 0\n self.w4 = 0\n self.vL4 = 0\n self.vR4 = 0 \n \n \" Mobile Robot 5 Parameters \"\n self.x5 = 0\n self.y5 = 0\n self.Theta5 = 0\n self.v5 = 0\n self.w5 = 0\n self.vL5 = 0\n self.vR5 = 0\n\n \" Mobile Robot 6 Parameters \"\n self.x6 = 0\n self.y6 = 0\n self.Theta6 = 0\n self.v6 = 0\n self.w6 = 0\n self.vL6 = 0\n self.vR6 = 0\n \n def timer_callback(self):\n \n \" Calculate Mx1, My1, ...... 
Mx6, My6 \"\n \n Mx1 = self.x2 - self.x1 # 1x1\n My1 = self.y2 - self.y1 # 1x1\n \n Mx2 = ( ( self.x1 - self.x2 ) + ( self.x3 - self.x2 ) ) / 2 # 1x1\n My2 = ( ( self.y1 - self.y2 ) + ( self.y3 - self.y2 ) ) / 2 # 1x1 \n\n Mx3 = ( ( self.x2 - self.x3 ) + ( self.x4 - self.x3 ) ) / 2 # 1x1\n My3 = ( ( self.y2 - self.y3 ) + ( self.y4 - self.y3 ) ) / 2 # 1x1 \n \n Mx4 = ( ( self.x3 - self.x4 ) + ( self.x5 - self.x4 ) ) / 2 # 1x1\n My4 = ( ( self.y4 - self.y4 ) + ( self.y5 - self.y4 ) ) / 2 # 1x1 \n\n Mx5 = ( ( self.x4 - self.x5 ) + ( self.x6 - self.x5 ) ) / 2 # 1x1\n My5 = ( ( self.y4 - self.y5 ) + ( self.y6 - self.y5 ) ) / 2 # 1x1 \n \n Mx6 = self.x5 - self.x6 # 1x1\n My6 = self.y5 - self.y6 # 1x1 \n\n \" Use MLP to Predict control inputs \"\n \n relative_pose_1 = [ Mx1, My1 ] # tensor data for MLP model\n relative_pose_2 = [ Mx2, My2 ] # tensor data for MLP model\n relative_pose_3 = [ Mx3, My3 ] # tensor data for MLP model\n relative_pose_4 = [ Mx4, My4 ] # tensor data for MLP model\n relative_pose_5 = [ Mx5, My5 ] # tensor data for MLP model\n relative_pose_6 = [ Mx6, My6 ] # tensor data for MLP model\n \n u1_predicted = MLP_Model.predict(relative_pose_1, loaded_model) # predict control input u1, tensor\n u2_predicted = MLP_Model.predict(relative_pose_2, loaded_model) # predict control input u2, tensor\n u3_predicted = MLP_Model.predict(relative_pose_3, loaded_model) # predict control input u3, tensor\n u4_predicted = MLP_Model.predict(relative_pose_4, loaded_model) # predict control input u4, tensor\n u5_predicted = MLP_Model.predict(relative_pose_5, loaded_model) # predict control input u5, tensor\n u6_predicted = MLP_Model.predict(relative_pose_6, loaded_model) # predict control input u6, tensor\n \n u1_predicted_np = np.array([[ u1_predicted[0][0] ], [ u1_predicted[0][1] ]]) # from tensor to numpy array for calculation\n u2_predicted_np = np.array([[ u2_predicted[0][0] ], [ u2_predicted[0][1] ]]) # from tensor to numpy array for calculation\n u3_predicted_np = np.array([[ u3_predicted[0][0] ], [ u3_predicted[0][1] ]]) # from tensor to numpy array for calculation\n u4_predicted_np = np.array([[ u4_predicted[0][0] ], [ u4_predicted[0][1] ]]) # from tensor to numpy array for calculation \n u5_predicted_np = np.array([[ u5_predicted[0][0] ], [ u5_predicted[0][1] ]]) # from tensor to numpy array for calculation\n u6_predicted_np = np.array([[ u6_predicted[0][0] ], [ u6_predicted[0][1] ]]) # from tensor to numpy array for calculation\n \n \" Calculate V1/W1, V2/W2, V3/W3, V4/W4, V5/W5, V6/W6 \"\n \n S1 = np.array([[self.v1], [self.w1]]) #2x1\n G1 = np.array([[1,0], [0,1/L]]) #2x2\n R1 = np.array([[math.cos(self.Theta1),math.sin(self.Theta1)],[-math.sin(self.Theta1),math.cos(self.Theta1)]]) #2x2\n S1 = np.dot(np.dot(G1, R1), u1_predicted_np) #2x1\n \n S2 = np.array([[self.v2], [self.w2]]) #2x1\n G2 = np.array([[1,0], [0,1/L]]) #2x2\n R2 = np.array([[math.cos(self.Theta2),math.sin(self.Theta2)],[-math.sin(self.Theta2),math.cos(self.Theta2)]]) #2x2\n S2 = np.dot(np.dot(G2, R2), u2_predicted_np) # 2x1\n\n S3 = np.array([[self.v3], [self.w3]]) #2x1\n G3 = np.array([[1,0], [0,1/L]]) #2x2\n R3 = np.array([[math.cos(self.Theta3),math.sin(self.Theta3)],[-math.sin(self.Theta3),math.cos(self.Theta3)]]) #2x2\n S3 = np.dot(np.dot(G3, R3), u3_predicted_np) #2x1 \n\n S4 = np.array([[self.v4], [self.w4]]) #2x1\n G4 = np.array([[1,0], [0,1/L]]) #2x2\n R4 = np.array([[math.cos(self.Theta4),math.sin(self.Theta4)],[-math.sin(self.Theta4),math.cos(self.Theta4)]]) #2x2\n S4 = np.dot(np.dot(G4, R4), 
u4_predicted_np) #2x1 \n\n S5 = np.array([[self.v5], [self.w5]]) #2x1\n G5 = np.array([[1,0], [0,1/L]]) #2x2\n R5 = np.array([[math.cos(self.Theta5),math.sin(self.Theta5)],[-math.sin(self.Theta5),math.cos(self.Theta5)]]) #2x2\n S5 = np.dot(np.dot(G5, R5), u5_predicted_np) #2x1\n\n S6 = np.array([[self.v6], [self.w6]]) #2x1\n G6 = np.array([[1,0], [0,1/L]]) #2x2\n R6 = np.array([[math.cos(self.Theta6),math.sin(self.Theta6)],[-math.sin(self.Theta6),math.cos(self.Theta6)]]) #2x2\n S6 = np.dot(np.dot(G6, R6), u6_predicted_np) #2x1 \n \n \" Calculate VL1/VR1, VL2/VR2, VL3/VR3, VL4/VR4, VL5/VR5, VL6/VR6 \"\n \n D = np.array([[1/2,1/2],[-1/(2*d),1/(2*d)]]) #2x2\n Di = np.linalg.inv(D) #2x2\n\n Speed_L1 = np.array([[self.vL1], [self.vR1]]) # Vector 2x1 for Speed of Robot 1\n Speed_L2 = np.array([[self.vL2], [self.vR2]]) # Vector 2x1 for Speed of Robot 2 \n Speed_L3 = np.array([[self.vL3], [self.vR3]]) # Vector 2x1 for Speed of Robot 3\n Speed_L4 = np.array([[self.vL4], [self.vR4]]) # Vector 2x1 for Speed of Robot 4\n Speed_L5 = np.array([[self.vL5], [self.vR5]]) # Vector 2x1 for Speed of Robot 5\n Speed_L6 = np.array([[self.vL6], [self.vR6]]) # Vector 2x1 for Speed of Robot 6\n\n M1 = np.array([[S1[0]],[S1[1]]]).reshape(2,1) #2x1\n M2 = np.array([[S2[0]],[S2[1]]]).reshape(2,1) #2x1\n M3 = np.array([[S3[0]],[S3[1]]]).reshape(2,1) #2x1\n M4 = np.array([[S4[0]],[S4[1]]]).reshape(2,1) #2x1\n M5 = np.array([[S5[0]],[S5[1]]]).reshape(2,1) #2x1\n M6 = np.array([[S6[0]],[S6[1]]]).reshape(2,1) #2x1\n \n Speed_L1 = np.dot(Di, M1) # 2x1 (VL1, VR1)\n Speed_L2 = np.dot(Di, M2) # 2x1 (VL2, VR2)\n Speed_L3 = np.dot(Di, M3) # 2x1 (VL3, VR3)\n Speed_L4 = np.dot(Di, M4) # 2x1 (VL4, VR4)\n Speed_L5 = np.dot(Di, M5) # 2x1 (VL5, VR5)\n Speed_L6 = np.dot(Di, M6) # 2x1 (VL6, VR6)\n\n VL1 = float(Speed_L1[0])\n VR1 = float(Speed_L1[1])\n VL2 = float(Speed_L2[0])\n VR2 = float(Speed_L2[1])\n VL3 = float(Speed_L3[0])\n VR3 = float(Speed_L3[1])\n VL4 = float(Speed_L4[0])\n VR4 = float(Speed_L4[1])\n VL5 = float(Speed_L5[0])\n VR5 = float(Speed_L5[1]) \n VL6 = float(Speed_L6[0])\n VR6 = float(Speed_L6[1])\n \n \" Publish Speed Commands to Robot 1 \"\n \n msgl1 = Float32() \n msgr1 = Float32()\n msgl1.data = VL1\n msgr1.data = VR1\n self.publisher_l1.publish(msgl1)\n self.publisher_r1.publish(msgr1)\n #self.get_logger().info('Publishing R1: \"%s\"' % msgr1.data)\n\n\n \" Publish Speed Commands to Robot 2 \"\n \n msgl2 = Float32()\n msgr2 = Float32()\n msgl2.data = VL2\n msgr2.data = VR2\n self.publisher_l2.publish(msgl2)\n self.publisher_r2.publish(msgr2)\n\n \" Publish Speed Commands to Robot 3 \"\n \n msgl3 = Float32()\n msgr3 = Float32()\n msgl3.data = VL3\n msgr3.data = VR3\n self.publisher_l3.publish(msgl3)\n self.publisher_r3.publish(msgr3)\n \n \" Publish Speed Commands to Robot 4 \"\n \n msgl4 = Float32()\n msgr4 = Float32()\n msgl4.data = VL4\n msgr4.data = VR4\n self.publisher_l4.publish(msgl4)\n self.publisher_r4.publish(msgr4) \n \n \n \" Publish Speed Commands to Robot 5 \"\n \n msgl5 = Float32()\n msgr5 = Float32()\n msgl5.data = VL5\n msgr5.data = VR5\n self.publisher_l5.publish(msgl5)\n self.publisher_r5.publish(msgr5) \n\n\n \" Publish Speed Commands to Robot 6 \"\n \n msgl6 = Float32()\n msgr6 = Float32()\n msgl6.data = VL6\n msgr6.data = VR6\n self.publisher_l6.publish(msgl6)\n self.publisher_r6.publish(msgr6)\n\n self.i += 1 \n\n\n def listener_callback(self, msg):\n \n if msg.transforms[0].child_frame_id == 'robot1' : \n self.x1 = msg.transforms[0].transform.translation.x\n self.y1 = 
msg.transforms[0].transform.translation.y\n self.xr1 = msg.transforms[0].transform.rotation.x\n self.yr1 = msg.transforms[0].transform.rotation.y\n self.zr1 = msg.transforms[0].transform.rotation.z\n self.wr1 = msg.transforms[0].transform.rotation.w\n self.Theta1 = euler_from_quaternion(self.xr1,self.yr1,self.zr1,self.wr1)\n \n\n if msg.transforms[0].child_frame_id == 'robot2' :\n self.x2 = msg.transforms[0].transform.translation.x\n self.y2 = msg.transforms[0].transform.translation.y\n self.xr2 = msg.transforms[0].transform.rotation.x\n self.yr2 = msg.transforms[0].transform.rotation.y\n self.zr2 = msg.transforms[0].transform.rotation.z\n self.wr2 = msg.transforms[0].transform.rotation.w\n self.Theta2 = euler_from_quaternion(self.xr2,self.yr2,self.zr2,self.wr2) \n \n if msg.transforms[0].child_frame_id == 'robot3' :\n \n self.x3 = msg.transforms[0].transform.translation.x\n self.y3 = msg.transforms[0].transform.translation.y\n self.xr3 = msg.transforms[0].transform.rotation.x\n self.yr3 = msg.transforms[0].transform.rotation.y\n self.zr3 = msg.transforms[0].transform.rotation.z\n self.wr3 = msg.transforms[0].transform.rotation.w\n self.Theta3 = euler_from_quaternion(self.xr3,self.yr3,self.zr3,self.wr3)\n\n if msg.transforms[0].child_frame_id == 'robot4' :\n \n self.x4 = msg.transforms[0].transform.translation.x\n self.y4 = msg.transforms[0].transform.translation.y\n self.xr4 = msg.transforms[0].transform.rotation.x\n self.yr4 = msg.transforms[0].transform.rotation.y\n self.zr4 = msg.transforms[0].transform.rotation.z\n self.wr4 = msg.transforms[0].transform.rotation.w\n self.Theta4 = euler_from_quaternion(self.xr4,self.yr4,self.zr4,self.wr4) \n \n if msg.transforms[0].child_frame_id == 'robot5' :\n \n self.x5 = msg.transforms[0].transform.translation.x\n self.y5 = msg.transforms[0].transform.translation.y\n self.xr5 = msg.transforms[0].transform.rotation.x\n self.yr5 = msg.transforms[0].transform.rotation.y\n self.zr5 = msg.transforms[0].transform.rotation.z\n self.wr5 = msg.transforms[0].transform.rotation.w\n self.Theta5 = euler_from_quaternion(self.xr5,self.yr5,self.zr5,self.wr5) \n \n if msg.transforms[0].child_frame_id == 'robot6' :\n \n self.x6 = msg.transforms[0].transform.translation.x\n self.y6 = msg.transforms[0].transform.translation.y\n self.xr6 = msg.transforms[0].transform.rotation.x\n self.yr6 = msg.transforms[0].transform.rotation.y\n self.zr6 = msg.transforms[0].transform.rotation.z\n self.wr6 = msg.transforms[0].transform.rotation.w\n self.Theta6 = euler_from_quaternion(self.xr6,self.yr6,self.zr6,self.wr6) \n \n \n \ndef main(args=None):\n rclpy.init(args=args)\n minimal_publisher = MinimalPublisher()\n time.sleep(5)\n rclpy.spin(minimal_publisher)\n minimal_publisher.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()\n", "\"\"\"\n\nConsensus Algorithm for 2 Robots using MLP Model\n\nScene: Robot 1, Robot 3\n\nInputs: Mx, My\nOutputs: Ux, Uy\n\n\"\"\"\nimport torch\nimport MLP_Model\nimport math\nimport numpy as np\nimport rclpy\nfrom rclpy.node import Node\nfrom tf2_msgs.msg import TFMessage\nfrom std_msgs.msg import Float32\nimport time\n\nL = 1\nd = 0.5\n\n# load model using dict\nFILE = \"model.pth\"\nloaded_model = MLP_Model.MLP()\nloaded_model.load_state_dict(torch.load(FILE))\nloaded_model.eval()\n\ndef euler_from_quaternion(x, y, z, w):\n \n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n \n return yaw_z # in radians\n \n \nclass MinimalPublisher(Node):\n\n def __init__(self):\n 
super().__init__('minimal_publisher1')\n self.publisher_l1 = self.create_publisher(Float32, '/leftMotorSpeedrobot1', 0) #Change according to topic in child script,String to Float32\n self.publisher_r1 = self.create_publisher(Float32, '/rightMotorSpeedrobot1',0) #Change according to topic in child script,String to Float32\n self.publisher_l3 = self.create_publisher(Float32, '/leftMotorSpeedrobot3', 0) #Change according to topic in child script,String to Float32\n self.publisher_r3 = self.create_publisher(Float32, '/rightMotorSpeedrobot3',0) #Change according to topic in child script,String to Float32 \n self.subscription = self.create_subscription(\n TFMessage,\n '/tf',\n self.listener_callback,\n 0)\n \n \" Timer Callback \"\n timer_period = 0.1 # seconds\n self.timer = self.create_timer(timer_period, self.timer_callback)\n self.i = 0 \n\n \" Parameters \"\n self.t = 0 # Just to initialize Phix's and Phiy's\n \n \" Initialize Phi's\"\n self.Phix1 = 0 # 1x1\n self.Phiy1 = 0 # 1x1\n self.Phix3 = 0 # 1x1\n self.Phiy3 = 0 # 1x1\n \n \" Mobile Robot 1 Parameters \"\n self.x1 = 0\n self.y1 = 0\n self.Theta1 = 0\n self.v1 = 0\n self.w1 = 0\n self.vL1 = 0\n self.vR1 = 0\n \n \" Mobile Robot 3 Parameters \"\n self.x3 = 0\n self.y3 = 0\n self.Theta3 = 0\n self.v3 = 0\n self.w3 = 0\n self.vL3 = 0 \n self.vR3 = 0\n\n def timer_callback(self):\n \n \" Calculate Mx1, My1, ...... Mx6, My6 \"\n # Initialize Phi's\n if self.t ==0:\n self.Phix1 = 0 # 1x1\n self.Phiy1 = 0 # 1x1\n self.Phix3 = 0 # 1x1\n self.Phiy3 = 0 # 1x1\n self.t += 1 \n \n Mx1 = self.x3 - self.x1\n My1 = self.y3 - self.y1\n \n Mx3 = self.x1 - self.x3\n My3 = self.y1 - self.y3 \n \n \" Use MLP to Predict control inputs \"\n \n relative_pose_1 = [ Mx1, My1, self.Phix1, self.Phiy1 ] # tensor data for MLP model\n relative_pose_3 = [ Mx3, My3, self.Phix3, self.Phiy3 ] # tensor data for MLP model\n\n u1_predicted = MLP_Model.predict(relative_pose_1, loaded_model) # predict control input u1, tensor\n u3_predicted = MLP_Model.predict(relative_pose_3, loaded_model) # predict control input u3, tensor\n\n self.Phix1 = u3_predicted[0][0] # 1x1\n self.Phiy1 = u3_predicted[0][1] # 1x1\n \n self.Phix3 = u1_predicted[0][0] # 1x1\n self.Phiy3 = u1_predicted[0][1] # 1x1\n \n u1_predicted_np = np.array([[ u1_predicted[0][0] ], [ u1_predicted[0][1] ]]) # from tensor to numpy array for calculation\n u3_predicted_np = np.array([[ u3_predicted[0][0] ], [ u3_predicted[0][1] ]]) # from tensor to numpy array for calculation\n\n \" Calculate V1/W1, V2/W2, V3/W3, V4/W4, V5/W5, V6/W6 \"\n \n S1 = np.array([[self.v1], [self.w1]]) #2x1\n G1 = np.array([[1,0], [0,1/L]]) #2x2\n R1 = np.array([[math.cos(self.Theta1),math.sin(self.Theta1)],[-math.sin(self.Theta1),math.cos(self.Theta1)]]) #2x2\n S1 = np.dot(np.dot(G1, R1), u1_predicted_np) #2x1\n \n S3 = np.array([[self.v3], [self.w3]]) #2x1\n G3 = np.array([[1,0], [0,1/L]]) #2x2\n R3 = np.array([[math.cos(self.Theta3),math.sin(self.Theta3)],[-math.sin(self.Theta3),math.cos(self.Theta3)]]) #2x2\n S3 = np.dot(np.dot(G3, R3), u3_predicted_np) # 2x1 \n \n \" Calculate VL1/VR1, VL2/VR2, VL3/VR3, VL4/VR4, VL5/VR5, VL6/VR6 \"\n \n D = np.array([[1/2,1/2],[-1/(2*d),1/(2*d)]]) #2x2\n Di = np.linalg.inv(D) #2x2\n\n Speed_L1 = np.array([[self.vL1], [self.vR1]]) # Vector 2x1 for Speed of Robot 1\n Speed_L3 = np.array([[self.vL3], [self.vR3]]) # Vector 2x1 for Speed of Robot 3\n\n M1 = np.array([[S1[0]],[S1[1]]]).reshape(2,1) #2x1\n M3 = np.array([[S3[0]],[S3[1]]]).reshape(2,1) #2x1\n\n Speed_L1 = np.dot(Di, M1) # 2x1 (VL1, VR1)\n 
Speed_L3 = np.dot(Di, M3) # 2x1 (VL3, VR3)\n\n VL1 = float(Speed_L1[0])\n VR1 = float(Speed_L1[1])\n VL3 = float(Speed_L3[0])\n VR3 = float(Speed_L3[1]) \n \n \" Publish Speed Commands to Robot 1 \"\n \n msgl1 = Float32() \n msgr1 = Float32()\n msgl1.data = VL1\n msgr1.data = VR1\n self.publisher_l1.publish(msgl1)\n self.publisher_r1.publish(msgr1)\n \n \" Publish Speed Commands to Robot 3 \"\n \n msgl3 = Float32()\n msgr3 = Float32()\n msgl3.data = VL3\n msgr3.data = VR3\n self.publisher_l3.publish(msgl3)\n self.publisher_r3.publish(msgr3) \n\n self.i += 1\n \n def listener_callback(self, msg):\n \n if msg.transforms[0].child_frame_id == 'robot1' : \n self.x1 = msg.transforms[0].transform.translation.x\n self.y1 = msg.transforms[0].transform.translation.y\n self.xr1 = msg.transforms[0].transform.rotation.x\n self.yr1 = msg.transforms[0].transform.rotation.y\n self.zr1 = msg.transforms[0].transform.rotation.z\n self.wr1 = msg.transforms[0].transform.rotation.w\n self.Theta1 = euler_from_quaternion(self.xr1,self.yr1,self.zr1,self.wr1)\n \n if msg.transforms[0].child_frame_id == 'robot3' : \n self.x3 = msg.transforms[0].transform.translation.x\n self.y3 = msg.transforms[0].transform.translation.y\n self.xr3 = msg.transforms[0].transform.rotation.x\n self.yr3 = msg.transforms[0].transform.rotation.y\n self.zr3 = msg.transforms[0].transform.rotation.z\n self.wr3 = msg.transforms[0].transform.rotation.w\n self.Theta3 = euler_from_quaternion(self.xr3,self.yr3,self.zr3,self.wr3) \n \n \ndef main(args=None):\n rclpy.init(args=args)\n minimal_publisher = MinimalPublisher()\n time.sleep(5)\n rclpy.spin(minimal_publisher)\n minimal_publisher.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()\n \n\n", "\"\"\"\n\nConsensus for 2 Robots, using MLP Model\n\nScene: Robot 3, Robot 4\n\nInputs: Mx, My\nOutputs: Ux, Uy\n\n\"\"\"\nimport torch\nimport MLP_Model\nimport math\nimport numpy as np\nimport rclpy\nfrom rclpy.node import Node\nfrom tf2_msgs.msg import TFMessage\nfrom std_msgs.msg import Float32\nimport time\n\nL = 1\nd = 0.5\n\n# load model using dict\nFILE = \"model.pth\"\nloaded_model = MLP_Model.MLP()\nloaded_model.load_state_dict(torch.load(FILE))\nloaded_model.eval()\n\ndef euler_from_quaternion(x, y, z, w):\n \n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n \n return yaw_z # in radians\n \n \nclass MinimalPublisher(Node):\n\n def __init__(self):\n super().__init__('minimal_publisher1')\n self.publisher_l4 = self.create_publisher(Float32, '/leftMotorSpeedrobot4', 0) #Change according to topic in child script,String to Float32\n self.publisher_r4 = self.create_publisher(Float32, '/rightMotorSpeedrobot4',0) #Change according to topic in child script,String to Float32\n self.publisher_l3 = self.create_publisher(Float32, '/leftMotorSpeedrobot3', 0) #Change according to topic in child script,String to Float32\n self.publisher_r3 = self.create_publisher(Float32, '/rightMotorSpeedrobot3',0) #Change according to topic in child script,String to Float32 \n self.subscription = self.create_subscription(\n TFMessage,\n '/tf',\n self.listener_callback,\n 0)\n\n \" Timer Callback \"\n timer_period = 0.01 # seconds\n self.timer = self.create_timer(timer_period, self.timer_callback)\n self.i = 0 \n \n \" Parameters \"\n self.t = 0 # Just to initialize Phix's and Phiy's\n \n \" Initialize Phi's\"\n self.Phix3 = 0 # 1x1\n self.Phiy3 = 0 # 1x1\n self.Phix4 = 0 # 1x1\n self.Phiy4 = 0 # 1x1 \n \n \" Mobile Robot 4 Parameters \"\n self.x4 = 0\n 
self.y4 = 0\n self.Theta4 = 0\n self.v4 = 0\n self.w4 = 0\n self.vL4 = 0\n self.vR4 = 0 \n \n \" Mobile Robot 3 Parameters \"\n self.x3 = 0\n self.y3 = 0\n self.Theta3 = 0\n self.v3 = 0\n self.w3 = 0\n self.vL3 = 0 \n self.vR3 = 0\n\n \n \n def timer_callback(self):\n \n \" Calculate Mx1, My1, ...... Mx6, My6 \"\n \n # Initialize Phi's\n if self.t ==0:\n self.Phix3 = 0 # 1x1\n self.Phiy3 = 0 # 1x1\n self.Phix4 = 0 # 1x1\n self.Phiy4 = 0 # 1x1\n self.t += 1 \n \n \n Mx3 = self.x4 - self.x3\n My3 = self.y4 - self.y3\n \n Mx4 = self.x3 - self.x4\n My4 = self.y3 - self.y4 \n \n \" Use MLP to Predict control inputs \"\n \n relative_pose_3 = [ Mx3, My3, self.Phix3, self.Phiy3 ] # tensor data for MLP model\n relative_pose_4 = [ Mx4, My4, self.Phix4, self.Phiy4 ] # tensor data for MLP model\n\n u3_predicted = MLP_Model.predict(relative_pose_3, loaded_model) # predict control input u3, tensor\n u4_predicted = MLP_Model.predict(relative_pose_4, loaded_model) # predict control input u4, tensor\n \n self.Phix3 = u4_predicted[0][0] # 1x1\n self.Phiy3 = u4_predicted[0][1] # 1x1\n\n self.Phix4 = u3_predicted[0][0] # 1x1\n self.Phiy4 = u3_predicted[0][1] # 1x1 \n \n u3_predicted_np = np.array([[ u3_predicted[0][0] ], [ u3_predicted[0][1] ]]) # from tensor to numpy array for calculation\n u4_predicted_np = np.array([[ u4_predicted[0][0] ], [ u4_predicted[0][1] ]]) # from tensor to numpy array for calculation\n\n \" Calculate V1/W1, V2/W2, V3/W3, V4/W4, V5/W5, V6/W6 \"\n \n S3 = np.array([[self.v3], [self.w3]]) #2x1\n G3 = np.array([[1,0], [0,1/L]]) #2x2\n R3 = np.array([[math.cos(self.Theta3),math.sin(self.Theta3)],[-math.sin(self.Theta3),math.cos(self.Theta3)]]) #2x2\n S3 = np.dot(np.dot(G3, R3), u3_predicted_np) #2x1 \n\n S4 = np.array([[self.v4], [self.w4]]) #2x1\n G4 = np.array([[1,0], [0,1/L]]) #2x2\n R4 = np.array([[math.cos(self.Theta4),math.sin(self.Theta4)],[-math.sin(self.Theta4),math.cos(self.Theta4)]]) #2x2\n S4 = np.dot(np.dot(G4, R4), u4_predicted_np) #2x1 \n \n \" Calculate VL1/VR1, VL2/VR2, VL3/VR3, VL4/VR4, VL5/VR5, VL6/VR6 \"\n \n D = np.array([[1/2,1/2],[-1/(2*d),1/(2*d)]]) #2x2\n Di = np.linalg.inv(D) #2x2\n\n Speed_L3 = np.array([[self.vL3], [self.vR3]]) # Vector 2x1 for Speed of Robot 3\n Speed_L4 = np.array([[self.vL4], [self.vR4]]) # Vector 2x1 for Speed of Robot 4\n \n\n M3 = np.array([[S3[0]],[S3[1]]]).reshape(2,1) #2x1\n M4 = np.array([[S4[0]],[S4[1]]]).reshape(2,1) #2x1\n\n Speed_L3 = np.dot(Di, M3) # 2x1 (VL3, VR3)\n Speed_L4 = np.dot(Di, M4) # 2x1 (VL4, VR4)\n\n VL3 = float(Speed_L3[0])\n VR3 = float(Speed_L3[1])\n VL4 = float(Speed_L4[0])\n VR4 = float(Speed_L4[1])\n \n \" Publish Speed Commands to Robot 3 \"\n \n msgl3 = Float32()\n msgr3 = Float32()\n msgl3.data = VL3\n msgr3.data = VR3\n self.publisher_l3.publish(msgl3)\n self.publisher_r3.publish(msgr3)\n \n \" Publish Speed Commands to Robot 4 \"\n \n msgl4 = Float32()\n msgr4 = Float32()\n msgl4.data = VL4\n msgr4.data = VR4\n self.publisher_l4.publish(msgl4)\n self.publisher_r4.publish(msgr4)\n\n self.i += 1\n \n def listener_callback(self, msg):\n \n if msg.transforms[0].child_frame_id == 'robot4' :\n \n self.x4 = msg.transforms[0].transform.translation.x\n self.y4 = msg.transforms[0].transform.translation.y\n self.xr4 = msg.transforms[0].transform.rotation.x\n self.yr4 = msg.transforms[0].transform.rotation.y\n self.zr4 = msg.transforms[0].transform.rotation.z\n self.wr4 = msg.transforms[0].transform.rotation.w\n self.Theta4 = euler_from_quaternion(self.xr4,self.yr4,self.zr4,self.wr4) \n \n if 
msg.transforms[0].child_frame_id == 'robot3' : \n self.x3 = msg.transforms[0].transform.translation.x\n self.y3 = msg.transforms[0].transform.translation.y\n self.xr3 = msg.transforms[0].transform.rotation.x\n self.yr3 = msg.transforms[0].transform.rotation.y\n self.zr3 = msg.transforms[0].transform.rotation.z\n self.wr3 = msg.transforms[0].transform.rotation.w\n self.Theta3 = euler_from_quaternion(self.xr3,self.yr3,self.zr3,self.wr3) \n \n \ndef main(args=None):\n rclpy.init(args=args)\n minimal_publisher = MinimalPublisher()\n time.sleep(5)\n rclpy.spin(minimal_publisher)\n minimal_publisher.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()\n \n\n" ]
[ [ "numpy.linalg.inv", "numpy.dot", "numpy.array", "torch.load" ], [ "numpy.dot", "torch.load", "numpy.linalg.inv", "numpy.ones", "numpy.identity", "numpy.array", "numpy.zeros" ], [ "numpy.linalg.inv", "numpy.dot", "numpy.array", "torch.load" ], [ "numpy.linalg.inv", "numpy.dot", "numpy.array", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
thunlp/MetaAdaptRank
[ "5e80520b003b0a3a5fad817edf65cf76222438dd", "5e80520b003b0a3a5fad817edf65cf76222438dd" ]
[ "metaranker/losses/pairwise.py", "metaranker/networks/bert_ranker.py" ]
[ "import torch\nfrom torch import nn\nfrom torch.autograd import Variable\n\nclass PairWise(nn.Module):\n def __init__(\n self, \n margin=1\n ):\n super(PairWise, self).__init__()\n self.tanh = nn.Tanh()\n self.loss_fct = nn.MarginRankingLoss(\n margin=margin, \n reduction='none'\n )\n def forward(\n self, \n pos_score, \n neg_score,\n ):\n pos_score = self.tanh(pos_score)\n neg_score = self.tanh(neg_score)\n # compute loss\n batch_loss = self.loss_fct(\n pos_score, \n neg_score, \n target=torch.ones(pos_score.size()).to(pos_score.device)\n )\n return batch_loss", "import torch\nimport torch.nn as nn\nimport logging\nfrom .. import losses\nfrom ..transformers import BertPreTrainedModel, BertModel\n\nfrom typing import Tuple\nlogger = logging.getLogger()\n\n\n\nclass BertRanker(BertPreTrainedModel):\n def __init__(\n self, \n config,\n loss_class,\n ):\n super().__init__(config)\n self._loss_class = loss_class\n self.num_labels = config.num_labels\n \n self.bert = BertModel(config)\n self.linear_layer = nn.Linear(config.hidden_size, config.num_labels) \n self.loss_fct = losses.get_class(self._loss_class)\n \n self.init_weights()\n \n def forward(\n self, \n pos_input_ids, \n pos_input_mask, \n pos_segment_ids, \n neg_input_ids=None, \n neg_input_mask=None, \n neg_segment_ids=None,\n labels=None,\n ):\n # pos input\n _, pos_output = self.bert(\n pos_input_ids, \n attention_mask = pos_input_mask, \n token_type_ids = pos_segment_ids\n )\n \n pos_score = self.linear_layer(pos_output).squeeze(-1)\n \n if self._loss_class == \"pointwise\":\n if labels is not None:\n loss = self.loss_fct(pos_score, labels)\n return loss\n else:\n return pos_score.softmax(dim=-1)[:, 1].squeeze(-1), pos_output\n \n elif self._loss_class == \"pairwise\":\n # pairwise loss\n if neg_input_ids is not None:\n _, neg_output = self.bert(\n neg_input_ids, \n attention_mask = neg_input_mask, \n token_type_ids = neg_segment_ids\n )\n # pick cls token\n neg_score = self.linear_layer(neg_output).squeeze(-1)\n \n # compute loss\n loss = self.loss_fct(pos_score, neg_score)\n return loss\n \n # inference\n else:\n return pos_score, pos_output\n" ]
[ [ "torch.nn.MarginRankingLoss", "torch.nn.Tanh" ], [ "torch.nn.Linear" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
WLM1ke/poptimizer
[ "084ac14ca6212a5b14bea5bbc9bb575da077ffb0" ]
[ "poptimizer/portfolio/optimizer.py" ]
[ "\"\"\"Оптимизатор портфеля.\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\n\nfrom poptimizer import config\nfrom poptimizer.portfolio import metrics\nfrom poptimizer.portfolio.portfolio import CASH, Portfolio\n\n\nclass Optimizer:\n \"\"\"Предлагает сделки для улучшения метрики портфеля.\"\"\"\n\n def __init__(self, portfolio: Portfolio, p_value: float = config.P_VALUE):\n \"\"\"Учитывается градиент, его ошибку и ликвидность бумаг.\n\n :param portfolio:\n Оптимизируемый портфель.\n :param p_value:\n Требуемая значимость отклонения градиента от нуля.\n \"\"\"\n self._portfolio = portfolio\n self._p_value = p_value\n self._metrics = metrics.MetricsResample(portfolio)\n\n def __str__(self) -> str:\n \"\"\"Информация о позициях, градиенты которых значимо отличны от 0.\"\"\"\n df = self._for_trade()\n forecasts = self.metrics.count\n blocks = [\n \"\\nОПТИМИЗАЦИЯ ПОРТФЕЛЯ\",\n f\"\\nforecasts = {forecasts}\",\n f\"p-value = {self._p_value:.2%}\",\n f\"\\n{df}\",\n ]\n return \"\\n\".join(blocks)\n\n @property\n def portfolio(self) -> Portfolio:\n \"\"\"Оптимизируемый портфель.\"\"\"\n return self._portfolio\n\n @property\n def metrics(self) -> metrics.MetricsResample:\n \"\"\"Метрики портфеля.\"\"\"\n return self._metrics\n\n def _for_trade(self) -> pd.DataFrame:\n \"\"\"Осуществляет расчет доверительного интервала для среднего.\"\"\"\n p_value = self._p_value / (len(self._portfolio.index) - 2)\n\n conf_int = self.metrics.all_gradients.iloc[:-2]\n conf_int = conf_int.apply(\n lambda grad: _grad_conf_int(grad, p_value),\n axis=1,\n result_type=\"expand\",\n )\n conf_int.columns = [\"LOWER\", \"UPPER\"]\n conf_int[\"COSTS\"] = self._costs()\n conf_int[\"PRIORITY\"] = conf_int[\"LOWER\"] - conf_int[\"COSTS\"]\n\n for_sale = conf_int[\"UPPER\"] < 0\n for_sale = for_sale & (self._portfolio.shares.iloc[:-2] > 0) # noqa: WPS465\n for_sale = conf_int[for_sale]\n for_sale = for_sale.assign(PRIORITY=lambda df: df[\"UPPER\"])\n\n good_purchase = conf_int[\"PRIORITY\"] > 0 # noqa: WPS465\n good_purchase = conf_int[good_purchase]\n\n return pd.concat(\n [\n good_purchase,\n for_sale,\n ],\n axis=0,\n ).sort_values(\"PRIORITY\", ascending=False)\n\n def _costs(self) -> pd.DataFrame:\n \"\"\"Удельные торговые издержки.\n\n Полностью распределяются на покупаемую позицию с учетом ее последующего закрытия. Состоят из\n двух составляющих - комиссии и воздействия на рынок. Для учета воздействия на рынок\n используется Rule of thumb, trading one day’s volume moves the price by about one day’s\n volatility\n\n https://arxiv.org/pdf/1705.00109.pdf\n\n Размер операций на покупку условно выбран равным текущему кэшу, а на последующую продажу\n текущая позиция плюс кэш за вычетом уже учтенных издержек на продажу текущей позиции.\n\n Было решено отказаться от расчета производной так как для нулевых позиций издержки воздействия\n небольшие, но быстро нарастают с объемом. Расчет для условной сделки в размере кэша сразу\n отсекает совсем неликвидных кандидатов на покупку.\n \"\"\"\n port = self._portfolio\n\n cash = port.weight[CASH] / port.turnover_factor\n weight = port.weight / port.turnover_factor\n weight_cash = weight + cash\n\n impact_scale = 1.5\n\n return (\n # Размер рыночного воздействие в дневном СКО для дневного оборот\n config.MARKET_IMPACT_FACTOR\n # Дневное СКО\n * (self.metrics.std / config.YEAR_IN_TRADING_DAYS ** 0.5)\n # Зависимость общих издержек от воздействия пропорционален степени 1.5 от нормированного на\n # дневной оборот объема. 
Совершается покупка на кэш сейчас и увеличиваются издержки на\n # ликвидацию позиции\n * (cash ** impact_scale + (weight_cash ** impact_scale - weight ** impact_scale))\n # Делим на объем операции для получения удельных издержек\n / cash\n # Умножаем на коэффициент пересчета в годовые значения\n * (config.YEAR_IN_TRADING_DAYS / config.FORECAST_DAYS)\n # Обычные издержки в две стороны\n + config.COSTS * 2\n )\n\n\ndef _grad_conf_int(forecasts, p_value) -> tuple[float, float]:\n interval = stats.bootstrap(\n (forecasts,),\n np.median,\n confidence_level=(1 - p_value),\n random_state=0,\n ).confidence_interval\n\n return interval.low, interval.high\n" ]
[ [ "scipy.stats.bootstrap", "pandas.concat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "1.7", "1.9", "1.10", "1.8" ], "tensorflow": [] } ]
Olek-Donaldson/astropy
[ "ed9ec69007bd540bcf476def57c5231e5e7c1240", "ed9ec69007bd540bcf476def57c5231e5e7c1240" ]
[ "astropy/modeling/tests/test_models.py", "astropy/coordinates/builtin_frames/icrs_cirs_transforms.py" ]
[ "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\"\"\"\nTests for model evaluation.\nCompare the results of some models with other programs.\n\"\"\"\n# pylint: disable=invalid-name, no-member\nimport pytest\nimport numpy as np\n\nfrom numpy.testing import assert_allclose, assert_equal\n\nfrom astropy import units as u\nfrom astropy.modeling import fitting, models\nfrom astropy.modeling.models import Gaussian2D\nfrom astropy.modeling.core import FittableModel\nfrom astropy.modeling.parameters import Parameter\nfrom astropy.modeling.polynomial import PolynomialBase\nfrom astropy.utils import minversion\nfrom astropy.tests.helper import assert_quantity_allclose\nfrom astropy.utils import NumpyRNGContext\nfrom .example_models import models_1D, models_2D\n\ntry:\n import scipy\n HAS_SCIPY = True\nexcept ImportError:\n HAS_SCIPY = False\n\n\[email protected]('not HAS_SCIPY')\ndef test_custom_model(amplitude=4, frequency=1):\n\n def sine_model(x, amplitude=4, frequency=1):\n \"\"\"\n Model function\n \"\"\"\n return amplitude * np.sin(2 * np.pi * frequency * x)\n\n def sine_deriv(x, amplitude=4, frequency=1):\n \"\"\"\n Jacobian of model function, e.g. derivative of the function with\n respect to the *parameters*\n \"\"\"\n da = np.sin(2 * np.pi * frequency * x)\n df = 2 * np.pi * x * amplitude * np.cos(2 * np.pi * frequency * x)\n return np.vstack((da, df))\n\n SineModel = models.custom_model(sine_model, fit_deriv=sine_deriv)\n\n x = np.linspace(0, 4, 50)\n sin_model = SineModel()\n\n sin_model.evaluate(x, 5., 2.)\n sin_model.fit_deriv(x, 5., 2.)\n\n np.random.seed(0)\n data = sin_model(x) + np.random.rand(len(x)) - 0.5\n fitter = fitting.LevMarLSQFitter()\n model = fitter(sin_model, x, data)\n assert np.all((np.array([model.amplitude.value, model.frequency.value]) -\n np.array([amplitude, frequency])) < 0.001)\n\n\ndef test_custom_model_init():\n @models.custom_model\n def SineModel(x, amplitude=4, frequency=1):\n \"\"\"Model function\"\"\"\n\n return amplitude * np.sin(2 * np.pi * frequency * x)\n\n sin_model = SineModel(amplitude=2., frequency=0.5)\n assert sin_model.amplitude == 2.\n assert sin_model.frequency == 0.5\n\n\ndef test_custom_model_defaults():\n @models.custom_model\n def SineModel(x, amplitude=4, frequency=1):\n \"\"\"Model function\"\"\"\n\n return amplitude * np.sin(2 * np.pi * frequency * x)\n\n sin_model = SineModel()\n assert SineModel.amplitude.default == 4\n assert SineModel.frequency.default == 1\n\n assert sin_model.amplitude == 4\n assert sin_model.frequency == 1\n\n\ndef test_inconsistent_input_shapes():\n g = Gaussian2D()\n x = np.arange(-1., 1, .2)\n y = x.copy()\n # check scalar input broadcasting works\n assert np.abs(g(x, 0) - g(x, 0 * x)).sum() == 0\n # but not array broadcasting\n x.shape = (10, 1)\n y.shape = (1, 10)\n with pytest.raises(ValueError):\n g(x, y)\n\n\ndef test_custom_model_bounding_box():\n \"\"\"Test bounding box evaluation for a 3D model\"\"\"\n\n def ellipsoid(x, y, z, x0=13, y0=10, z0=8, a=4, b=3, c=2, amp=1):\n rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2\n val = (rsq < 1) * amp\n return val\n\n class Ellipsoid3D(models.custom_model(ellipsoid)):\n @property\n def bounding_box(self):\n return ((self.z0 - self.c, self.z0 + self.c),\n (self.y0 - self.b, self.y0 + self.b),\n (self.x0 - self.a, self.x0 + self.a))\n\n model = Ellipsoid3D()\n bbox = model.bounding_box\n\n zlim, ylim, xlim = bbox\n dz, dy, dx = np.diff(bbox) / 2\n z1, y1, x1 = np.mgrid[slice(zlim[0], zlim[1] + 1),\n slice(ylim[0], 
ylim[1] + 1),\n slice(xlim[0], xlim[1] + 1)]\n z2, y2, x2 = np.mgrid[slice(zlim[0] - dz, zlim[1] + dz + 1),\n slice(ylim[0] - dy, ylim[1] + dy + 1),\n slice(xlim[0] - dx, xlim[1] + dx + 1)]\n\n arr = model(x2, y2, z2)\n sub_arr = model(x1, y1, z1)\n\n # check for flux agreement\n assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * 1e-7\n\n\nclass Fittable2DModelTester:\n \"\"\"\n Test class for all two dimensional parametric models.\n\n Test values have to be defined in example_models.py. It currently test the\n model with different input types, evaluates the model at different\n positions and assures that it gives the correct values. And tests if the\n model works with non-linear fitters.\n\n This can be used as a base class for user defined model testing.\n \"\"\"\n\n def setup_class(self):\n self.N = 100\n self.M = 100\n self.eval_error = 0.0001\n self.fit_error = 0.1\n self.x = 5.3\n self.y = 6.7\n self.x1 = np.arange(1, 10, .1)\n self.y1 = np.arange(1, 10, .1)\n self.y2, self.x2 = np.mgrid[:10, :8]\n\n def test_input2D(self, model_class, test_parameters):\n \"\"\"Test model with different input types.\"\"\"\n\n model = create_model(model_class, test_parameters)\n model(self.x, self.y)\n model(self.x1, self.y1)\n model(self.x2, self.y2)\n\n def test_eval2D(self, model_class, test_parameters):\n \"\"\"Test model values add certain given points\"\"\"\n\n model = create_model(model_class, test_parameters)\n x = test_parameters['x_values']\n y = test_parameters['y_values']\n z = test_parameters['z_values']\n assert np.all(np.abs(model(x, y) - z) < self.eval_error)\n\n def test_bounding_box2D(self, model_class, test_parameters):\n \"\"\"Test bounding box evaluation\"\"\"\n\n model = create_model(model_class, test_parameters)\n\n # testing setter\n model.bounding_box = ((-5, 5), (-5, 5))\n assert model.bounding_box == ((-5, 5), (-5, 5))\n\n model.bounding_box = None\n with pytest.raises(NotImplementedError):\n model.bounding_box\n\n # test the exception of dimensions don't match\n with pytest.raises(ValueError):\n model.bounding_box = (-5, 5)\n\n del model.bounding_box\n\n try:\n bbox = model.bounding_box\n except NotImplementedError:\n pytest.skip(\"Bounding_box is not defined for model.\")\n\n ylim, xlim = bbox\n dy, dx = np.diff(bbox)/2\n y1, x1 = np.mgrid[slice(ylim[0], ylim[1] + 1),\n slice(xlim[0], xlim[1] + 1)]\n y2, x2 = np.mgrid[slice(ylim[0] - dy, ylim[1] + dy + 1),\n slice(xlim[0] - dx, xlim[1] + dx + 1)]\n\n arr = model(x2, y2)\n sub_arr = model(x1, y1)\n\n # check for flux agreement\n assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * 1e-7\n\n @pytest.mark.skipif('not HAS_SCIPY')\n def test_fitter2D(self, model_class, test_parameters):\n \"\"\"Test if the parametric model works with the fitter.\"\"\"\n\n x_lim = test_parameters['x_lim']\n y_lim = test_parameters['y_lim']\n\n parameters = test_parameters['parameters']\n model = create_model(model_class, test_parameters)\n\n if isinstance(parameters, dict):\n parameters = [parameters[name] for name in model.param_names]\n\n if \"log_fit\" in test_parameters:\n if test_parameters['log_fit']:\n x = np.logspace(x_lim[0], x_lim[1], self.N)\n y = np.logspace(y_lim[0], y_lim[1], self.N)\n else:\n x = np.linspace(x_lim[0], x_lim[1], self.N)\n y = np.linspace(y_lim[0], y_lim[1], self.N)\n xv, yv = np.meshgrid(x, y)\n\n np.random.seed(0)\n # add 10% noise to the amplitude\n noise = np.random.rand(self.N, self.N) - 0.5\n data = model(xv, yv) + 0.1 * parameters[0] * noise\n fitter = fitting.LevMarLSQFitter()\n new_model = fitter(model, xv, 
yv, data)\n\n params = [getattr(new_model, name) for name in new_model.param_names]\n fixed = [param.fixed for param in params]\n expected = np.array([val for val, fixed in zip(parameters, fixed)\n if not fixed])\n fitted = np.array([param.value for param in params\n if not param.fixed])\n assert_allclose(fitted, expected,\n atol=self.fit_error)\n\n @pytest.mark.skipif('not HAS_SCIPY')\n def test_deriv_2D(self, model_class, test_parameters):\n \"\"\"\n Test the derivative of a model by fitting with an estimated and\n analytical derivative.\n \"\"\"\n\n x_lim = test_parameters['x_lim']\n y_lim = test_parameters['y_lim']\n\n if model_class.fit_deriv is None:\n pytest.skip(\"Derivative function is not defined for model.\")\n if issubclass(model_class, PolynomialBase):\n pytest.skip(\"Skip testing derivative of polynomials.\")\n\n if \"log_fit\" in test_parameters:\n if test_parameters['log_fit']:\n x = np.logspace(x_lim[0], x_lim[1], self.N)\n y = np.logspace(y_lim[0], y_lim[1], self.M)\n else:\n x = np.linspace(x_lim[0], x_lim[1], self.N)\n y = np.linspace(y_lim[0], y_lim[1], self.M)\n xv, yv = np.meshgrid(x, y)\n\n try:\n model_with_deriv = create_model(model_class, test_parameters,\n use_constraints=False,\n parameter_key='deriv_initial')\n model_no_deriv = create_model(model_class, test_parameters,\n use_constraints=False,\n parameter_key='deriv_initial')\n model = create_model(model_class, test_parameters,\n use_constraints=False,\n parameter_key='deriv_initial')\n except KeyError:\n model_with_deriv = create_model(model_class, test_parameters,\n use_constraints=False)\n model_no_deriv = create_model(model_class, test_parameters,\n use_constraints=False)\n model = create_model(model_class, test_parameters,\n use_constraints=False)\n\n # add 10% noise to the amplitude\n rsn = np.random.RandomState(1234567890)\n amplitude = test_parameters['parameters'][0]\n n = 0.1 * amplitude * (rsn.rand(self.M, self.N) - 0.5)\n\n data = model(xv, yv) + n\n fitter_with_deriv = fitting.LevMarLSQFitter()\n new_model_with_deriv = fitter_with_deriv(model_with_deriv, xv, yv,\n data)\n fitter_no_deriv = fitting.LevMarLSQFitter()\n new_model_no_deriv = fitter_no_deriv(model_no_deriv, xv, yv, data,\n estimate_jacobian=True)\n assert_allclose(new_model_with_deriv.parameters,\n new_model_no_deriv.parameters,\n rtol=0.1)\n\n\nclass Fittable1DModelTester:\n \"\"\"\n Test class for all one dimensional parametric models.\n\n Test values have to be defined in example_models.py. It currently test the\n model with different input types, evaluates the model at different\n positions and assures that it gives the correct values. 
And tests if the\n model works with non-linear fitters.\n\n This can be used as a base class for user defined model testing.\n \"\"\"\n\n def setup_class(self):\n self.N = 100\n self.M = 100\n self.eval_error = 0.0001\n self.fit_error = 0.1\n self.x = 5.3\n self.y = 6.7\n self.x1 = np.arange(1, 10, .1)\n self.y1 = np.arange(1, 10, .1)\n self.y2, self.x2 = np.mgrid[:10, :8]\n\n @pytest.mark.filterwarnings(r'ignore:.*:RuntimeWarning')\n def test_input1D(self, model_class, test_parameters):\n \"\"\"Test model with different input types.\"\"\"\n\n model = create_model(model_class, test_parameters)\n model(self.x)\n model(self.x1)\n model(self.x2)\n\n def test_eval1D(self, model_class, test_parameters):\n \"\"\"\n Test model values at certain given points\n \"\"\"\n model = create_model(model_class, test_parameters)\n x = test_parameters['x_values']\n y = test_parameters['y_values']\n assert_allclose(model(x), y, atol=self.eval_error)\n\n def test_bounding_box1D(self, model_class, test_parameters):\n \"\"\"Test bounding box evaluation\"\"\"\n\n model = create_model(model_class, test_parameters)\n\n # testing setter\n model.bounding_box = (-5, 5)\n model.bounding_box = None\n\n with pytest.raises(NotImplementedError):\n model.bounding_box\n\n del model.bounding_box\n\n # test exception if dimensions don't match\n with pytest.raises(ValueError):\n model.bounding_box = 5\n\n try:\n bbox = model.bounding_box\n except NotImplementedError:\n pytest.skip(\"Bounding_box is not defined for model.\")\n\n if isinstance(model, models.Lorentz1D) or isinstance(model, models.Drude1D):\n rtol = 0.01 # 1% agreement is enough due to very extended wings\n ddx = 0.1 # Finer sampling to \"integrate\" flux for narrow peak\n else:\n rtol = 1e-7\n ddx = 1\n\n dx = np.diff(bbox) / 2\n x1 = np.mgrid[slice(bbox[0], bbox[1] + 1, ddx)]\n x2 = np.mgrid[slice(bbox[0] - dx, bbox[1] + dx + 1, ddx)]\n arr = model(x2)\n sub_arr = model(x1)\n\n # check for flux agreement\n assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * rtol\n\n @pytest.mark.skipif('not HAS_SCIPY')\n def test_fitter1D(self, model_class, test_parameters):\n \"\"\"\n Test if the parametric model works with the fitter.\n \"\"\"\n x_lim = test_parameters['x_lim']\n parameters = test_parameters['parameters']\n model = create_model(model_class, test_parameters)\n\n if isinstance(parameters, dict):\n parameters = [parameters[name] for name in model.param_names]\n\n if \"log_fit\" in test_parameters:\n if test_parameters['log_fit']:\n x = np.logspace(x_lim[0], x_lim[1], self.N)\n else:\n x = np.linspace(x_lim[0], x_lim[1], self.N)\n\n np.random.seed(0)\n # add 10% noise to the amplitude\n relative_noise_amplitude = 0.01\n data = ((1 + relative_noise_amplitude * np.random.randn(len(x))) *\n model(x))\n fitter = fitting.LevMarLSQFitter()\n new_model = fitter(model, x, data)\n\n # Only check parameters that were free in the fit\n params = [getattr(new_model, name) for name in new_model.param_names]\n fixed = [param.fixed for param in params]\n expected = np.array([val for val, fixed in zip(parameters, fixed)\n if not fixed])\n fitted = np.array([param.value for param in params\n if not param.fixed])\n assert_allclose(fitted, expected, atol=self.fit_error)\n\n @pytest.mark.skipif('not HAS_SCIPY')\n @pytest.mark.filterwarnings(r'ignore:.*:RuntimeWarning')\n def test_deriv_1D(self, model_class, test_parameters):\n \"\"\"\n Test the derivative of a model by comparing results with an estimated\n derivative.\n \"\"\"\n\n x_lim = test_parameters['x_lim']\n\n if 
model_class.fit_deriv is None:\n pytest.skip(\"Derivative function is not defined for model.\")\n if issubclass(model_class, PolynomialBase):\n pytest.skip(\"Skip testing derivative of polynomials.\")\n\n if \"log_fit\" in test_parameters:\n if test_parameters['log_fit']:\n x = np.logspace(x_lim[0], x_lim[1], self.N)\n else:\n x = np.linspace(x_lim[0], x_lim[1], self.N)\n\n parameters = test_parameters['parameters']\n model_with_deriv = create_model(model_class, test_parameters,\n use_constraints=False)\n model_no_deriv = create_model(model_class, test_parameters,\n use_constraints=False)\n\n # add 10% noise to the amplitude\n rsn = np.random.RandomState(1234567890)\n n = 0.1 * parameters[0] * (rsn.rand(self.N) - 0.5)\n\n data = model_with_deriv(x) + n\n fitter_with_deriv = fitting.LevMarLSQFitter()\n new_model_with_deriv = fitter_with_deriv(model_with_deriv, x, data)\n fitter_no_deriv = fitting.LevMarLSQFitter()\n new_model_no_deriv = fitter_no_deriv(model_no_deriv, x, data,\n estimate_jacobian=True)\n assert_allclose(new_model_with_deriv.parameters,\n new_model_no_deriv.parameters, atol=0.15)\n\n\ndef create_model(model_class, test_parameters, use_constraints=True,\n parameter_key='parameters'):\n \"\"\"Create instance of model class.\"\"\"\n\n constraints = {}\n if issubclass(model_class, PolynomialBase):\n return model_class(**test_parameters[parameter_key])\n elif issubclass(model_class, FittableModel):\n if \"requires_scipy\" in test_parameters and not HAS_SCIPY:\n pytest.skip(\"SciPy not found\")\n if use_constraints:\n if 'constraints' in test_parameters:\n constraints = test_parameters['constraints']\n return model_class(*test_parameters[parameter_key], **constraints)\n\n\[email protected](r'ignore:Model is linear in parameters.*')\[email protected](r'ignore:The fit may be unsuccessful.*')\[email protected](('model_class', 'test_parameters'),\n sorted(models_1D.items(), key=lambda x: str(x[0])))\nclass TestFittable1DModels(Fittable1DModelTester):\n pass\n\n\[email protected](r'ignore:Model is linear in parameters.*')\[email protected](('model_class', 'test_parameters'),\n sorted(models_2D.items(), key=lambda x: str(x[0])))\nclass TestFittable2DModels(Fittable2DModelTester):\n pass\n\n\ndef test_ShiftModel():\n # Shift by a scalar\n m = models.Shift(42)\n assert m(0) == 42\n assert_equal(m([1, 2]), [43, 44])\n\n # Shift by a list\n m = models.Shift([42, 43], n_models=2)\n assert_equal(m(0), [42, 43])\n assert_equal(m([1, 2], model_set_axis=False),\n [[43, 44], [44, 45]])\n\n\ndef test_ScaleModel():\n # Scale by a scalar\n m = models.Scale(42)\n assert m(0) == 0\n assert_equal(m([1, 2]), [42, 84])\n\n # Scale by a list\n m = models.Scale([42, 43], n_models=2)\n assert_equal(m(0), [0, 0])\n assert_equal(m([1, 2], model_set_axis=False),\n [[42, 84], [43, 86]])\n\n\ndef test_voigt_model():\n \"\"\"\n Currently just tests that the model peaks at its origin.\n Regression test for https://github.com/astropy/astropy/issues/3942\n \"\"\"\n\n m = models.Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)\n x = np.arange(0, 10, 0.01)\n y = m(x)\n assert y[500] == y.max() # y[500] is right at the center\n\n\ndef test_model_instance_repr():\n m = models.Gaussian1D(1.5, 2.5, 3.5)\n assert repr(m) == '<Gaussian1D(amplitude=1.5, mean=2.5, stddev=3.5)>'\n\n\[email protected](\"not HAS_SCIPY\")\ndef test_tabular_interp_1d():\n \"\"\"\n Test Tabular1D model.\n \"\"\"\n points = np.arange(0, 5)\n values = [1., 10, 2, 45, -3]\n LookupTable = models.tabular_model(1)\n model = 
LookupTable(points=points, lookup_table=values)\n xnew = [0., .7, 1.4, 2.1, 3.9]\n ans1 = [1., 7.3, 6.8, 6.3, 1.8]\n assert_allclose(model(xnew), ans1)\n # Test evaluate without passing `points`.\n model = LookupTable(lookup_table=values)\n assert_allclose(model(xnew), ans1)\n # Test bounds error.\n xextrap = [0., .7, 1.4, 2.1, 3.9, 4.1]\n with pytest.raises(ValueError):\n model(xextrap)\n # test extrapolation and fill value\n model = LookupTable(lookup_table=values, bounds_error=False,\n fill_value=None)\n assert_allclose(model(xextrap),\n [1., 7.3, 6.8, 6.3, 1.8, -7.8])\n\n # Test unit support\n xnew = xnew * u.nm\n ans1 = ans1 * u.nJy\n model = LookupTable(points=points*u.nm, lookup_table=values*u.nJy)\n assert_quantity_allclose(model(xnew), ans1)\n assert_quantity_allclose(model(xnew.to(u.nm)), ans1)\n assert model.bounding_box == (0 * u.nm, 4 * u.nm)\n\n # Test fill value unit conversion and unitless input on table with unit\n model = LookupTable([1, 2, 3], [10, 20, 30] * u.nJy, bounds_error=False,\n fill_value=1e-33*(u.W / (u.m * u.m * u.Hz)))\n assert_quantity_allclose(model(np.arange(5)),\n [100, 10, 20, 30, 100] * u.nJy)\n\n\[email protected](\"not HAS_SCIPY\")\ndef test_tabular_interp_2d():\n table = np.array([\n [-0.04614432, -0.02512547, -0.00619557, 0.0144165, 0.0297525],\n [-0.04510594, -0.03183369, -0.01118008, 0.01201388, 0.02496205],\n [-0.05464094, -0.02804499, -0.00960086, 0.01134333, 0.02284104],\n [-0.04879338, -0.02539565, -0.00440462, 0.01795145, 0.02122417],\n [-0.03637372, -0.01630025, -0.00157902, 0.01649774, 0.01952131]])\n\n points = np.arange(0, 5)\n points = (points, points)\n\n xnew = np.array([0., .7, 1.4, 2.1, 3.9])\n LookupTable = models.tabular_model(2)\n model = LookupTable(points, table)\n znew = model(xnew, xnew)\n result = np.array(\n [-0.04614432, -0.03450009, -0.02241028, -0.0069727, 0.01938675])\n assert_allclose(znew, result, atol=1e-7)\n\n # test 2D arrays as input\n a = np.arange(12).reshape((3, 4))\n y, x = np.mgrid[:3, :4]\n t = models.Tabular2D(lookup_table=a)\n r = t(y, x)\n assert_allclose(a, r)\n\n with pytest.raises(ValueError):\n model = LookupTable(points=([1.2, 2.3], [1.2, 6.7], [3, 4]))\n with pytest.raises(ValueError):\n model = LookupTable(lookup_table=[1, 2, 3])\n with pytest.raises(NotImplementedError):\n model = LookupTable(n_models=2)\n with pytest.raises(ValueError):\n model = LookupTable(([1, 2], [3, 4]), [5, 6])\n with pytest.raises(ValueError):\n model = LookupTable(([1, 2] * u.m, [3, 4]), [[5, 6], [7, 8]])\n with pytest.raises(ValueError):\n model = LookupTable(points, table, bounds_error=False,\n fill_value=1*u.Jy)\n\n # Test unit support\n points = points[0] * u.nm\n points = (points, points)\n xnew = xnew * u.nm\n model = LookupTable(points, table * u.nJy)\n result = result * u.nJy\n assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7*u.nJy)\n xnew = xnew.to(u.m)\n assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7*u.nJy)\n bbox = (0 * u.nm, 4 * u.nm)\n bbox = (bbox, bbox)\n assert model.bounding_box == bbox\n\n\[email protected](\"not HAS_SCIPY\")\ndef test_tabular_nd():\n a = np.arange(24).reshape((2, 3, 4))\n x, y, z = np.mgrid[:2, :3, :4]\n tab = models.tabular_model(3)\n t = tab(lookup_table=a)\n result = t(x, y, z)\n assert_allclose(a, result)\n\n with pytest.raises(ValueError):\n models.tabular_model(0)\n\n\ndef test_with_bounding_box():\n \"\"\"\n Test the option to evaluate a model respecting\n its bunding_box.\n \"\"\"\n p = models.Polynomial2D(2) & models.Polynomial2D(2)\n m = 
models.Mapping((0, 1, 0, 1)) | p\n with NumpyRNGContext(1234567):\n m.parameters = np.random.rand(12)\n\n m.bounding_box = ((3, 9), (1, 8))\n x, y = np.mgrid[:10, :10]\n a, b = m(x, y)\n aw, bw = m(x, y, with_bounding_box=True)\n ind = (~np.isnan(aw)).nonzero()\n assert_allclose(a[ind], aw[ind])\n assert_allclose(b[ind], bw[ind])\n\n aw, bw = m(x, y, with_bounding_box=True, fill_value=1000)\n ind = (aw != 1000).nonzero()\n assert_allclose(a[ind], aw[ind])\n assert_allclose(b[ind], bw[ind])\n\n # test the order of bbox is not reversed for 1D models\n p = models.Polynomial1D(1, c0=12, c1=2.3)\n p.bounding_box = (0, 5)\n assert(p(1) == p(1, with_bounding_box=True))\n\n t3 = models.Shift(10) & models.Scale(2) & models.Shift(-1)\n t3.bounding_box = ((4.3, 6.9), (6, 15), (-1, 10))\n assert_allclose(t3([1, 1], [7, 7], [3, 5], with_bounding_box=True),\n [[np.nan, 11], [np.nan, 14], [np.nan, 4]])\n\n trans3 = models.Shift(10) & models.Scale(2) & models.Shift(-1)\n trans3.bounding_box = ((4.3, 6.9), (6, 15), (-1, 10))\n assert_allclose(trans3(1, 7, 5, with_bounding_box=True), [11, 14, 4])\n\n\[email protected](\"not HAS_SCIPY\")\ndef test_tabular_with_bounding_box():\n points = np.arange(5)\n values = np.array([1.5, 3.4, 6.7, 7, 32])\n t = models.Tabular1D(points, values)\n result = t(1, with_bounding_box=True)\n\n assert result == 3.4\n assert t.inverse(result, with_bounding_box=True) == 1.\n\n\[email protected](\"not HAS_SCIPY\")\ndef test_tabular_bounding_box_with_units():\n points = np.arange(5)*u.pix\n lt = np.arange(5)*u.AA\n t = models.Tabular1D(points, lt)\n result = t(1*u.pix, with_bounding_box=True)\n\n assert result == 1.*u.AA\n assert t.inverse(result, with_bounding_box=True) == 1*u.pix\n\n\[email protected](\"not HAS_SCIPY\")\ndef test_tabular1d_inverse():\n \"\"\"Test that the Tabular1D inverse is defined\"\"\"\n points = np.arange(5)\n values = np.array([1.5, 3.4, 6.7, 7, 32])\n t = models.Tabular1D(points, values)\n result = t.inverse((3.4, 6.7))\n assert_allclose(result, np.array((1., 2.)))\n\n # Check that it works for descending values in lookup_table\n t2 = models.Tabular1D(points, values[::-1])\n assert_allclose(t2.inverse.points[0], t2.lookup_table[::-1])\n\n result2 = t2.inverse((7, 6.7))\n assert_allclose(result2, np.array((1., 2.)))\n\n # Check that it errors on double-valued lookup_table\n points = np.arange(5)\n values = np.array([1.5, 3.4, 3.4, 32, 25])\n t = models.Tabular1D(points, values)\n with pytest.raises(NotImplementedError):\n t.inverse((3.4, 7.))\n\n # Check that Tabular2D.inverse raises an error\n table = np.arange(5*5).reshape(5, 5)\n points = np.arange(0, 5)\n points = (points, points)\n t3 = models.Tabular2D(points=points, lookup_table=table)\n with pytest.raises(NotImplementedError):\n t3.inverse((3, 3))\n\n # Check that it uses the same kwargs as the original model\n points = np.arange(5)\n values = np.array([1.5, 3.4, 6.7, 7, 32])\n t = models.Tabular1D(points, values)\n with pytest.raises(ValueError):\n t.inverse(100)\n t = models.Tabular1D(points, values, bounds_error=False, fill_value=None)\n result = t.inverse(100)\n assert_allclose(t(result), 100)\n\n\[email protected](\"not HAS_SCIPY\")\ndef test_tabular_module_name():\n \"\"\"\n The module name must be set manually because\n these classes are created dynamically.\n \"\"\"\n for model in [models.Tabular1D, models.Tabular2D]:\n assert model.__module__ == \"astropy.modeling.tabular\"\n\n\nclass classmodel(FittableModel):\n f = Parameter(default=1)\n x = Parameter(default=0)\n y = 
Parameter(default=2)\n\n def __init__(self, f=f.default, x=x.default, y=y.default):\n super().__init__(f, x, y)\n\n def evaluate(self):\n pass\n\n\nclass subclassmodel(classmodel):\n f = Parameter(default=3, fixed=True)\n x = Parameter(default=10)\n y = Parameter(default=12)\n h = Parameter(default=5)\n\n def __init__(self, f=f.default, x=x.default, y=y.default, h=h.default):\n super().__init__(f, x, y)\n\n def evaluate(self):\n pass\n\n\ndef test_parameter_inheritance():\n b = subclassmodel()\n assert b.param_names == ('f', 'x', 'y', 'h')\n assert b.h == 5\n assert b.f == 3\n assert b.f.fixed == True\n", "# -*- coding: utf-8 -*-\n# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"\nContains the transformation functions for getting from ICRS/HCRS to CIRS and\nanything in between (currently that means GCRS)\n\"\"\"\n\nimport numpy as np\n\nfrom astropy import units as u\nfrom astropy.coordinates.baseframe import frame_transform_graph\nfrom astropy.coordinates.transformations import (\n FunctionTransformWithFiniteDifference,\n AffineTransform,\n)\nfrom astropy.coordinates.representation import (\n SphericalRepresentation,\n CartesianRepresentation,\n UnitSphericalRepresentation,\n CartesianDifferential,\n)\n\nfrom .icrs import ICRS\nfrom .gcrs import GCRS\nfrom .cirs import CIRS\nfrom .hcrs import HCRS\nfrom .utils import aticq, atciqz\n\nfrom ..erfa_astrom import erfa_astrom\n\n\n# First the ICRS/CIRS related transforms\n@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ICRS, CIRS)\ndef icrs_to_cirs(icrs_coo, cirs_frame):\n # first set up the astrometry context for ICRS<->CIRS\n astrom = erfa_astrom.get().apci(cirs_frame)\n\n if icrs_coo.data.get_name() == 'unitspherical' or icrs_coo.data.to_cartesian().x.unit == u.one:\n # if no distance, just do the infinite-distance/no parallax calculation\n usrepr = icrs_coo.represent_as(UnitSphericalRepresentation)\n i_ra = usrepr.lon.to_value(u.radian)\n i_dec = usrepr.lat.to_value(u.radian)\n cirs_ra, cirs_dec = atciqz(i_ra, i_dec, astrom)\n\n newrep = UnitSphericalRepresentation(lat=u.Quantity(cirs_dec, u.radian, copy=False),\n lon=u.Quantity(cirs_ra, u.radian, copy=False),\n copy=False)\n else:\n # When there is a distance, we first offset for parallax to get the\n # astrometric coordinate direction and *then* run the ERFA transform for\n # no parallax/PM. 
This ensures reversibility and is more sensible for\n # inside solar system objects\n astrom_eb = CartesianRepresentation(astrom['eb'], unit=u.au,\n xyz_axis=-1, copy=False)\n newcart = icrs_coo.cartesian - astrom_eb\n\n srepr = newcart.represent_as(SphericalRepresentation)\n i_ra = srepr.lon.to_value(u.radian)\n i_dec = srepr.lat.to_value(u.radian)\n cirs_ra, cirs_dec = atciqz(i_ra, i_dec, astrom)\n\n newrep = SphericalRepresentation(lat=u.Quantity(cirs_dec, u.radian, copy=False),\n lon=u.Quantity(cirs_ra, u.radian, copy=False),\n distance=srepr.distance, copy=False)\n\n return cirs_frame.realize_frame(newrep)\n\n\n@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, ICRS)\ndef cirs_to_icrs(cirs_coo, icrs_frame):\n srepr = cirs_coo.represent_as(SphericalRepresentation)\n cirs_ra = srepr.lon.to_value(u.radian)\n cirs_dec = srepr.lat.to_value(u.radian)\n\n # set up the astrometry context for ICRS<->cirs and then convert to\n # astrometric coordinate direction\n astrom = erfa_astrom.get().apci(cirs_coo)\n i_ra, i_dec = aticq(cirs_ra, cirs_dec, astrom)\n\n if cirs_coo.data.get_name() == 'unitspherical' or cirs_coo.data.to_cartesian().x.unit == u.one:\n # if no distance, just use the coordinate direction to yield the\n # infinite-distance/no parallax answer\n newrep = UnitSphericalRepresentation(lat=u.Quantity(i_dec, u.radian, copy=False),\n lon=u.Quantity(i_ra, u.radian, copy=False),\n copy=False)\n else:\n # When there is a distance, apply the parallax/offset to the SSB as the\n # last step - ensures round-tripping with the icrs_to_cirs transform\n\n # the distance in intermedrep is *not* a real distance as it does not\n # include the offset back to the SSB\n intermedrep = SphericalRepresentation(lat=u.Quantity(i_dec, u.radian, copy=False),\n lon=u.Quantity(i_ra, u.radian, copy=False),\n distance=srepr.distance,\n copy=False)\n\n astrom_eb = CartesianRepresentation(astrom['eb'], unit=u.au,\n xyz_axis=-1, copy=False)\n newrep = intermedrep + astrom_eb\n\n return icrs_frame.realize_frame(newrep)\n\n\n@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, CIRS)\ndef cirs_to_cirs(from_coo, to_frame):\n if np.all(from_coo.obstime == to_frame.obstime):\n return to_frame.realize_frame(from_coo.data)\n else:\n # the CIRS<-> CIRS transform actually goes through ICRS. This has a\n # subtle implication that a point in CIRS is uniquely determined\n # by the corresponding astrometric ICRS coordinate *at its\n # current time*. This has some subtle implications in terms of GR, but\n # is sort of glossed over in the current scheme because we are dropping\n # distances anyway.\n return from_coo.transform_to(ICRS()).transform_to(to_frame)\n\n\n# Now the GCRS-related transforms to/from ICRS\n\n@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ICRS, GCRS)\ndef icrs_to_gcrs(icrs_coo, gcrs_frame):\n # first set up the astrometry context for ICRS<->GCRS. 
There are a few steps...\n astrom = erfa_astrom.get().apcs(gcrs_frame)\n\n if icrs_coo.data.get_name() == 'unitspherical' or icrs_coo.data.to_cartesian().x.unit == u.one:\n # if no distance, just do the infinite-distance/no parallax calculation\n usrepr = icrs_coo.represent_as(UnitSphericalRepresentation)\n i_ra = usrepr.lon.to_value(u.radian)\n i_dec = usrepr.lat.to_value(u.radian)\n gcrs_ra, gcrs_dec = atciqz(i_ra, i_dec, astrom)\n\n newrep = UnitSphericalRepresentation(lat=u.Quantity(gcrs_dec, u.radian, copy=False),\n lon=u.Quantity(gcrs_ra, u.radian, copy=False),\n copy=False)\n else:\n # When there is a distance, we first offset for parallax to get the\n # BCRS coordinate direction and *then* run the ERFA transform for no\n # parallax/PM. This ensures reversibility and is more sensible for\n # inside solar system objects\n astrom_eb = CartesianRepresentation(astrom['eb'], unit=u.au,\n xyz_axis=-1, copy=False)\n newcart = icrs_coo.cartesian - astrom_eb\n\n srepr = newcart.represent_as(SphericalRepresentation)\n i_ra = srepr.lon.to_value(u.radian)\n i_dec = srepr.lat.to_value(u.radian)\n gcrs_ra, gcrs_dec = atciqz(i_ra, i_dec, astrom)\n\n newrep = SphericalRepresentation(lat=u.Quantity(gcrs_dec, u.radian, copy=False),\n lon=u.Quantity(gcrs_ra, u.radian, copy=False),\n distance=srepr.distance, copy=False)\n\n return gcrs_frame.realize_frame(newrep)\n\n\n@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,\n GCRS, ICRS)\ndef gcrs_to_icrs(gcrs_coo, icrs_frame):\n srepr = gcrs_coo.represent_as(SphericalRepresentation)\n gcrs_ra = srepr.lon.to_value(u.radian)\n gcrs_dec = srepr.lat.to_value(u.radian)\n\n # set up the astrometry context for ICRS<->GCRS and then convert to BCRS\n # coordinate direction\n astrom = erfa_astrom.get().apcs(gcrs_coo)\n\n i_ra, i_dec = aticq(gcrs_ra, gcrs_dec, astrom)\n\n if gcrs_coo.data.get_name() == 'unitspherical' or gcrs_coo.data.to_cartesian().x.unit == u.one:\n # if no distance, just use the coordinate direction to yield the\n # infinite-distance/no parallax answer\n newrep = UnitSphericalRepresentation(lat=u.Quantity(i_dec, u.radian, copy=False),\n lon=u.Quantity(i_ra, u.radian, copy=False),\n copy=False)\n else:\n # When there is a distance, apply the parallax/offset to the SSB as the\n # last step - ensures round-tripping with the icrs_to_gcrs transform\n\n # the distance in intermedrep is *not* a real distance as it does not\n # include the offset back to the SSB\n intermedrep = SphericalRepresentation(lat=u.Quantity(i_dec, u.radian, copy=False),\n lon=u.Quantity(i_ra, u.radian, copy=False),\n distance=srepr.distance,\n copy=False)\n\n astrom_eb = CartesianRepresentation(astrom['eb'], unit=u.au,\n xyz_axis=-1, copy=False)\n newrep = intermedrep + astrom_eb\n\n return icrs_frame.realize_frame(newrep)\n\n\n@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, GCRS)\ndef gcrs_to_gcrs(from_coo, to_frame):\n if (np.all(from_coo.obstime == to_frame.obstime) and\n np.all(from_coo.obsgeoloc == to_frame.obsgeoloc)):\n return to_frame.realize_frame(from_coo.data)\n else:\n # like CIRS, we do this self-transform via ICRS\n return from_coo.transform_to(ICRS()).transform_to(to_frame)\n\n\n@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, HCRS)\ndef gcrs_to_hcrs(gcrs_coo, hcrs_frame):\n\n if np.any(gcrs_coo.obstime != hcrs_frame.obstime):\n # if they GCRS obstime and HCRS obstime are not the same, we first\n # have to move to a GCRS where they are.\n frameattrs = gcrs_coo.get_frame_attr_names()\n 
frameattrs['obstime'] = hcrs_frame.obstime\n gcrs_coo = gcrs_coo.transform_to(GCRS(**frameattrs))\n\n srepr = gcrs_coo.represent_as(SphericalRepresentation)\n gcrs_ra = srepr.lon.to_value(u.radian)\n gcrs_dec = srepr.lat.to_value(u.radian)\n\n # set up the astrometry context for ICRS<->GCRS and then convert to ICRS\n # coordinate direction\n astrom = erfa_astrom.get().apcs(gcrs_coo)\n i_ra, i_dec = aticq(gcrs_ra, gcrs_dec, astrom)\n\n # convert to Quantity objects\n i_ra = u.Quantity(i_ra, u.radian, copy=False)\n i_dec = u.Quantity(i_dec, u.radian, copy=False)\n if gcrs_coo.data.get_name() == 'unitspherical' or gcrs_coo.data.to_cartesian().x.unit == u.one:\n # if no distance, just use the coordinate direction to yield the\n # infinite-distance/no parallax answer\n newrep = UnitSphericalRepresentation(lat=i_dec, lon=i_ra, copy=False)\n else:\n # When there is a distance, apply the parallax/offset to the\n # Heliocentre as the last step to ensure round-tripping with the\n # hcrs_to_gcrs transform\n\n # Note that the distance in intermedrep is *not* a real distance as it\n # does not include the offset back to the Heliocentre\n intermedrep = SphericalRepresentation(lat=i_dec, lon=i_ra,\n distance=srepr.distance,\n copy=False)\n\n # astrom['eh'] and astrom['em'] contain Sun to observer unit vector,\n # and distance, respectively. Shapes are (X) and (X,3), where (X) is the\n # shape resulting from broadcasting the shape of the times object\n # against the shape of the pv array.\n # broadcast em to eh and scale eh\n eh = astrom['eh'] * astrom['em'][..., np.newaxis]\n eh = CartesianRepresentation(eh, unit=u.au, xyz_axis=-1, copy=False)\n\n newrep = intermedrep.to_cartesian() + eh\n\n return hcrs_frame.realize_frame(newrep)\n\n\n_NEED_ORIGIN_HINT = (\"The input {0} coordinates do not have length units. This \"\n \"probably means you created coordinates with lat/lon but \"\n \"no distance. 
Heliocentric<->ICRS transforms cannot \"\n \"function in this case because there is an origin shift.\")\n\n\n@frame_transform_graph.transform(AffineTransform, HCRS, ICRS)\ndef hcrs_to_icrs(hcrs_coo, icrs_frame):\n # this is just an origin translation so without a distance it cannot go ahead\n if isinstance(hcrs_coo.data, UnitSphericalRepresentation):\n raise u.UnitsError(_NEED_ORIGIN_HINT.format(hcrs_coo.__class__.__name__))\n\n if hcrs_coo.data.differentials:\n from astropy.coordinates.solar_system import get_body_barycentric_posvel\n bary_sun_pos, bary_sun_vel = get_body_barycentric_posvel('sun',\n hcrs_coo.obstime)\n bary_sun_vel = bary_sun_vel.represent_as(CartesianDifferential)\n bary_sun_pos = bary_sun_pos.with_differentials(bary_sun_vel)\n\n else:\n from astropy.coordinates.solar_system import get_body_barycentric\n bary_sun_pos = get_body_barycentric('sun', hcrs_coo.obstime)\n bary_sun_vel = None\n\n return None, bary_sun_pos\n\n\n@frame_transform_graph.transform(AffineTransform, ICRS, HCRS)\ndef icrs_to_hcrs(icrs_coo, hcrs_frame):\n # this is just an origin translation so without a distance it cannot go ahead\n if isinstance(icrs_coo.data, UnitSphericalRepresentation):\n raise u.UnitsError(_NEED_ORIGIN_HINT.format(icrs_coo.__class__.__name__))\n\n if icrs_coo.data.differentials:\n from astropy.coordinates.solar_system import get_body_barycentric_posvel\n bary_sun_pos, bary_sun_vel = get_body_barycentric_posvel('sun',\n hcrs_frame.obstime)\n bary_sun_pos = -bary_sun_pos\n bary_sun_vel = -bary_sun_vel.represent_as(CartesianDifferential)\n bary_sun_pos = bary_sun_pos.with_differentials(bary_sun_vel)\n\n else:\n from astropy.coordinates.solar_system import get_body_barycentric\n bary_sun_pos = -get_body_barycentric('sun', hcrs_frame.obstime)\n bary_sun_vel = None\n\n return None, bary_sun_pos\n\n\n@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, HCRS, HCRS)\ndef hcrs_to_hcrs(from_coo, to_frame):\n if np.all(from_coo.obstime == to_frame.obstime):\n return to_frame.realize_frame(from_coo.data)\n else:\n # like CIRS, we do this self-transform via ICRS\n return from_coo.transform_to(ICRS()).transform_to(to_frame)\n" ]
[ [ "numpy.random.seed", "numpy.linspace", "numpy.meshgrid", "numpy.arange", "numpy.random.RandomState", "numpy.logspace", "numpy.isnan", "numpy.cos", "numpy.sin", "numpy.diff", "numpy.random.rand", "numpy.testing.assert_allclose", "numpy.array", "numpy.vstack" ], [ "numpy.all", "numpy.any" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
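A minimal sketch of the fit-and-compare pattern that test_fitter1D in the record above exercises, assuming astropy and scipy are installed; the Gaussian1D model, its parameters, and the 1% noise level are illustrative choices, not values from the record:

    import numpy as np
    from astropy.modeling import models, fitting

    rng = np.random.RandomState(0)
    x = np.linspace(-5.0, 5.0, 200)
    true_model = models.Gaussian1D(amplitude=3.0, mean=1.3, stddev=0.9)
    # ~1% multiplicative noise, mirroring the relative amplitude noise in the test
    data = true_model(x) * (1 + 0.01 * rng.randn(x.size))

    fitter = fitting.LevMarLSQFitter()
    fitted = fitter(true_model.copy(), x, data)
    print(fitted.parameters)  # expected to land close to (3.0, 1.3, 0.9)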
davidbrochart/pythran
[ "24b6c8650fe99791a4091cbdc2c24686e86aa67c", "24b6c8650fe99791a4091cbdc2c24686e86aa67c", "24b6c8650fe99791a4091cbdc2c24686e86aa67c", "24b6c8650fe99791a4091cbdc2c24686e86aa67c", "24b6c8650fe99791a4091cbdc2c24686e86aa67c" ]
[ "pythran/tests/cases/lapl3d.py", "docs/papers/sc2013/hyantes_core.py", "pythran/tests/pydata/compute_mask.py", "pythran/tests/scipy/solve_toeplitz.py", "pythran/tests/cases/projection_simplex.py" ]
[ "#runas: import numpy as np ; N = 500 ; X = np.random.randn(N,N,3); laplacien(X)\n#pythran export laplacien(float64[][][3])\n\nimport numpy as np\ndef laplacien(image):\n out_image = np.abs(4*image[1:-1,1:-1] -\n image[0:-2,1:-1] - image[2:,1:-1] -\n image[1:-1,0:-2] - image[1:-1,2:])\n valmax = np.max(out_image)\n valmax = max(1.,valmax)+1.E-9\n out_image /= valmax\n return out_image\n", "#pythran export run(float, float, float, float, float, float, int, int, float [][])\nimport math\nfrom numpy import zeros\ndef run(xmin, ymin, xmax, ymax, step, range_, range_x, range_y, t):\n pt = zeros((range_x, range_y, 3))\n \"omp parallel for private(i,j,k,tmp)\"\n for i in xrange(range_x):\n for j in xrange(range_y):\n pt[i,j,0], pt[i,j,1] = (xmin+step*i)*180/math.pi, (ymin+step*j)*180/math.pi\n for k in xrange(t.shape[0]):\n tmp = 6368.* math.acos( math.cos(xmin+step*i)*math.cos( t[k,0] ) * math.cos((ymin+step*j)-t[k,1])+ math.sin(xmin+step*i)*math.sin(t[k,0]))\n if tmp < range_:\n pt[i,j,2]+= t[k,2] / (1+tmp)\n return pt\n", "#pythran export compute_mask(int[:,:], int[:,:])\n#runas import numpy as np; coords = np.array([[0, 0, 1, 1, 2, 2]]); indices = np.array([[0, 3, 2]]); compute_mask(coords, indices)\n\nimport numpy as np\ndef compute_mask(coords, indices): # pragma: no cover\n \"\"\"\n Gets the mask for the coords given the indices in slice format.\n\n Works with either start-stop ranges of matching indices into coords\n called \"pairs\" (start-stop pairs) or filters the mask directly, based\n on which is faster.\n\n Exploits the structure in sorted coords, which is that for a constant\n value of coords[i - 1], coords[i - 2] and so on, coords[i] is sorted.\n Concretely, ``coords[i, coords[i - 1] == v1 & coords[i - 2] = v2, ...]``\n is always sorted. It uses this sortedness to find sub-pairs for each\n dimension given the previous, and so on. This is efficient for small\n slices or ints, but not for large ones.\n\n After it detects that working with pairs is rather inefficient (or after\n going through each possible index), it constructs a filtered mask from the\n start-stop pairs.\n\n Parameters\n ----------\n coords : np.ndarray\n The coordinates of the array.\n indices : np.ndarray\n The indices in the form of slices such that indices[:, 0] are starts,\n indices[:, 1] are stops and indices[:, 2] are steps.\n\n Returns\n -------\n mask : np.ndarray\n The starts and stops in the mask.\n is_slice : bool\n Whether or not the array represents a continuous slice.\n\n Examples\n --------\n Let's create some mock coords and indices\n\n >>> import numpy as np\n >>> coords = np.array([[0, 0, 1, 1, 2, 2]])\n >>> indices = np.array([[0, 3, 2]]) # Equivalent to slice(0, 3, 2)\n\n Now let's get the mask. Notice that the indices of ``0`` and ``2`` are matched.\n\n >>> _compute_mask(coords, indices)\n (array([0, 1, 4, 5]), False)\n\n Now, let's try with a more \"continuous\" slice. 
Matches ``0`` and ``1``.\n\n >>> indices = np.array([[0, 2, 1]])\n >>> _compute_mask(coords, indices)\n (array([0, 4]), True)\n\n This is equivalent to mask being ``slice(0, 4, 1)``.\n \"\"\"\n # Set the initial mask to be the entire range of coordinates.\n starts = [0]\n stops = [coords.shape[1]]\n n_matches = coords.shape[1]\n\n i = 0\n while i < len(indices):\n # Guesstimate whether working with pairs is more efficient or\n # working with the mask directly.\n # One side is the estimate of time taken for binary searches\n # (n_searches * log(avg_length))\n # The other is an estimated time of a linear filter for the mask.\n n_pairs = len(starts)\n n_current_slices = _get_slice_len(indices[i]) * n_pairs + 2\n if n_current_slices * np.log(n_current_slices / max(n_pairs, 1)) > \\\n n_matches + n_pairs:\n break\n\n # For each of the pairs, search inside the coordinates for other\n # matching sub-pairs.\n # This gets the start-end coordinates in coords for each 'sub-array'\n # Which would come out of indexing a single integer.\n starts, stops, n_matches = _get_mask_pairs(starts, stops, coords[i], indices[i])\n\n i += 1\n\n # Combine adjacent pairs\n starts, stops = _join_adjacent_pairs(starts, stops)\n\n # If just one pair is left over, treat it as a slice.\n if i == len(indices) and len(starts) == 1:\n return np.array([starts[0], stops[0]]), True\n\n # Convert start-stop pairs into mask, filtering by remaining\n # coordinates.\n mask = _filter_pairs(starts, stops, coords[i:], indices[i:])\n\n return np.array(mask, dtype=np.intp), False\n\n\ndef _get_slice_len(idx):\n \"\"\"\n Get the number of elements in a slice.\n\n Parameters\n ----------\n idx : np.ndarray\n A (3,) shaped array containing start, stop, step\n\n Returns\n -------\n n : int\n The length of the slice.\n\n Examples\n --------\n >>> idx = np.array([5, 15, 5])\n >>> _get_slice_len(idx)\n 2\n \"\"\"\n start, stop, step = idx[0], idx[1], idx[2]\n\n if step > 0:\n return (stop - start + step - 1) // step\n else:\n return (start - stop - step - 1) // (-step)\n\n\ndef _get_mask_pairs(starts_old, stops_old, c, idx): # pragma: no cover\n \"\"\"\n Gets the pairs for a following dimension given the pairs for\n a dimension.\n\n For each pair, it searches in the following dimension for\n matching coords and returns those.\n\n The total combined length of all pairs is returned to\n help with the performance guesstimate.\n\n Parameters\n ----------\n starts_old, stops_old : list[int]\n The starts and stops from the previous index.\n c : np.ndarray\n The coords for this index's dimension.\n idx : np.ndarray\n The index in the form of a slice.\n idx[0], idx[1], idx[2] = start, stop, step\n\n Returns\n -------\n starts, stops: list\n The starts and stops after applying the current index.\n n_matches : int\n The sum of elements in all ranges.\n\n Examples\n --------\n >>> c = np.array([1, 2, 1, 2, 1, 1, 2, 2])\n >>> starts_old = [4]\n >>> stops_old = [8]\n >>> idx = np.array([1, 2, 1])\n >>> _get_mask_pairs(starts_old, stops_old, c, idx)\n ([4], [6], 2)\n \"\"\"\n starts = []\n stops = []\n n_matches = 0\n\n for j in range(len(starts_old)):\n # For each matching \"integer\" in the slice, search within the \"sub-coords\"\n # Using binary search.\n for p_match in range(idx[0], idx[1], idx[2]):\n start = np.searchsorted(c[starts_old[j]:stops_old[j]], p_match) + starts_old[j]\n stop = np.searchsorted(c[starts_old[j]:stops_old[j]], p_match + 1) + starts_old[j]\n\n if start != stop:\n starts.append(start)\n stops.append(stop)\n n_matches += stop - 
start\n\n return starts, stops, n_matches\n\ndef _join_adjacent_pairs(starts_old, stops_old): # pragma: no cover\n \"\"\"\n Joins adjacent pairs into one. For example, 2-5 and 5-7\n will reduce to 2-7 (a single pair). This may help in\n returning a slice in the end which could be faster.\n\n Parameters\n ----------\n starts_old, stops_old : list[int]\n The input starts and stops\n\n Returns\n -------\n starts, stops : list[int]\n The reduced starts and stops.\n\n Examples\n --------\n >>> starts = [2, 5]\n >>> stops = [5, 7]\n >>> _join_adjacent_pairs(starts, stops)\n ([2], [7])\n \"\"\"\n if len(starts_old) <= 1:\n return starts_old, stops_old\n\n starts = [starts_old[0]]\n stops = []\n\n for i in range(1, len(starts_old)):\n if starts_old[i] != stops_old[i - 1]:\n starts.append(starts_old[i])\n stops.append(stops_old[i - 1])\n\n stops.append(stops_old[-1])\n\n return starts, stops\n\ndef _filter_pairs(starts, stops, coords, indices): # pragma: no cover\n \"\"\"\n Converts all the pairs into a single integer mask, additionally filtering\n by the indices.\n\n Parameters\n ----------\n starts, stops : list[int]\n The starts and stops to convert into an array.\n coords : np.ndarray\n The coordinates to filter by.\n indices : np.ndarray\n The indices in the form of slices such that indices[:, 0] are starts,\n indices[:, 1] are stops and indices[:, 2] are steps.\n\n Returns\n -------\n mask : list\n The output integer mask.\n\n Examples\n --------\n >>> import numpy as np\n >>> starts = [2]\n >>> stops = [7]\n >>> coords = np.array([[0, 1, 2, 3, 4, 5, 6, 7]])\n >>> indices = np.array([[2, 8, 2]]) # Start, stop, step pairs\n >>> _filter_pairs(starts, stops, coords, indices)\n [2, 4, 6]\n \"\"\"\n mask = []\n\n # For each pair,\n for i in range(len(starts)):\n # For each element match within the pair range\n for j in range(starts[i], stops[i]):\n match = True\n\n # Check if it matches all indices\n for k in range(len(indices)):\n idx = indices[k]\n elem = coords[k, j]\n\n match &= ((elem - idx[0]) % idx[2] == 0 and\n ((idx[2] > 0 and idx[0] <= elem < idx[1])\n or (idx[2] < 0 and idx[0] >= elem > idx[1])))\n\n # and append to the mask if so.\n if match:\n mask.append(j)\n\n return mask\n", "# Author: Robert T. McGibbon, December 2014\n\nfrom numpy import zeros, asarray, complex128, float64, zeros_like\n\n#pythran export levinson(float64[], float64[])\n#pythran export levinson(complex128[], complex128[])\n#runas import numpy as np; x = np.arange(1, 4.)*7; y = np.arange(-11., -9.)\n\ndef levinson(a, b):\n \"\"\"Solve a linear Toeplitz system using Levinson recursion.\n\n Parameters\n ----------\n a : array, dtype=double or complex128, shape=(2n-1,)\n The first column of the matrix in reverse order (without the diagonal)\n followed by the first (see below)\n b : array, dtype=double or complex128, shape=(n,)\n The right hand side vector. Both a and b must have the same type\n (double or complex128).\n\n Notes\n -----\n For example, the 5x5 toeplitz matrix below should be represented as\n the linear array ``a`` on the right ::\n\n [ a0 a1 a2 a3 a4 ]\n [ a-1 a0 a1 a2 a3 ]\n [ a-2 a-1 a0 a1 a2 ] -> [a-4 a-3 a-2 a-1 a0 a1 a2 a3 a4]\n [ a-3 a-2 a-1 a0 a1 ]\n [ a-4 a-3 a-2 a-1 a0 ]\n\n Returns\n -------\n x : arrray, shape=(n,)\n The solution vector\n reflection_coeff : array, shape=(n+1,)\n Toeplitz reflection coefficients. 
When a is symmetric Toeplitz and\n ``b`` is ``a[n:]``, as in the solution of autoregressive systems,\n then ``reflection_coeff`` also correspond to the partial\n autocorrelation function.\n \"\"\"\n # Adapted from toeplitz.f90 by Alan Miller, accessed at\n # http://jblevins.org/mirror/amiller/toeplitz.f90\n # Released under a Public domain declaration.\n\n n = b.shape[0]\n x = zeros_like(b) # result\n g = zeros_like(b) # workspace\n h = zeros_like(b) # workspace\n reflection_coeff = zeros(n+1, dtype=b.dtype) # history\n assert len(a) == (2*n) - 1\n\n if a[n-1] == 0:\n raise ValueError('Singular principal minor')\n\n x[0] = b[0] / a[n-1]\n reflection_coeff[0] = 1\n reflection_coeff[1] = x[0]\n\n if (n == 1):\n return asarray(x), asarray(reflection_coeff)\n\n g[0] = a[n-2] / a[n-1]\n h[0] = a[n] / a[n-1]\n\n for m in range(1, n):\n # Compute numerator and denominator of x[m]\n x_num = -b[m]\n x_den = -a[n-1]\n for j in range(m):\n nmj = n + m - (j+1)\n x_num = x_num + a[nmj] * x[j]\n x_den = x_den + a[nmj] * g[m-j-1]\n if x_den == 0:\n raise ValueError('Singular principal minor')\n x[m] = x_num / x_den\n reflection_coeff[m+1] = x[m]\n\n # Compute x\n for j in range(m):\n x[j] = x[j] - x[m] * g[m-j-1]\n if m == n-1:\n return asarray(x), asarray(reflection_coeff)\n\n # Compute the numerator and denominator of g[m] and h[m]\n g_num = -a[n-m-2]\n h_num = -a[n+m]\n g_den = -a[n-1]\n for j in range(m):\n g_num = g_num + a[n+j-m-1] * g[j]\n h_num = h_num + a[n+m-j-1] * h[j]\n g_den = g_den + a[n+j-m-1] * h[m-j-1]\n\n if g_den == 0.0:\n raise ValueError(\"Singular principal minor\")\n\n # Compute g and h\n g[m] = g_num / g_den\n h[m] = h_num / x_den\n k = m - 1\n m2 = (m + 1) >> 1\n c1 = g[m]\n c2 = h[m]\n for j in range(m2):\n gj = g[j]\n gk = g[k]\n hj = h[j]\n hk = h[k]\n g[j] = gj - (c1 * hk)\n g[k] = gk - (c1 * hj)\n h[j] = hj - (c2 * gk)\n h[k] = hk - (c2 * gj)\n k -= 1\n", "#from https://gist.github.com/mblondel/c99e575a5207c76a99d714e8c6e08e89\n#pythran export projection_simplex(float[], int)\n#runas import numpy as np; np.random.seed(0); x = np.random.rand(10); projection_simplex(x, 1)\n\nimport numpy as np\ndef projection_simplex(v, z=1):\n \"\"\"\n Old implementation for test and benchmark purposes.\n The arguments v and z should be a vector and a scalar, respectively.\n \"\"\"\n n_features = v.shape[0]\n u = np.sort(v)[::-1]\n cssv = np.cumsum(u) - z\n ind = np.arange(n_features) + 1\n cond = u - cssv / ind > 0\n rho = ind[cond][-1]\n theta = cssv[cond][-1] / float(rho)\n w = np.maximum(v - theta, 0)\n return w\n" ]
[ [ "numpy.max", "numpy.abs" ], [ "numpy.zeros" ], [ "numpy.array", "numpy.searchsorted" ], [ "numpy.asarray", "numpy.zeros_like", "numpy.zeros" ], [ "numpy.arange", "numpy.maximum", "numpy.cumsum", "numpy.sort" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
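The projection_simplex routine in the record above is self-contained enough to restate and sanity-check in a few lines; this sketch assumes only NumPy, and the closing assertions are illustrative invariants (nonnegativity, sum equal to z) rather than tests from the repository:

    import numpy as np

    def projection_simplex(v, z=1.0):
        # Sort descending, locate the support via the cumulative-sum condition,
        # then shift by theta and clip at zero (same algorithm as the record).
        u = np.sort(v)[::-1]
        cssv = np.cumsum(u) - z
        ind = np.arange(v.shape[0]) + 1
        cond = u - cssv / ind > 0
        rho = ind[cond][-1]
        theta = cssv[cond][-1] / float(rho)
        return np.maximum(v - theta, 0)

    w = projection_simplex(np.random.rand(10), z=1.0)
    assert w.min() >= 0 and np.isclose(w.sum(), 1.0)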
richardrl/ray
[ "cf53b351471716e7bfa71d36368ebea9b0e219c5" ]
[ "rllib/agents/dqn/tests/test_dqn.py" ]
[ "import numpy as np\nimport unittest\n\nimport ray\nimport ray.rllib.agents.dqn as dqn\nfrom ray.rllib.utils.framework import try_import_tf\nfrom ray.rllib.utils.test_utils import check, framework_iterator, \\\n check_compute_action\n\ntf = try_import_tf()\n\n\nclass TestDQN(unittest.TestCase):\n @classmethod\n def setUpClass(cls) -> None:\n ray.init()\n\n @classmethod\n def tearDownClass(cls) -> None:\n ray.shutdown()\n\n def test_dqn_compilation(self):\n \"\"\"Test whether a DQNTrainer can be built on all frameworks.\"\"\"\n config = dqn.DEFAULT_CONFIG.copy()\n config[\"num_workers\"] = 2\n num_iterations = 1\n\n for fw in framework_iterator(config):\n # Double-dueling DQN.\n plain_config = config.copy()\n trainer = dqn.DQNTrainer(config=plain_config, env=\"CartPole-v0\")\n for i in range(num_iterations):\n results = trainer.train()\n print(results)\n\n check_compute_action(trainer)\n\n # Rainbow.\n # TODO(sven): Add torch once DQN-torch supports distributional-Q.\n if fw == \"torch\":\n continue\n rainbow_config = config.copy()\n rainbow_config[\"num_atoms\"] = 10\n rainbow_config[\"noisy\"] = True\n rainbow_config[\"double_q\"] = True\n rainbow_config[\"dueling\"] = True\n rainbow_config[\"n_step\"] = 5\n trainer = dqn.DQNTrainer(config=rainbow_config, env=\"CartPole-v0\")\n for i in range(num_iterations):\n results = trainer.train()\n print(results)\n\n check_compute_action(trainer)\n\n def test_dqn_exploration_and_soft_q_config(self):\n \"\"\"Tests, whether a DQN Agent outputs exploration/softmaxed actions.\"\"\"\n config = dqn.DEFAULT_CONFIG.copy()\n config[\"num_workers\"] = 0 # Run locally.\n config[\"env_config\"] = {\"is_slippery\": False, \"map_name\": \"4x4\"}\n obs = np.array(0)\n\n # Test against all frameworks.\n for _ in framework_iterator(config):\n # Default EpsilonGreedy setup.\n trainer = dqn.DQNTrainer(config=config, env=\"FrozenLake-v0\")\n # Setting explore=False should always return the same action.\n a_ = trainer.compute_action(obs, explore=False)\n for _ in range(50):\n a = trainer.compute_action(obs, explore=False)\n check(a, a_)\n # explore=None (default: explore) should return different actions.\n actions = []\n for _ in range(50):\n actions.append(trainer.compute_action(obs))\n check(np.std(actions), 0.0, false=True)\n\n # Low softmax temperature. 
Behaves like argmax\n # (but no epsilon exploration).\n config[\"exploration_config\"] = {\n \"type\": \"SoftQ\",\n \"temperature\": 0.000001\n }\n trainer = dqn.DQNTrainer(config=config, env=\"FrozenLake-v0\")\n # Due to the low temp, always expect the same action.\n actions = [trainer.compute_action(obs)]\n for _ in range(50):\n actions.append(trainer.compute_action(obs))\n check(np.std(actions), 0.0, decimals=3)\n\n # Higher softmax temperature.\n config[\"exploration_config\"][\"temperature\"] = 1.0\n trainer = dqn.DQNTrainer(config=config, env=\"FrozenLake-v0\")\n\n # Even with the higher temperature, if we set explore=False, we\n # should expect the same actions always.\n a_ = trainer.compute_action(obs, explore=False)\n for _ in range(50):\n a = trainer.compute_action(obs, explore=False)\n check(a, a_)\n\n # Due to the higher temp, expect different actions avg'ing\n # around 1.5.\n actions = []\n for _ in range(300):\n actions.append(trainer.compute_action(obs))\n check(np.std(actions), 0.0, false=True)\n\n # With Random exploration.\n config[\"exploration_config\"] = {\"type\": \"Random\"}\n config[\"explore\"] = True\n trainer = dqn.DQNTrainer(config=config, env=\"FrozenLake-v0\")\n actions = []\n for _ in range(300):\n actions.append(trainer.compute_action(obs))\n check(np.std(actions), 0.0, false=True)\n\n\nif __name__ == \"__main__\":\n import pytest\n import sys\n sys.exit(pytest.main([\"-v\", __file__]))\n" ]
[ [ "numpy.std", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
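The temperature behaviour that test_dqn_exploration_and_soft_q_config above relies on can be reproduced without RLlib; a sketch assuming only NumPy, with made-up Q-values (softq_action is a hypothetical helper, not an RLlib API):

    import numpy as np

    def softq_action(q_values, temperature, rng):
        # Boltzmann exploration: sample an action from softmax(Q / T).
        z = q_values / temperature
        z -= z.max()  # numerical stability
        p = np.exp(z) / np.exp(z).sum()
        return rng.choice(len(q_values), p=p)

    rng = np.random.default_rng(0)
    q = np.array([0.1, 0.5, 0.2, 0.2])
    cold = [softq_action(q, 1e-6, rng) for _ in range(50)]
    hot = [softq_action(q, 1.0, rng) for _ in range(50)]
    assert np.std(cold) == 0.0  # near-zero temperature behaves like argmax
    assert np.std(hot) > 0.0    # unit temperature actually explores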
oliver0922/yolo3dstereo
[ "24c37c4574eedd85593a0060b7c317b3e08c0460" ]
[ "visualDet3D/data/kitti/kittidata.py" ]
[ "'''\nFile Created: Sunday, 17th March 2019 3:58:52 pm\nAuthor: Peng YUN ([email protected])\nCopyright 2018 - 2019 RAM-Lab, RAM-Lab\n'''\nimport os\nimport math\nimport numpy as np\nfrom numpy.linalg import inv\nfrom .utils import read_image, read_pc_from_bin, _lidar2leftcam, _leftcam2lidar, _leftcam2imgplane\n# KITTI\nclass KittiCalib:\n '''\n class storing KITTI calib data\n self.data(None/dict):keys: 'P0', 'P1', 'P2', 'P3', 'R0_rect', 'Tr_velo_to_cam', 'Tr_imu_to_velo'\n self.R0_rect(np.array): [4,4]\n self.Tr_velo_to_cam(np.array): [4,4]\n '''\n def __init__(self, calib_path):\n self.path = calib_path\n self.data = None\n\n def read_calib_file(self):\n '''\n read KITTI calib file\n '''\n calib = dict()\n with open(self.path, 'r') as f:\n str_list = f.readlines()\n str_list = [itm.rstrip() for itm in str_list if itm != '\\n']\n for itm in str_list:\n calib[itm.split(':')[0]] = itm.split(':')[1]\n for k, v in calib.items():\n calib[k] = [float(itm) for itm in v.split()]\n self.data = calib\n\n self.P2 = np.array(self.data['P2']).reshape(3,4)\n self.P3 = np.array(self.data['P3']).reshape(3,4)\n\n R0_rect = np.zeros([4, 4])\n R0_rect[0:3, 0:3] = np.array(self.data['R0_rect']).reshape(3, 3)\n R0_rect[3, 3] = 1\n self.R0_rect = R0_rect\n\n Tr_velo_to_cam = np.zeros([4, 4])\n Tr_velo_to_cam[0:3, :] = np.array(self.data['Tr_velo_to_cam']).reshape(3, 4)\n Tr_velo_to_cam[3, 3] = 1\n self.Tr_velo_to_cam = Tr_velo_to_cam\n\n return self\n \n def leftcam2lidar(self, pts):\n '''\n transform the pts from the left camera frame to lidar frame\n pts_lidar = Tr_velo_to_cam^{-1} @ R0_rect^{-1} @ pts_cam\n inputs:\n pts(np.array): [#pts, 3]\n points in the left camera frame\n '''\n if self.data is None:\n print(\"read_calib_file should be read first\")\n raise RuntimeError\n return _leftcam2lidar(pts, self.Tr_velo_to_cam, self.R0_rect)\n\n def lidar2leftcam(self, pts):\n '''\n transform the pts from the lidar frame to the left camera frame\n pts_cam = R0_rect @ Tr_velo_to_cam @ pts_lidar\n inputs:\n pts(np.array): [#pts, 3]\n points in the lidar frame\n '''\n if self.data is None:\n print(\"read_calib_file should be read first\")\n raise RuntimeError\n return _lidar2leftcam(pts, self.Tr_velo_to_cam, self.R0_rect)\n\n def leftcam2imgplane(self, pts):\n '''\n project the pts from the left camera frame to left camera plane\n pixels = P2 @ pts_cam\n inputs:\n pts(np.array): [#pts, 3]\n points in the left camera frame\n '''\n if self.data is None:\n print(\"read_calib_file should be read first\")\n raise RuntimeError\n return _leftcam2imgplane(pts, self.P2)\n\nclass KittiLabel:\n '''\n class storing KITTI 3d object detection label\n self.data ([KittiObj])\n '''\n def __init__(self, label_path=None):\n self.path = label_path\n self.data = None\n\n def read_label_file(self, no_dontcare=True):\n '''\n read KITTI label file\n '''\n self.data = []\n with open(self.path, 'r') as f:\n str_list = f.readlines()\n str_list = [itm.rstrip() for itm in str_list if itm != '\\n']\n for s in str_list:\n self.data.append(KittiObj(s))\n if no_dontcare:\n self.data = list(filter(lambda obj: obj.type != \"DontCare\", self.data))\n return self\n #self.data=[Kittiobj(pedestrain 0,3 ,...),Kittiobj(car 0,6,53...)]\n\n def __str__(self):\n '''\n TODO: Unit TEST\n '''\n s = ''\n for obj in self.data:\n s += obj.__str__() + '\\n'\n return s\n\n def equal(self, label, acc_cls, rtol):\n '''\n equal oprator for KittiLabel\n inputs:\n label: KittiLabel\n acc_cls: list [str]\n ['Car', 'Van']\n eot: float\n Notes: O(N^2)\n '''\n if 
len(self.data) != len(label.data):\n return False\n if len(self.data) == 0:\n return True\n bool_list = []\n for obj1 in self.data:\n bool_obj1 = False\n for obj2 in label.data:\n bool_obj1 = bool_obj1 or obj1.equal(obj2, acc_cls, rtol)\n bool_list.append(bool_obj1)\n return any(bool_list)\n\n def isempty(self):\n '''\n return True if self.data = None or self.data = []\n '''\n return self.data is None or len(self.data) == 0\n\nclass KittiObj():\n '''\n class storing a KITTI 3d object\n '''\n def __init__(self, s=None):\n self.type = None\n self.truncated = None\n self.occluded = None\n self.alpha = None\n self.bbox_l = None\n self.bbox_t = None\n self.bbox_r = None\n self.bbox_b = None\n self.h = None\n self.w = None\n self.l = None\n self.x = None\n self.y = None\n self.z = None\n self.ry = None\n self.score = None\n if s is None:\n return\n if len(s.split()) == 15: # data\n self.truncated, self.occluded, self.alpha,\\\n self.bbox_l, self.bbox_t, self.bbox_r, self.bbox_b, \\\n self.h, self.w, self.l, self.x, self.y, self.z, self.ry = \\\n [float(itm) for itm in s.split()[1:]]\n self.type = s.split()[0]\n elif len(s.split()) == 16: # result\n self.truncated, self.occluded, self.alpha,\\\n self.bbox_l, self.bbox_t, self.bbox_r, self.bbox_b, \\\n self.h, self.w, self.l, self.x, self.y, self.z, self.ry, self.score = \\\n [float(itm) for itm in s.split()[1:]]\n self.type = s.split()[0]\n else:\n raise NotImplementedError\n\n def __str__(self):\n if self.score is None:\n return \"{} {:.2f} {} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f}\".format(\n self.type, self.truncated, int(self.occluded), self.alpha,\\\n self.bbox_l, self.bbox_t, self.bbox_r, self.bbox_b, \\\n self.h, self.w, self.l, self.x, self.y, self.z, self.ry)\n else:\n return \"{} {:.2f} {} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f}\".format(\n self.type, self.truncated, int(self.occluded), self.alpha,\\\n self.bbox_l, self.bbox_t, self.bbox_r, self.bbox_b, \\\n self.h, self.w, self.l, self.x, self.y, self.z, self.ry, self.score)\n\nclass KittiData:\n '''\n class storing a frame of KITTI data\n '''\n def __init__(self, root_dir, idx, output_dict=None):\n '''\n inputs:\n root_dir(str): kitti dataset dir\n idx(str %6d): data index e.g. 
\"000000\"\n output_dict: decide what to output\n '''\n self.calib_path = os.path.join(root_dir, \"calib\", idx+'.txt')\n self.image2_path = os.path.join(root_dir, \"image_2\", idx+'.png')\n self.image3_path = os.path.join(root_dir, 'image_3', idx+'.png')\n self.label2_path = os.path.join(root_dir, \"label_2\", idx+'.txt')\n self.velodyne_path = os.path.join(root_dir, \"velodyne\", idx+'.bin')\n self.output_dict = output_dict\n if self.output_dict is None:\n self.output_dict = {\n \"calib\": True,\n \"image\": True,\n \"image_3\": False,\n \"label\": True,\n \"velodyne\": True\n }\n\n def read_data(self):\n '''\n read data\n returns:\n calib(KittiCalib)\n image(np.array): [w, h, 3]\n label(KittiLabel)\n pc(np.array): [# of points, 4]\n point cloud in lidar frame.\n [x, y, z]\n ^x\n |\n y<----.z\n '''\n \n calib = KittiCalib(self.calib_path).read_calib_file() if self.output_dict[\"calib\"] else None\n image = read_image(self.image2_path) if self.output_dict[\"image\"] else None\n label = KittiLabel(self.label2_path).read_label_file() if self.output_dict[\"label\"] else None\n pc = read_pc_from_bin(self.velodyne_path) if self.output_dict[\"velodyne\"] else None\n if 'image_3' in self.output_dict and self.output_dict['image_3']:\n image_3 = read_image(self.image3_path) if self.output_dict[\"image_3\"] else None\n\n return calib, image, image_3, label, pc\n else:\n return calib, image, label, pc\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
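The lidar2leftcam method in the record above is a homogeneous-coordinate change of frame; a standalone sketch assuming NumPy, with identity matrices standing in for real KITTI calibration data:

    import numpy as np

    def lidar_to_leftcam(pts, Tr_velo_to_cam, R0_rect):
        # pts_cam = R0_rect @ Tr_velo_to_cam @ pts_lidar, in homogeneous coordinates
        hom = np.hstack([pts, np.ones((pts.shape[0], 1))])  # [N, 3] -> [N, 4]
        cam = (R0_rect @ Tr_velo_to_cam @ hom.T).T          # [N, 4]
        return cam[:, :3]

    pts = np.array([[10.0, 1.0, -0.5]])  # one point in the lidar frame
    eye = np.eye(4)                      # placeholder calibration matrices
    print(lidar_to_leftcam(pts, eye, eye))  # identity calib leaves the point unchanged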
side-projects-42/INTERVIEW-PREP-COMPLETE
[ "627a3315cee4bbc38a0e81c256f27f928eac2d63" ]
[ "notes-n-resources/Data-Structures-N-Algo/_DS-n-Algos/_Another-One/sorts/random_normaldistribution_quicksort.py" ]
[ "from __future__ import print_function\nfrom random import randint\nfrom tempfile import TemporaryFile\nimport numpy as np\nimport math\n\n\ndef _inPlaceQuickSort(A, start, end):\n count = 0\n if start < end:\n pivot = randint(start, end)\n temp = A[end]\n A[end] = A[pivot]\n A[pivot] = temp\n\n p, count = _inPlacePartition(A, start, end)\n count += _inPlaceQuickSort(A, start, p - 1)\n count += _inPlaceQuickSort(A, p + 1, end)\n return count\n\n\ndef _inPlacePartition(A, start, end):\n\n count = 0\n pivot = randint(start, end)\n temp = A[end]\n A[end] = A[pivot]\n A[pivot] = temp\n newPivotIndex = start - 1\n for index in range(start, end):\n\n count += 1\n if A[index] < A[end]: # check if current val is less than pivot value\n newPivotIndex = newPivotIndex + 1\n temp = A[newPivotIndex]\n A[newPivotIndex] = A[index]\n A[index] = temp\n\n temp = A[newPivotIndex + 1]\n A[newPivotIndex + 1] = A[end]\n A[end] = temp\n return newPivotIndex + 1, count\n\n\noutfile = TemporaryFile()\np = 100 # 100 elements are to be sorted\n\n\nmu, sigma = 0, 1 # mean and standard deviation\nX = np.random.normal(mu, sigma, p)\nnp.save(outfile, X)\nprint(\"The array is\")\nprint(X)\n\n\noutfile.seek(0) # using the same array\nM = np.load(outfile)\nr = len(M) - 1\nz = _inPlaceQuickSort(M, 0, r)\n\nprint(\n \"No of Comparisons for 100 elements selected from a standard normal distribution is :\"\n)\nprint(z)\n" ]
[ [ "numpy.load", "numpy.random.normal", "numpy.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
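A compact restatement of the record's randomized quicksort with its comparison counter, assuming NumPy, plus a sortedness check the original script omits; in expectation the count is about 2n·ln n:

    import numpy as np

    def quicksort(a, lo, hi, rng):
        # Lomuto partition around a uniformly random pivot; returns #comparisons.
        if lo >= hi:
            return 0
        p = rng.integers(lo, hi + 1)
        a[p], a[hi] = a[hi], a[p]
        store, count = lo, 0
        for i in range(lo, hi):
            count += 1
            if a[i] < a[hi]:
                a[i], a[store] = a[store], a[i]
                store += 1
        a[store], a[hi] = a[hi], a[store]
        return count + quicksort(a, lo, store - 1, rng) + quicksort(a, store + 1, hi, rng)

    rng = np.random.default_rng(0)
    x = rng.normal(0, 1, 100)
    comparisons = quicksort(x, 0, len(x) - 1, rng)
    assert np.all(np.diff(x) >= 0)  # sorted in place
    print(comparisons, "comparisons; 2n ln n =", int(2 * 100 * np.log(100)))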
itayhubara/AcceleratedSparseNeuralTraining
[ "425897dec9c7ef185841d7000c4418ebb1c95896" ]
[ "prune/pruning_method_utils.py" ]
[ "import torch\n\n\ndef validate_tensor_shape_2d_4d(t):\n shape = t.shape\n if len(shape) not in (2, 4):\n raise ValueError(\n \"Only 2D and 4D tensor shapes are supported. \"\n \"Found tensor of shape {} with {} dims\".format(shape, len(shape))\n )\n\n\ndef pad_inner_dims(t, pad_to):\n \"\"\" return padded-to-block tensor \"\"\"\n inner_flattened = t.view(t.shape[0], -1)\n co, inners = inner_flattened.shape\n pad_required = pad_to > 1 and inners % pad_to != 0\n pad_size = pad_to - inners % pad_to if pad_required else 0\n pad = torch.zeros(co, pad_size).to(inner_flattened.data)\n t_padded = torch.cat((inner_flattened, pad), 1)\n return t_padded\n\n\ndef clip_padding(t, orig_shape):\n \"\"\" return tensor with clipped padding \"\"\"\n co = orig_shape[0]\n inners = 1\n for s in orig_shape[1:]:\n inners *= s\n t_clipped = t.view(co, -1)[:, :inners]\n return t_clipped\n\n\ndef permute_to_nhwc(t):\n \"\"\" for 4D tensors, convert data layout from NCHW to NHWC \"\"\"\n res = t.permute(0, 2, 3, 1).contiguous() if t.dim() == 4 else t\n return res\n\n\ndef permute_to_nchw(t):\n \"\"\" for 4D tensors, convert data layout from NHWC to NCHW \"\"\"\n res = t.permute(0, 3, 1, 2).contiguous() if t.dim() == 4 else t\n return res\n" ]
[ [ "torch.zeros", "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
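pad_inner_dims and clip_padding in the record above are intended to round-trip; a minimal check assuming PyTorch, with a 3×3×3 filter bank (27 inner elements per output channel) padded to a block size of 4:

    import torch

    t = torch.randn(8, 3, 3, 3)              # 27 inner elements per output channel
    flat = t.view(8, -1)
    pad = torch.zeros(8, 1, dtype=t.dtype)   # 27 -> 28 so the inner dim divides by 4
    padded = torch.cat((flat, pad), 1)       # what pad_inner_dims(t, pad_to=4) produces
    assert padded.shape == (8, 28)
    clipped = padded.view(8, -1)[:, :27]     # what clip_padding(padded, t.shape) produces
    assert torch.equal(clipped, flat)        # the original values survive the round trip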
SamIlic/Web-Scraping
[ "fae1e0b000adda18abff44e4c60fbad77e872314" ]
[ "ERC/ERC/spiders/ICOBench.py" ]
[ "# -*- coding: utf-8 -*-\nimport scrapy # needed to scrape\nimport xlrd # used to easily import xlsx file \nimport json\nimport re\nimport pandas as pd\nimport numpy as np\nfrom openpyxl import load_workbook\nimport datetime\n#from datetime import timedelta\n\n\n\n\nclass ICObench(scrapy.Spider):\n name = 'ICOBench' # Name of Script\n \n # file_path = r\"/Users/YoungFreeesh/Visual Studio Code/_Python/Web Scraping/ERC/Data/COINS.xlsx\"\n file_path = r\"/Users/YoungFreeesh/Visual Studio Code/_Python/Web Scraping/ERC/Data/MASTER-1000.xlsx\"\n MASTER_Coin_df = pd.read_excel(file_path, sheet_name = \"Summary\") # read all data from \"Top ERC-20\"\n headers = list(MASTER_Coin_df.columns.values) # get the headers --> ERC-20 Token, Ticker, ID, CoinMarketCap URL, Market Cap (yyyy-mm-dd) \n \n # URLs\n URLs = MASTER_Coin_df['CoinMarketCap URL']\n temp_urls = URLs.values.tolist()\n temp_urls = [url + \"historical-data/\" for url in temp_urls]\n\n\n\n\n print(\"``````````````````````````````````````````````````````````````````````````````\")\n # start_urls = ['https://icobench.com/ico/tezos'] # TEST\n start_urls = temp_urls\n print(\"``````````````````````````````````````````````````````````````````````````````\")\n\n################################################################################################\n################################################################################################\n\n \"\"\"\n Scrape data from ICO-bench for all cryptos in MASTER-1000\n \"\"\"\n def parse(self, response):\n self.logger.info('A response has arrived from %s', response.url)\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n \n # Get Ticker\n tickerTemp = response.css(\"body > div.container.main-section > div > div.col-lg-10.padding-top-1x > div.details-panel.flex-container.bottom-margin-2x > div.details-panel-item--header.flex-container > h1 > span::text\").extract()[0]\n \n\n\n\n # \"\"\"\n # Run after the Scrapy is done scraping/crawling\n # \"\"\"\n # def closed( self, reason ):\n # # Get Time stamp for market cap\n # timeStamp = str(datetime.datetime.today().strftime(' (%Y-%m-%d)')) # Today, as an Integer\n \n # ### Sanity Check\n # print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n # print('Name: ',len(token_name_List))\n # print('Ticker: ',len(ticker_List))\n # print('Rolling Monthly: ', len(rolling_avg_volume_monthly_List))\n # print('Rolling Weekly: ', len(rolling_avg_volume_weekly_List))\n # print('Rank: ', len(scraped_rank_List))\n # print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n \n # ### Append rolling average columns to MASTER_ERC_df\n # # Rolling_df = pd.DataFrame()\n # # Rolling_df['Ticker'] = ticker_List\n # # Rolling_df['Volume: Monthly Rolling Avg'] = rolling_avg_volume_monthly_List\n # # Rolling_df['Market Cap: Monthly Rolling Avg'] = rolling_avg_volume_weekly_List\n \n\n # Rolling_df = pd.DataFrame(list(zip(token_name_List,\n # ticker_List,\n # rolling_avg_volume_monthly_List,\n # rolling_avg_volume_weekly_List\n # 
)),\n # columns=['Name',\n # 'Ticker',\n # 'Daily Volume ($): Monthly Rolling Avg',\n # 'Daily Volume ($): Weekly Rolling Avg'],\n # index=scraped_rank_List)\n # Rolling_df.index.name = \"CMC Rank\" + timeStamp # Rename Index\n\n # # Sort DataFrame by Index\n # Rolling_df=Rolling_df.sort_index() # Sort by CMC Rank (index)\n\n # print(Rolling_df)\n\n # ### Create new Tab in \"MASTER ERC-20.xlsx\"\n # # fileName = \"MASTER ERC-20 Rolling\" #+ timeStamp\n # file_path_HardDrive = r\"/Users/YoungFreeesh/Visual Studio Code/_Python/Web Scraping/ERC/Data/MASTER-1000.xlsx\"\n # writer_HardDrive = pd.ExcelWriter(file_path_HardDrive, engine='openpyxl')\n\n # # Write to new sheet in existing workbook\n # book_HardDrive = load_workbook(file_path_HardDrive)\n # writer_HardDrive.book = book_HardDrive\n\n # # Write Sheet\n # Rolling_df.to_excel(writer_HardDrive, startrow= 0 , index=True, sheet_name= 'Rolling Averages' + timeStamp) # write to \"MASTER-Ercot.xlsx\" spreadsheet\n\n # writer_HardDrive.save()\n # writer_HardDrive.close()\n\n\n\n\n\n\n\n\n" ]
[ [ "pandas.read_excel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
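The commented-out rolling-average bookkeeping in the spider above reduces to a pandas one-liner; a sketch with a hypothetical Volume column, assuming pandas:

    import pandas as pd

    df = pd.DataFrame({"Volume": [100, 120, 90, 130, 110, 95, 105]})
    # 7-day (weekly) rolling mean of daily volume, as the spider intends to report
    df["Volume: Weekly Rolling Avg"] = df["Volume"].rolling(window=7, min_periods=1).mean()
    print(df.tail(1))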
BeylierMPG/Reinforcement-Learning
[ "17495386e36a27c5fc617e24221baf0fc5743c91" ]
[ "algos/models/ddqn_cnn.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.autograd as autograd\nimport torch.nn.functional as F\n\n\nclass DDQNCnn(nn.Module):\n def __init__(self, input_shape, num_actions):\n super(DDQNCnn, self).__init__()\n self.input_shape = input_shape\n self.num_actions = num_actions\n \n self.conv_1 = nn.Sequential(\n nn.Conv2d(input_shape[0], 32, kernel_size=8, stride=4),\n nn.ReLU()\n )\n \n self.conv_2 = nn.Sequential(\n nn.Conv2d(32, 64, kernel_size=4, stride=2),\n nn.ReLU()\n )\n \n self.conv_3 = nn.Sequential(\n nn.Conv2d(64, 64, kernel_size=3, stride=1),\n nn.ReLU()\n )\n \n self.advantage = nn.Sequential(\n nn.Linear(3136, 512),\n nn.ReLU(),\n nn.Linear(512, self.num_actions)\n )\n\n self.value = nn.Sequential(\n nn.Linear(3136, 512),\n nn.ReLU(),\n nn.Linear(512, 1)\n )\n\n\n def forward(self, x):\n x = self.conv_1(x)\n x = self.conv_2(x)\n x = self.conv_3(x)\n x = x.view(x.size(0), -1)\n advantage = self.advantage(x)\n value = self.value(x)\n return value + advantage - advantage.mean()\n \n def feature_size(self):\n # Pass a dummy input through the conv stack to compute the\n # flattened feature size.\n x = autograd.Variable(torch.zeros(1, *self.input_shape))\n return self.conv_3(self.conv_2(self.conv_1(x))).view(1, -1).size(1)" ]
[ [ "torch.nn.Linear", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
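The last line of forward() in the record above is the dueling aggregation; note the record subtracts advantage.mean() over the whole batch, while the standard dueling-DQN formulation (a sketch assuming PyTorch, on dummy tensors) takes the mean per state over the action dimension:

    import torch

    batch, num_actions = 4, 6
    value = torch.randn(batch, 1)                 # V(s) stream
    advantage = torch.randn(batch, num_actions)   # A(s, a) stream
    # Q(s, a) = V(s) + A(s, a) - mean_a A(s, a); centering the advantages
    # per state makes the value/advantage decomposition identifiable.
    q = value + advantage - advantage.mean(dim=1, keepdim=True)
    assert q.shape == (batch, num_actions)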
lepoeme20/vision
[ "eac3dc7bab436725b0ba65e556d3a6ffd43c24e1" ]
[ "torchvision/prototype/datasets/utils/_internal.py" ]
[ "import enum\nimport functools\nimport gzip\nimport io\nimport lzma\nimport mmap\nimport os\nimport os.path\nimport pathlib\nimport pickle\nimport platform\nfrom typing import BinaryIO\nfrom typing import (\n Sequence,\n Callable,\n Union,\n Any,\n Tuple,\n TypeVar,\n Iterator,\n Dict,\n Optional,\n IO,\n Sized,\n)\nfrom typing import cast\n\nimport numpy as np\nimport PIL.Image\nimport torch\nimport torch.distributed as dist\nimport torch.utils.data\nfrom torchdata.datapipes.iter import IoPathFileLister, IoPathFileLoader, IterDataPipe, ShardingFilter\nfrom torchdata.datapipes.utils import StreamWrapper\n\n\n__all__ = [\n \"INFINITE_BUFFER_SIZE\",\n \"BUILTIN_DIR\",\n \"read_mat\",\n \"image_buffer_from_array\",\n \"SequenceIterator\",\n \"MappingIterator\",\n \"Enumerator\",\n \"getitem\",\n \"path_accessor\",\n \"path_comparator\",\n \"Decompressor\",\n \"fromfile\",\n \"read_flo\",\n \"hint_sharding\",\n]\n\nK = TypeVar(\"K\")\nD = TypeVar(\"D\")\n\n# pseudo-infinite until a true infinite buffer is supported by all datapipes\nINFINITE_BUFFER_SIZE = 1_000_000_000\n\nBUILTIN_DIR = pathlib.Path(__file__).parent.parent / \"_builtin\"\n\n\ndef read_mat(buffer: io.IOBase, **kwargs: Any) -> Any:\n try:\n import scipy.io as sio\n except ImportError as error:\n raise ModuleNotFoundError(\"Package `scipy` is required to be installed to read .mat files.\") from error\n\n if isinstance(buffer, StreamWrapper):\n buffer = buffer.file_obj\n\n return sio.loadmat(buffer, **kwargs)\n\n\ndef image_buffer_from_array(array: np.ndarray, *, format: str = \"png\") -> io.BytesIO:\n image = PIL.Image.fromarray(array)\n buffer = io.BytesIO()\n image.save(buffer, format=format)\n buffer.seek(0)\n return buffer\n\n\nclass SequenceIterator(IterDataPipe[D]):\n def __init__(self, datapipe: IterDataPipe[Sequence[D]]):\n self.datapipe = datapipe\n\n def __iter__(self) -> Iterator[D]:\n for sequence in self.datapipe:\n yield from iter(sequence)\n\n\nclass MappingIterator(IterDataPipe[Union[Tuple[K, D], D]]):\n def __init__(self, datapipe: IterDataPipe[Dict[K, D]], *, drop_key: bool = False) -> None:\n self.datapipe = datapipe\n self.drop_key = drop_key\n\n def __iter__(self) -> Iterator[Union[Tuple[K, D], D]]:\n for mapping in self.datapipe:\n yield from iter(mapping.values() if self.drop_key else mapping.items())\n\n\nclass Enumerator(IterDataPipe[Tuple[int, D]]):\n def __init__(self, datapipe: IterDataPipe[D], start: int = 0) -> None:\n self.datapipe = datapipe\n self.start = start\n\n def __iter__(self) -> Iterator[Tuple[int, D]]:\n yield from enumerate(self.datapipe, self.start)\n\n\ndef _getitem_closure(obj: Any, *, items: Tuple[Any, ...]) -> Any:\n for item in items:\n obj = obj[item]\n return obj\n\n\ndef getitem(*items: Any) -> Callable[[Any], Any]:\n return functools.partial(_getitem_closure, items=items)\n\n\ndef _path_attribute_accessor(path: pathlib.Path, *, name: str) -> D:\n return cast(D, getattr(path, name))\n\n\ndef _path_accessor_closure(data: Tuple[str, Any], *, getter: Callable[[pathlib.Path], D]) -> D:\n return getter(pathlib.Path(data[0]))\n\n\ndef path_accessor(getter: Union[str, Callable[[pathlib.Path], D]]) -> Callable[[Tuple[str, Any]], D]:\n if isinstance(getter, str):\n getter = functools.partial(_path_attribute_accessor, name=getter)\n\n return functools.partial(_path_accessor_closure, getter=getter)\n\n\ndef _path_comparator_closure(data: Tuple[str, Any], *, accessor: Callable[[Tuple[str, Any]], D], value: D) -> bool:\n return accessor(data) == value\n\n\ndef path_comparator(getter: 
Union[str, Callable[[pathlib.Path], D]], value: D) -> Callable[[Tuple[str, Any]], bool]:\n return functools.partial(_path_comparator_closure, accessor=path_accessor(getter), value=value)\n\n\nclass CompressionType(enum.Enum):\n GZIP = \"gzip\"\n LZMA = \"lzma\"\n\n\nclass Decompressor(IterDataPipe[Tuple[str, io.IOBase]]):\n types = CompressionType\n\n _DECOMPRESSORS = {\n types.GZIP: lambda file: gzip.GzipFile(fileobj=file),\n types.LZMA: lambda file: lzma.LZMAFile(file),\n }\n\n def __init__(\n self,\n datapipe: IterDataPipe[Tuple[str, io.IOBase]],\n *,\n type: Optional[Union[str, CompressionType]] = None,\n ) -> None:\n self.datapipe = datapipe\n if isinstance(type, str):\n type = self.types(type.upper())\n self.type = type\n\n def _detect_compression_type(self, path: str) -> CompressionType:\n if self.type:\n return self.type\n\n # TODO: this needs to be more elaborate\n ext = os.path.splitext(path)[1]\n if ext == \".gz\":\n return self.types.GZIP\n elif ext == \".xz\":\n return self.types.LZMA\n else:\n raise RuntimeError(\"FIXME\")\n\n def __iter__(self) -> Iterator[Tuple[str, io.IOBase]]:\n for path, file in self.datapipe:\n type = self._detect_compression_type(path)\n decompressor = self._DECOMPRESSORS[type]\n yield path, decompressor(file)\n\n\nclass PicklerDataPipe(IterDataPipe):\n def __init__(self, source_datapipe: IterDataPipe[Tuple[str, IO[bytes]]]) -> None:\n self.source_datapipe = source_datapipe\n\n def __iter__(self) -> Iterator[Any]:\n for _, fobj in self.source_datapipe:\n data = pickle.load(fobj)\n for _, d in enumerate(data):\n yield d\n\n\nclass SharderDataPipe(torch.utils.data.datapipes.iter.grouping.ShardingFilterIterDataPipe):\n def __init__(self, source_datapipe: IterDataPipe) -> None:\n super().__init__(source_datapipe)\n self.rank = 0\n self.world_size = 1\n if dist.is_available() and dist.is_initialized():\n self.rank = dist.get_rank()\n self.world_size = dist.get_world_size()\n self.apply_sharding(self.world_size, self.rank)\n\n def __iter__(self) -> Iterator[Any]:\n num_workers = self.world_size\n worker_id = self.rank\n worker_info = torch.utils.data.get_worker_info()\n if worker_info is not None:\n worker_id = worker_id + worker_info.id * num_workers\n num_workers *= worker_info.num_workers\n self.apply_sharding(num_workers, worker_id)\n yield from super().__iter__()\n\n\nclass TakerDataPipe(IterDataPipe):\n def __init__(self, source_datapipe: IterDataPipe, num_take: int) -> None:\n super().__init__()\n self.source_datapipe = source_datapipe\n self.num_take = num_take\n self.world_size = 1\n if dist.is_available() and dist.is_initialized():\n self.world_size = dist.get_world_size()\n\n def __iter__(self) -> Iterator[Any]:\n num_workers = self.world_size\n worker_info = torch.utils.data.get_worker_info()\n if worker_info is not None:\n num_workers *= worker_info.num_workers\n\n # TODO: this is weird as it drops more elements than it should\n num_take = self.num_take // num_workers\n\n for i, data in enumerate(self.source_datapipe):\n if i < num_take:\n yield data\n else:\n break\n\n def __len__(self) -> int:\n num_take = self.num_take // self.world_size\n if isinstance(self.source_datapipe, Sized):\n if len(self.source_datapipe) < num_take:\n num_take = len(self.source_datapipe)\n # TODO: might be weird to not take `num_workers` into account\n return num_take\n\n\ndef _make_sharded_datapipe(root: str, dataset_size: int) -> IterDataPipe[Dict[str, Any]]:\n dp = IoPathFileLister(root=root)\n dp = SharderDataPipe(dp)\n dp = 
dp.shuffle(buffer_size=INFINITE_BUFFER_SIZE)\n dp = IoPathFileLoader(dp, mode=\"rb\")\n dp = PicklerDataPipe(dp)\n # dp = dp.cycle(2)\n dp = TakerDataPipe(dp, dataset_size)\n return dp\n\n\ndef _read_mutable_buffer_fallback(file: BinaryIO, count: int, item_size: int) -> bytearray:\n # A plain file.read() will give a read-only bytes, so we convert it to bytearray to make it mutable\n return bytearray(file.read(-1 if count == -1 else count * item_size))\n\n\ndef fromfile(\n file: BinaryIO,\n *,\n dtype: torch.dtype,\n byte_order: str,\n count: int = -1,\n) -> torch.Tensor:\n \"\"\"Construct a tensor from a binary file.\n\n .. note::\n\n This function is similar to :func:`numpy.fromfile` with two notable differences:\n\n 1. This function only accepts an open binary file, but not a path to it.\n 2. This function has an additional ``byte_order`` parameter, since PyTorch's ``dtype``'s do not support that\n concept.\n\n .. note::\n\n If the ``file`` was opened in update mode, i.e. \"r+b\" or \"w+b\", reading data is much faster. Be aware that as\n long as the file is still open, inplace operations on the returned tensor will reflect back to the file.\n\n Args:\n file (IO): Open binary file.\n dtype (torch.dtype): Data type of the underlying data as well as of the returned tensor.\n byte_order (str): Byte order of the data. Can be \"little\" or \"big\" endian.\n count (int): Number of values of the returned tensor. If ``-1`` (default), will read the complete file.\n \"\"\"\n byte_order = \"<\" if byte_order == \"little\" else \">\"\n char = \"f\" if dtype.is_floating_point else (\"i\" if dtype.is_signed else \"u\")\n item_size = (torch.finfo if dtype.is_floating_point else torch.iinfo)(dtype).bits // 8\n np_dtype = byte_order + char + str(item_size)\n\n buffer: Union[memoryview, bytearray]\n if platform.system() != \"Windows\":\n # PyTorch does not support tensors with underlying read-only memory. In case\n # - the file has a .fileno(),\n # - the file was opened for updating, i.e. 'r+b' or 'w+b',\n # - the file is seekable\n # we can avoid copying the data for performance. Otherwise we fall back to simply .read() the data and copy it\n # to a mutable location afterwards.\n try:\n buffer = memoryview(mmap.mmap(file.fileno(), 0))[file.tell() :]\n # Reading from the memoryview does not advance the file cursor, so we have to do it manually.\n file.seek(*(0, io.SEEK_END) if count == -1 else (count * item_size, io.SEEK_CUR))\n except (PermissionError, io.UnsupportedOperation):\n buffer = _read_mutable_buffer_fallback(file, count, item_size)\n else:\n # On Windows just trying to call mmap.mmap() on a file that does not support it, may corrupt the internal state\n # so no data can be read afterwards. Thus, we simply ignore the possible speed-up.\n buffer = _read_mutable_buffer_fallback(file, count, item_size)\n\n # We cannot use torch.frombuffer() directly, since it only supports the native byte order of the system. Thus, we\n # read the data with np.frombuffer() with the correct byte order and convert it to the native one with the\n # successive .astype() call.\n return torch.from_numpy(np.frombuffer(buffer, dtype=np_dtype, count=count).astype(np_dtype[1:], copy=False))\n\n\ndef read_flo(file: BinaryIO) -> torch.Tensor:\n if file.read(4) != b\"PIEH\":\n raise ValueError(\"Magic number incorrect. 
Invalid .flo file\")\n\n width, height = fromfile(file, dtype=torch.int32, byte_order=\"little\", count=2)\n flow = fromfile(file, dtype=torch.float32, byte_order=\"little\", count=height * width * 2)\n return flow.reshape((height, width, 2)).permute((2, 0, 1))\n\n\ndef hint_sharding(datapipe: IterDataPipe[D]) -> IterDataPipe[D]:\n return ShardingFilter(datapipe)\n" ]
[ [ "torch.distributed.is_initialized", "scipy.io.loadmat", "torch.utils.data.get_worker_info", "numpy.frombuffer", "torch.distributed.is_available", "torch.distributed.get_rank", "torch.distributed.get_world_size" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
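For reference, a minimal, self-contained sketch of the byte-order technique that `fromfile`/`read_flo` above rely on: `torch.frombuffer` only supports the native byte order, so data is read with `np.frombuffer` using an explicit dtype string and converted to native order before being handed to torch. The in-memory `.flo` payload below is a made-up stand-in for a real file, not part of the torchvision code:

```python
import io
import struct

import numpy as np
import torch

# Build a tiny fake .flo payload in memory: the b"PIEH" magic, then width and
# height as little-endian int32, then width*height*2 little-endian float32
# flow values -- the layout read_flo above expects.
width, height = 3, 2
flow_values = np.arange(width * height * 2, dtype="<f4")
buffer = io.BytesIO(b"PIEH" + struct.pack("<ii", width, height) + flow_values.tobytes())

assert buffer.read(4) == b"PIEH"  # magic-number check, as in read_flo

# The core fromfile trick: read with an explicit byte order ("<i4" means
# little-endian int32), then .astype() to the native order for torch.
w, h = (int(v) for v in np.frombuffer(buffer.read(8), dtype="<i4").astype("i4"))
data = np.frombuffer(buffer.read(), dtype="<f4", count=h * w * 2).astype("f4")

flow = torch.from_numpy(data).reshape(h, w, 2).permute(2, 0, 1)
print(flow.shape)  # torch.Size([2, 2, 3]): (2, height, width)
```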
USEPA/LCIAformatter
[ "e803dccf81b1f7d7441e576909cdac3f823b40ce" ]
[ "lciafmt/recipe.py" ]
[ "# recipe.py (lciafmt)\n# !/usr/bin/env python3\n# coding=utf-8\n\"\"\"\nThis module contains functions needed to compile LCIA methods from the\nReCiPe model\n\"\"\"\n\nimport pandas as pd\nimport openpyxl\n\nimport lciafmt.cache as cache\nimport lciafmt.df as dfutil\nimport lciafmt.xls as xls\n\nfrom .util import datapath, aggregate_factors_for_primary_contexts, log,\\\n format_cas\n\n\ncontexts = {\n 'urban air': 'air/urban',\n 'Urban air': 'air/urban',\n 'Rural air': 'air/rural',\n 'rural air': 'air/rural',\n 'agricultural soil': 'soil/agricultural',\n 'Agricultural soil': 'soil/agricultural',\n 'industrial soil': 'soil/industrial',\n 'Industrial soil': 'soil/industrial',\n 'freshwater': 'water/freshwater',\n 'Freshwater': 'water/freshwater',\n 'fresh water': 'water/freshwater',\n 'seawater': 'water/sea water',\n 'sea water': 'water/sea water',\n 'Sea water': 'water/sea water',\n 'marine water': 'water/sea water'}\nflowables_split = pd.read_csv(datapath + 'ReCiPe2016_split.csv')\n\n\ndef get(add_factors_for_missing_contexts=True, endpoint=True,\n summary=False, file=None, url=None) -> pd.DataFrame:\n \"\"\"Generate a method for ReCiPe 2016 in standard format.\n\n :param add_factors_for_missing_contexts: bool, if True generates average\n factors for unspecified contexts\n :param endpoint: bool, if True generates endpoint indicators from midpoints\n :param summary: bool, if True aggregates endpoint methods into\n summary indicators\n :param file: str, alternate filepath for method, defaults to file stored\n in cache\n :param url: str, alternate url for method, defaults to url in method config\n :return: DataFrame of method in standard format\n \"\"\"\n log.info(\"getting method ReCiPe 2016\")\n f = file\n if f is None:\n fname = \"recipe_2016.xlsx\"\n if url is None:\n url = (\"http://www.rivm.nl/sites/default/files/2018-11/\" +\n \"ReCiPe2016_CFs_v1.1_20180117.xlsx\")\n f = cache.get_or_download(fname, url)\n df = _read(f)\n if add_factors_for_missing_contexts:\n log.info(\"adding average factors for primary contexts\")\n df = aggregate_factors_for_primary_contexts(df)\n\n if endpoint:\n endpoint_df, endpoint_df_by_flow = _read_endpoints(f)\n log.info(\"converting midpoints to endpoints\")\n # first assesses endpoint factors that are specific to flowables\n flowdf = df.merge(endpoint_df_by_flow, how=\"inner\",\n on=[\"Method\", \"Flowable\"])\n flowdf.rename(columns={'Indicator_x': 'Indicator',\n 'Indicator_y': 'EndpointIndicator'},\n inplace=True)\n # next apply endpoint factors by indicator\n df2 = df.merge(endpoint_df, how=\"inner\", on=[\"Method\", \"Indicator\"])\n df2 = df2.append(flowdf, ignore_index=True, sort=False)\n # reformat dataframe and apply conversion\n df2['Characterization Factor'] = df2['Characterization Factor'] * df2['EndpointConversion']\n df2['Method'] = df2['EndpointMethod']\n df2['Indicator'] = df2['EndpointIndicator']\n df2['Indicator unit'] = df2['EndpointUnit']\n df2.drop(columns=['EndpointMethod', 'EndpointIndicator',\n 'EndpointUnit', 'EndpointConversion'],\n inplace=True)\n df = df.append(df2, ignore_index=True, sort=False)\n\n log.info(\"handling manual replacements\")\n \"\"\"due to substances listed more than once with the same name but\n different CAS, this replaces all instances of the Original Flowable with\n a New Flowable based on a csv input file according to the CAS\"\"\"\n for index, row in flowables_split.iterrows():\n newCAS = format_cas(row['CAS'])\n newFlow = row['New Flowable']\n df.loc[df['CAS No'] == newCAS, 
'Flowable'] = newFlow\n\n length = len(df)\n df.drop_duplicates(keep='first', inplace=True)\n length = length - len(df)\n log.info(f\"{length} duplicate entries removed\")\n\n if summary:\n log.info(\"summarizing endpoint categories\")\n endpoint_categories = df.groupby(['Method', 'Method UUID',\n 'Indicator unit', 'Flowable',\n 'Flow UUID', 'Context', 'Unit',\n 'CAS No', 'Location',\n 'Location UUID', 'EndpointCategory'],\n as_index=False)['Characterization Factor'].sum()\n endpoint_categories['Indicator'] = endpoint_categories['EndpointCategory']\n endpoint_categories['Indicator UUID'] = \"\"\n endpoint_categories.drop(columns=['EndpointCategory'], inplace=True)\n\n # To append endpoint categories to existing endpoint LCIA,\n # set append = True, otherwise replaces endpoint LCIA\n append = False\n if append:\n log.info(\"appending endpoint categories\")\n df = pd.concat([df, endpoint_categories], sort=False)\n else:\n log.info(\"applying endpoint categories\")\n df = endpoint_categories\n\n # reorder columns in DF\n df = df.reindex(columns=[\"Method\", \"Method UUID\", \"Indicator\",\n \"Indicator UUID\", \"Indicator unit\", \"Flowable\",\n \"Flow UUID\", \"Context\", \"Unit\", \"CAS No\",\n \"Location\", \"Location UUID\",\n \"Characterization Factor\"])\n return df\n\n\ndef _read(file: str) -> pd.DataFrame:\n log.info(f\"read ReCiPe 2016 from file {file}\")\n wb = openpyxl.load_workbook(file, read_only=True, data_only=True)\n records = []\n for name in wb.sheetnames:\n if _eqstr(name, \"Version\") or _eqstr(\n name, \"Midpoint to endpoint factors\"):\n continue\n _read_mid_points(wb[name], records)\n\n return dfutil.data_frame(records)\n\n\ndef _read_endpoints(file: str) -> (pd.DataFrame, pd.DataFrame):\n log.info(f\"reading endpoint factors from file {file}\")\n wb = openpyxl.load_workbook(file, read_only=True, data_only=True)\n endpoint_cols = ['Method', 'EndpointMethod', 'EndpointIndicator',\n 'EndpointUnit', 'EndpointConversion']\n endpoint = pd.DataFrame(columns=endpoint_cols)\n endpoints = []\n perspectives = [\"I\", \"H\", \"E\"]\n indicator = \"\"\n indicator_unit = \"\"\n sheet = wb['Midpoint to endpoint factors']\n start_row, data_col, with_perspectives = _find_data_start(sheet)\n # impact categories in column 1\n flow_col = 0\n\n endpoint_factor_count = 0\n for row in sheet.iter_rows(min_row=start_row):\n indicator = xls.cell_str(row[flow_col])\n indicator_unit = xls.cell_str(row[flow_col+1])\n for i in range(0, 3):\n val = xls.cell_f64(row[data_col + i])\n if val == 0.0:\n continue\n endpoints.append(\"ReCiPe 2016 - Midpoint/\" + perspectives[i])\n endpoints.append(\"ReCiPe 2016 - Endpoint/\" + perspectives[i])\n endpoints.append(indicator)\n endpoints.append(indicator_unit)\n endpoints.append(val)\n to_add = pd.Series(endpoints, index=endpoint_cols)\n endpoint = endpoint.append(to_add, ignore_index=True)\n endpoints = []\n endpoint_factor_count += 1\n log.debug(\"extracted %i endpoint factors\", endpoint_factor_count)\n\n log.info(\"processing endpoint factors\")\n endpoint.loc[endpoint['EndpointUnit'].str.contains('daly', case=False), 'EndpointUnit'] = 'DALY'\n endpoint.loc[endpoint['EndpointUnit'].str.contains('species', case=False), 'EndpointUnit'] = 'species-year'\n endpoint.loc[endpoint['EndpointUnit'].str.contains('USD', case=False), 'EndpointUnit'] = 'USD2013'\n\n endpoint_map = pd.read_csv(datapath + 'ReCiPe2016_endpoint_to_midpoint.csv')\n endpoint = endpoint.merge(endpoint_map, how=\"left\", on='EndpointIndicator')\n\n # split into two dataframes\n endpoint_by_flow = 
endpoint[endpoint['FlowFlag'] == 1]\n endpoint_by_flow = endpoint_by_flow.drop(columns='FlowFlag')\n endpoint_by_flow.rename(columns={'EndpointIndicator': 'Flowable'},\n inplace=True)\n endpoint = endpoint[endpoint['FlowFlag'].isna()]\n endpoint = endpoint.drop(columns='FlowFlag')\n # return endpoint and endpoint by flow\n return endpoint, endpoint_by_flow\n\n\ndef _read_mid_points(sheet: openpyxl.worksheet.worksheet.Worksheet,\n records: list):\n log.debug(\"try to read midpoint factors from sheet %s\", sheet.title)\n\n start_row, data_col, with_perspectives = _find_data_start(sheet)\n if start_row < 0:\n log.debug(\"could not find a value column in sheet %s\", sheet.title)\n return\n\n flow_col = _find_flow_column(sheet)\n if flow_col < 0:\n return\n\n cas_col = _find_cas_column(sheet)\n indicator_unit, flow_unit, unit_col = _determine_units(sheet)\n compartment, compartment_col = _determine_compartments(sheet)\n\n perspectives = [\"I\", \"H\", \"E\"]\n factor_count = 0\n for row in sheet.iter_rows(min_row=start_row):\n if compartment_col > -1:\n compartment = xls.cell_str(row[compartment_col])\n if compartment in contexts:\n compartment = contexts[compartment]\n if unit_col > -1:\n flow_unit = xls.cell_str(row[unit_col])\n if \"/\" in flow_unit:\n flow_unit = flow_unit.split(\"/\")[1].strip()\n cas = \"\"\n if cas_col > -1:\n cas = format_cas(xls.cell_f64(row[cas_col]))\n\n if with_perspectives:\n for i in range(0, 3):\n val = xls.cell_f64(row[data_col + i])\n if val == 0.0:\n continue\n dfutil.record(records,\n method=\"ReCiPe 2016 - Midpoint/\" + perspectives[i],\n indicator=sheet.title,\n indicator_unit=indicator_unit,\n flow=xls.cell_str(row[flow_col]),\n flow_category=compartment,\n flow_unit=flow_unit,\n cas_number=cas,\n factor=val)\n factor_count += 1\n else:\n val = xls.cell_f64(row[data_col])\n if val == 0.0:\n continue\n for p in perspectives:\n dfutil.record(records,\n method=\"ReCiPe 2016 - Midpoint/\" + p,\n indicator=sheet.title,\n indicator_unit=indicator_unit,\n flow=xls.cell_str(row[flow_col]),\n flow_category=compartment,\n flow_unit=flow_unit,\n cas_number=cas,\n factor=val)\n factor_count += 1\n log.debug(\"extracted %i factors\", factor_count)\n\n\ndef _find_data_start(sheet: openpyxl.worksheet.worksheet.Worksheet) -> (int, int, bool):\n for row in sheet.iter_rows():\n for cell in row:\n s = xls.cell_str(cell)\n if s is None or s == \"\":\n continue\n if _eqstr(s, \"I\") or _containstr(s, \"Individualist\") or _containstr(s, \"Individualistic\"):\n return cell.row + 1, cell.column - 1, True\n if _eqstr(s, \"all perspectives\"):\n return cell.row + 1, cell.column - 1, False\n return -1, -1, False\n\n\ndef _find_flow_column(sheet: openpyxl.worksheet.worksheet.Worksheet) -> int:\n if _containstr(sheet.title, \"land\", \"occupation\"):\n ncol = 1\n return ncol\n ncol = -1\n for row in sheet.iter_rows():\n for cell in row:\n s = xls.cell_str(cell)\n if _containstr(s, \"name\") or _containstr(s, \"substance\"):\n ncol = cell.column - 1\n log.debug(\"identified column %i %s for flow names\", ncol, s)\n break\n if ncol < 0:\n log.debug(\"no 'name' column in %s, take col=0 for that\", sheet.title)\n ncol = 0\n return ncol\n\n\ndef _find_cas_column(sheet: openpyxl.worksheet.worksheet.Worksheet) -> int:\n ccol = -1\n for row in sheet.iter_rows():\n for cell in row:\n s = xls.cell_str(cell)\n if _eqstr(s, \"cas\"):\n ccol = cell.column - 1\n log.debug(\"identified column %i %s for CAS numbers\", ccol, s)\n break\n return ccol\n\n\ndef _determine_units(sheet: 
openpyxl.worksheet.worksheet.Worksheet) -> (str, str, int):\n indicator_unit = \"?\"\n flow_unit = \"?\"\n unit_col = -1\n row, col, _ = _find_data_start(sheet)\n row -= 2\n\n if row > 0:\n s = xls.cell_str(sheet.cell(row=row, column=col + 1))\n if s is not None and s != \"\":\n if \"/\" in s:\n parts = s.strip(\" ()\").split(\"/\")\n indicator_unit = parts[0].strip()\n flow_unit = parts[1].strip()\n else:\n indicator_unit = s.strip()\n\n for count, row in enumerate(sheet.iter_rows()):\n if count > 5:\n break\n for cell in row:\n s = xls.cell_str(cell)\n if _eqstr(s, \"Unit\"):\n unit_col = cell.column - 1\n break\n\n if indicator_unit != \"?\":\n log.debug(\"determined indicator unit: %s\", indicator_unit)\n elif _containstr(sheet.title, \"land\", \"transformation\"):\n log.debug(\"unknown indicator unit; assuming it is m2\")\n indicator_unit = \"m2\"\n elif _containstr(sheet.title, \"land\", \"occupation\"):\n log.debug(\"unknown indicator unit; assuming it is m2*a\")\n indicator_unit = \"m2*a\"\n elif _containstr(sheet.title, \"water\", \"consumption\"):\n log.debug(\"unknown indicator unit; assuming it is m3\")\n indicator_unit = \"m3\"\n else:\n log.debug(\"unknown indicator unit\")\n\n if _containstr(flow_unit, \"kg\"):\n flow_unit = \"kg\"\n\n if unit_col > -1:\n log.debug(\"take units from column %i\", unit_col)\n elif flow_unit != \"?\":\n log.debug(\"determined flow unit: %s\", flow_unit)\n elif _containstr(sheet.title, \"land\", \"transformation\"):\n log.debug(\"unknown flow unit; assume it is m2\")\n flow_unit = \"m2\"\n elif _containstr(sheet.title, \"land\", \"occupation\"):\n log.debug(\"unknown flow unit; assuming it is m2*a\")\n flow_unit = \"m2*a\"\n elif _containstr(sheet.title, \"water\", \"consumption\"):\n log.debug(\"unknown flow unit; assuming it is m3\")\n flow_unit = \"m3\"\n else:\n log.debug(\"unknown flow unit; assuming it is 'kg'\")\n flow_unit = \"kg\"\n\n return indicator_unit, flow_unit, unit_col\n\n\ndef _determine_compartments(sheet: openpyxl.worksheet.worksheet.Worksheet) -> (str, int):\n compartment_col = -1\n for count, row in enumerate(sheet.iter_rows()):\n if count > 5:\n break\n for cell in row:\n s = xls.cell_str(cell)\n if _containstr(s, \"compartment\") or _containstr(\n s, \"name\", \"in\", \"ReCiPe\"):\n compartment_col = cell.column - 1\n break\n\n if compartment_col > -1:\n log.debug(\"found compartment column %i\", compartment_col)\n return \"\", compartment_col\n\n elif _containstr(sheet.title, \"global\", \"warming\") \\\n or _containstr(sheet.title, \"ozone\") \\\n or _containstr(sheet.title, \"particulate\") \\\n or _containstr(sheet.title, \"acidification\"):\n log.debug(\"no compartment column; assuming 'air'\")\n return \"air\", -1\n\n elif _containstr(sheet.title, \"mineral\", \"resource\", \"scarcity\"):\n log.debug(\"no compartment column; assuming 'resource/ground'\")\n return \"resource/ground\", -1\n\n elif _containstr(sheet.title, \"fossil\", \"resource\", \"scarcity\"):\n log.debug(\"no compartment column; assuming 'resource'\")\n return \"resource\", -1\n\n if _containstr(sheet.title, \"water\", \"consumption\"):\n log.debug(\"no compartment column; assuming 'resource/fresh water'\")\n return \"resource/fresh water\", -1\n\n log.debug(\"no compartment column\")\n return \"\", -1\n\n\ndef _eqstr(s1: str, s2: str) -> bool:\n if s1 is None or s2 is None:\n return False\n return s1.strip().lower() == s2.strip().lower()\n\n\ndef _containstr(s: str, *words) -> bool:\n if s is None:\n return False\n base = s.lower()\n for w in 
words:\n if not isinstance(w, str):\n return False\n if w.lower().strip() not in base:\n return False\n return True\n" ]
[ [ "pandas.concat", "pandas.read_csv", "pandas.Series", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
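As a usage illustration, here is a minimal sketch of the midpoint-to-endpoint conversion performed inside `get()` above, applied to toy DataFrames. The flowables, indicator names, and the `EndpointConversion` value are illustrative placeholders, not values from the ReCiPe source file:

```python
import pandas as pd

# Toy midpoint factors and an endpoint conversion row, mirroring the shapes
# that get() builds from _read() and _read_endpoints().
midpoints = pd.DataFrame({
    "Method": ["ReCiPe 2016 - Midpoint/H"] * 2,
    "Indicator": ["Global warming"] * 2,
    "Indicator unit": ["kg CO2 eq"] * 2,
    "Flowable": ["Carbon dioxide", "Methane"],
    "Characterization Factor": [1.0, 28.0],
})
endpoint_df = pd.DataFrame({
    "Method": ["ReCiPe 2016 - Midpoint/H"],
    "EndpointMethod": ["ReCiPe 2016 - Endpoint/H"],
    "Indicator": ["Global warming"],
    "EndpointIndicator": ["Global warming, Human health"],
    "EndpointUnit": ["DALY"],
    "EndpointConversion": [9.28e-07],  # illustrative conversion factor
})

# Mirror of the merge-and-scale step: endpoint CF = midpoint CF * conversion,
# then the method/indicator/unit columns are overwritten with endpoint values.
df2 = midpoints.merge(endpoint_df, how="inner", on=["Method", "Indicator"])
df2["Characterization Factor"] = df2["Characterization Factor"] * df2["EndpointConversion"]
df2["Method"] = df2["EndpointMethod"]
df2["Indicator"] = df2["EndpointIndicator"]
df2["Indicator unit"] = df2["EndpointUnit"]
df2 = df2.drop(columns=["EndpointMethod", "EndpointIndicator",
                        "EndpointUnit", "EndpointConversion"])
print(df2[["Method", "Indicator", "Indicator unit", "Flowable", "Characterization Factor"]])
```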
youngwoon/robot-learning
[ "70da64466fd02dc4cfc97ad9e123c893fc17acd3", "96af508abfca6aadb38d9c55f01602464fecf460" ]
[ "networks/encoder.py", "algorithms/rollouts.py" ]
[ "\"\"\"\nCode reference:\n https://github.com/MishaLaskin/rad/blob/master/encoder.py\n\"\"\"\n\nimport gym.spaces\nimport torch\nimport torch.nn as nn\n\nfrom .utils import CNN, MLP, flatten_ac\n\n\nclass Encoder(nn.Module):\n def __init__(self, config, ob_space):\n super().__init__()\n\n self._encoder_type = config.encoder_type\n self._ob_space = ob_space\n\n self.base = nn.ModuleDict()\n encoder_output_dim = 0\n for k, v in ob_space.spaces.items():\n if len(v.shape) in [3, 4]:\n if self._encoder_type == \"mlp\":\n self.base[k] = None\n encoder_output_dim += gym.spaces.flatdim(v)\n else:\n if len(v.shape) == 3:\n image_dim = v.shape[0]\n elif len(v.shape) == 4:\n image_dim = v.shape[0] * v.shape[1]\n self.base[k] = CNN(config, image_dim)\n encoder_output_dim += self.base[k].output_dim\n elif len(v.shape) == 1:\n self.base[k] = None\n encoder_output_dim += gym.spaces.flatdim(v)\n else:\n raise ValueError(\"Check the shape of observation %s (%s)\" % (k, v))\n\n self.output_dim = encoder_output_dim\n\n def forward(self, ob, detach_conv=False):\n encoder_outputs = []\n for k, v in ob.items():\n if self.base[k] is not None:\n if isinstance(self.base[k], CNN):\n if v.max() > 1.0:\n v = v.float() / 255.0\n encoder_outputs.append(\n self.base[k](v, detach_conv=detach_conv)\n )\n else:\n encoder_outputs.append(v.flatten(start_dim=1))\n out = torch.cat(encoder_outputs, dim=-1)\n assert len(out.shape) == 2\n return out\n\n def copy_conv_weights_from(self, source):\n \"\"\" Tie convolutional layers \"\"\"\n for k in self.base.keys():\n if self.base[k] is not None:\n self.base[k].copy_conv_weights_from(source.base[k])\n", "\"\"\"\nRuns rollouts (RolloutRunner class) and collects transitions using Rollout class.\n\"\"\"\n\nimport random\nimport pickle\nfrom collections import defaultdict\n\nimport numpy as np\nimport cv2\n\nfrom ..utils.logger import logger\nfrom ..utils.info_dict import Info\nfrom ..utils.gym_env import get_non_absorbing_state, zero_value\n\n\nclass Rollout(object):\n \"\"\"\n Rollout storing an episode.\n \"\"\"\n\n def __init__(self):\n \"\"\" Initialize buffer. \"\"\"\n self._history = defaultdict(list)\n\n def add(self, data):\n \"\"\" Add a transition @data to rollout buffer. \"\"\"\n for key, value in data.items():\n self._history[key].append(value)\n\n def get(self):\n \"\"\" Returns rollout buffer and clears buffer. 
\"\"\"\n batch = {}\n batch[\"ob\"] = self._history[\"ob\"]\n batch[\"ob_next\"] = self._history[\"ob_next\"]\n batch[\"ac\"] = self._history[\"ac\"]\n batch[\"ac_before_activation\"] = self._history[\"ac_before_activation\"]\n batch[\"done\"] = self._history[\"done\"]\n batch[\"done_mask\"] = self._history[\"done_mask\"]\n batch[\"rew\"] = self._history[\"rew\"]\n self._history = defaultdict(list)\n return batch\n\n\nclass RolloutRunner(object):\n \"\"\"\n Run rollout given environment and policy.\n \"\"\"\n\n def __init__(self, config, env, env_eval, pi):\n \"\"\"\n Args:\n config: configurations for the environment.\n env: environment.\n env_eval: environment for evaluation.\n pi: policy.\n \"\"\"\n\n self._config = config\n self._env = env\n self._env_eval = env_eval\n self._pi = pi\n\n def run(\n self,\n is_train=True,\n every_steps=None,\n every_episodes=None,\n log_prefix=\"\",\n step=0,\n ):\n \"\"\"\n Collects trajectories and yield every @every_steps/@every_episodes.\n\n Args:\n is_train: whether rollout is for training or evaluation.\n every_steps: if not None, returns rollouts @every_steps\n every_episodes: if not None, returns rollouts @every_episodes\n log_prefix: log as @log_prefix rollout: %s\n \"\"\"\n if every_steps is None and every_episodes is None:\n raise ValueError(\"Both every_steps and every_episodes cannot be None\")\n\n config = self._config\n device = config.device\n env = self._env if is_train else self._env_eval\n pi = self._pi\n il = hasattr(pi, \"predict_reward\")\n\n # initialize rollout buffer\n rollout = Rollout()\n reward_info = Info()\n ep_info = Info()\n episode = 0\n\n while True:\n done = False\n ep_len = 0\n ep_rew = 0\n ep_rew_rl = 0\n if il:\n ep_rew_il = 0\n ob = env.reset()\n\n # run rollout\n while not done:\n # sample action from policy\n if step < config.warm_up_steps:\n ac, ac_before_activation = env.action_space.sample(), 0\n else:\n ac, ac_before_activation = pi.act(ob, is_train=is_train)\n\n rollout.add(\n {\"ob\": ob, \"ac\": ac, \"ac_before_activation\": ac_before_activation}\n )\n\n if il:\n reward_il = pi.predict_reward(ob, ac)\n\n # take a step\n ob, reward, done, info = env.step(ac)\n rollout.add({\"ob_next\": ob})\n\n # replace reward\n if il:\n reward_rl = (\n 1 - config.gail_env_reward\n ) * reward_il + config.gail_env_reward * reward\n else:\n reward_rl = reward\n\n rollout.add({\"done\": done, \"rew\": reward})\n step += 1\n ep_len += 1\n ep_rew += reward\n ep_rew_rl += reward_rl\n if il:\n ep_rew_il += reward_il\n\n if done and ep_len < env.max_episode_steps:\n done_mask = 0 # -1 absorbing, 0 done, 1 not done\n else:\n done_mask = 1\n\n rollout.add(\n {\"done_mask\": done_mask}\n ) # -1 absorbing, 0 done, 1 not done\n\n reward_info.add(info)\n\n if config.absorbing_state and done_mask == 0:\n absorbing_state = env.get_absorbing_state()\n absorbing_action = zero_value(env.action_space)\n rollout._history[\"ob_next\"][-1] = absorbing_state\n rollout.add(\n {\n \"ob\": absorbing_state,\n \"ob_next\": absorbing_state,\n \"ac\": absorbing_action,\n \"ac_before_activation\": absorbing_action,\n \"rew\": 0.0,\n \"done\": 0,\n \"done_mask\": -1, # -1 absorbing, 0 done, 1 not done\n }\n )\n\n if every_steps is not None and step % every_steps == 0:\n yield rollout.get(), ep_info.get_dict(only_scalar=True)\n\n # compute average/sum of information\n ep_info.add({\"len\": ep_len, \"rew\": ep_rew, \"rew_rl\": ep_rew_rl})\n if il:\n ep_info.add({\"rew_il\": ep_rew_il})\n reward_info_dict = reward_info.get_dict(reduction=\"sum\", only_scalar=True)\n ep_info.add(reward_info_dict)\n 
reward_info_dict.update({\"len\": ep_len, \"rew\": ep_rew, \"rew_rl\": ep_rew_rl})\n if il:\n reward_info_dict.update({\"rew_il\": ep_rew_il})\n\n logger.info(\n log_prefix + \" rollout: %s\",\n {\n k: v\n for k, v in reward_info_dict.items()\n if not \"qpos\" in k and np.isscalar(v)\n },\n )\n\n episode += 1\n if every_episodes is not None and episode % every_episodes == 0:\n yield rollout.get(), ep_info.get_dict(only_scalar=True)\n\n def run_episode(self, max_step=10000, is_train=True, record_video=False):\n \"\"\"\n Runs one episode and returns the rollout (mainly for evaluation).\n\n Args:\n max_step: maximum number of steps of the rollout.\n is_train: whether rollout is for training or evaluation.\n record_video: record video of rollout if True.\n \"\"\"\n config = self._config\n device = config.device\n env = self._env if is_train else self._env_eval\n pi = self._pi\n il = hasattr(pi, \"predict_reward\")\n\n # initialize rollout buffer\n rollout = Rollout()\n reward_info = Info()\n\n done = False\n ep_len = 0\n ep_rew = 0\n ep_rew_rl = 0\n if il:\n ep_rew_il = 0\n\n ob = env.reset()\n\n self._record_frames = []\n if record_video:\n self._store_frame(env, ep_len, ep_rew)\n\n # run rollout\n while not done and ep_len < max_step:\n # sample action from policy\n ac, ac_before_activation = pi.act(ob, is_train=is_train)\n rollout.add(\n {\"ob\": ob, \"ac\": ac, \"ac_before_activation\": ac_before_activation}\n )\n\n if il:\n reward_il = pi.predict_reward(ob, ac)\n\n # take a step\n ob, reward, done, info = env.step(ac)\n\n # replace reward\n if il:\n reward_rl = (\n 1 - config.gail_env_reward\n ) * reward_il + config.gail_env_reward * reward\n else:\n reward_rl = reward\n\n rollout.add({\"done\": done, \"rew\": reward})\n ep_len += 1\n ep_rew += reward\n ep_rew_rl += reward_rl\n if il:\n ep_rew_il += reward_il\n\n reward_info.add(info)\n if record_video:\n frame_info = info.copy()\n if il:\n frame_info.update(\n {\n \"ep_rew_il\": ep_rew_il,\n \"rew_il\": reward_il,\n \"rew_rl\": reward_rl,\n }\n )\n self._store_frame(env, ep_len, ep_rew, frame_info)\n\n # add last observation\n rollout.add({\"ob\": ob})\n\n # compute average/sum of information\n ep_info = {\"len\": ep_len, \"rew\": ep_rew, \"rew_rl\": ep_rew_rl}\n if il:\n ep_info[\"rew_il\"] = ep_rew_il\n ep_info.update(reward_info.get_dict(reduction=\"sum\", only_scalar=True))\n\n return rollout.get(), ep_info, self._record_frames\n\n def _store_frame(self, env, ep_len, ep_rew, info={}):\n \"\"\" Renders a frame and stores in @self._record_frames. 
\"\"\"\n color = (200, 200, 200)\n\n # render video frame\n frame = env.render(\"rgb_array\")\n if len(frame.shape) == 4:\n frame = frame[0]\n if np.max(frame) <= 1.0:\n frame *= 255.0\n\n h, w = frame.shape[:2]\n if h < 500:\n h, w = 500, 500\n frame = cv2.resize(frame, (w, h))\n frame = np.concatenate([frame, np.zeros((h, w, 3))], 0)\n scale = h / 500\n\n # add caption to video frame\n if self._config.record_video_caption:\n text = \"{:4} {}\".format(ep_len, ep_rew)\n font_size = 0.4 * scale\n thickness = 1\n offset = int(12 * scale)\n x, y = int(5 * scale), h + int(10 * scale)\n cv2.putText(\n frame,\n text,\n (x, y),\n cv2.FONT_HERSHEY_SIMPLEX,\n font_size,\n (255, 255, 0),\n thickness,\n cv2.LINE_AA,\n )\n for i, k in enumerate(info.keys()):\n v = info[k]\n key_text = \"{}: \".format(k)\n (key_width, _), _ = cv2.getTextSize(\n key_text, cv2.FONT_HERSHEY_SIMPLEX, font_size, thickness\n )\n\n cv2.putText(\n frame,\n key_text,\n (x, y + offset * (i + 2)),\n cv2.FONT_HERSHEY_SIMPLEX,\n font_size,\n (66, 133, 244),\n thickness,\n cv2.LINE_AA,\n )\n\n cv2.putText(\n frame,\n str(v),\n (x + key_width, y + offset * (i + 2)),\n cv2.FONT_HERSHEY_SIMPLEX,\n font_size,\n (255, 255, 255),\n thickness,\n cv2.LINE_AA,\n )\n\n self._record_frames.append(frame)\n" ]
[ [ "torch.nn.ModuleDict", "torch.cat" ], [ "numpy.max", "numpy.zeros", "numpy.isscalar" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
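The `RolloutRunner.run` generator above interleaves environment stepping with periodic yielding of collected batches. A stripped-down sketch of that pattern follows; the one-dimensional environment and random policy are dummies standing in for `env`/`pi`:

```python
import random
from collections import defaultdict

# Generator structure mirrored from RolloutRunner.run: accumulate transitions
# key-by-key, reset on episode end, and yield a batch every N steps.
def run(env_step, policy, every_steps):
    history = defaultdict(list)
    ob, step = 0.0, 0
    while True:
        ac = policy(ob)
        ob_next, rew, done = env_step(ob, ac)
        for k, v in (("ob", ob), ("ac", ac), ("rew", rew), ("done", done)):
            history[k].append(v)
        ob = 0.0 if done else ob_next  # reset the dummy env on episode end
        step += 1
        if step % every_steps == 0:
            batch, history = dict(history), defaultdict(list)
            yield batch

def dummy_env_step(ob, ac):
    # Random-walk toy dynamics: negative distance as reward, done past +/-3.
    ob_next = ob + ac
    return ob_next, -abs(ob_next), abs(ob_next) > 3.0

runner = run(dummy_env_step, lambda ob: random.choice([-1.0, 1.0]), every_steps=4)
print(len(next(runner)["rew"]))  # 4
```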
benduffy1/MONAI
[ "2fef7ff5c064a9ff6b6d6b4f2323180afed99934", "046e625b09262261373d7b8039fb652547201368", "e0db5a564225a7cb62e7a23df97267019006302f", "2fef7ff5c064a9ff6b6d6b4f2323180afed99934", "e0db5a564225a7cb62e7a23df97267019006302f", "046e625b09262261373d7b8039fb652547201368" ]
[ "monai/networks/blocks/dints_block.py", "monai/handlers/utils.py", "tests/test_hausdorff_distance.py", "tests/test_resnet.py", "monai/metrics/froc.py", "monai/apps/deepgrow/transforms.py" ]
[ "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom typing import Tuple, Union\n\nimport torch\n\nfrom monai.networks.layers.factories import Conv\nfrom monai.networks.layers.utils import get_act_layer, get_norm_layer\n\n__all__ = [\"FactorizedIncreaseBlock\", \"FactorizedReduceBlock\", \"P3DActiConvNormBlock\", \"ActiConvNormBlock\"]\n\n\nclass FactorizedIncreaseBlock(torch.nn.Sequential):\n \"\"\"\n Up-sampling the features by two using linear interpolation and convolutions.\n \"\"\"\n\n def __init__(\n self,\n in_channel: int,\n out_channel: int,\n spatial_dims: int = 3,\n act_name: Union[Tuple, str] = \"RELU\",\n norm_name: Union[Tuple, str] = \"INSTANCE\",\n ):\n \"\"\"\n Args:\n in_channel: number of input channels\n out_channel: number of output channels\n spatial_dims: number of spatial dimensions\n act_name: activation layer type and arguments.\n norm_name: feature normalization type and arguments.\n \"\"\"\n super().__init__()\n self._in_channel = in_channel\n self._out_channel = out_channel\n self._spatial_dims = spatial_dims\n if self._spatial_dims not in (2, 3):\n raise ValueError(\"spatial_dims must be 2 or 3.\")\n\n conv_type = Conv[Conv.CONV, self._spatial_dims]\n mode = \"trilinear\" if self._spatial_dims == 3 else \"bilinear\"\n self.add_module(\"up\", torch.nn.Upsample(scale_factor=2, mode=mode, align_corners=True))\n self.add_module(\"acti\", get_act_layer(name=act_name))\n self.add_module(\n \"conv\",\n conv_type(\n in_channels=self._in_channel,\n out_channels=self._out_channel,\n kernel_size=1,\n stride=1,\n padding=0,\n groups=1,\n bias=False,\n dilation=1,\n ),\n )\n self.add_module(\n \"norm\", get_norm_layer(name=norm_name, spatial_dims=self._spatial_dims, channels=self._out_channel)\n )\n\n\nclass FactorizedReduceBlock(torch.nn.Module):\n \"\"\"\n Down-sampling the feature by 2 using stride.\n The length along each spatial dimension must be a multiple of 2.\n \"\"\"\n\n def __init__(\n self,\n in_channel: int,\n out_channel: int,\n spatial_dims: int = 3,\n act_name: Union[Tuple, str] = \"RELU\",\n norm_name: Union[Tuple, str] = \"INSTANCE\",\n ):\n \"\"\"\n Args:\n in_channel: number of input channels\n out_channel: number of output channels.\n spatial_dims: number of spatial dimensions.\n act_name: activation layer type and arguments.\n norm_name: feature normalization type and arguments.\n \"\"\"\n super().__init__()\n self._in_channel = in_channel\n self._out_channel = out_channel\n self._spatial_dims = spatial_dims\n if self._spatial_dims not in (2, 3):\n raise ValueError(\"spatial_dims must be 2 or 3.\")\n\n conv_type = Conv[Conv.CONV, self._spatial_dims]\n\n self.act = get_act_layer(name=act_name)\n self.conv_1 = conv_type(\n in_channels=self._in_channel,\n out_channels=self._out_channel // 2,\n kernel_size=1,\n stride=2,\n padding=0,\n groups=1,\n bias=False,\n dilation=1,\n )\n self.conv_2 = conv_type(\n in_channels=self._in_channel,\n out_channels=self._out_channel - self._out_channel // 2,\n kernel_size=1,\n stride=2,\n 
padding=0,\n groups=1,\n bias=False,\n dilation=1,\n )\n self.norm = get_norm_layer(name=norm_name, spatial_dims=self._spatial_dims, channels=self._out_channel)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n The length along each spatial dimension must be a multiple of 2.\n \"\"\"\n x = self.act(x)\n if self._spatial_dims == 3:\n out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:, 1:])], dim=1)\n else:\n out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:])], dim=1)\n out = self.norm(out)\n return out\n\n\nclass P3DActiConvNormBlock(torch.nn.Sequential):\n \"\"\"\n -- (act) -- (conv) -- (norm) --\n \"\"\"\n\n def __init__(\n self,\n in_channel: int,\n out_channel: int,\n kernel_size: int,\n padding: int,\n mode: int = 0,\n act_name: Union[Tuple, str] = \"RELU\",\n norm_name: Union[Tuple, str] = \"INSTANCE\",\n ):\n \"\"\"\n Args:\n in_channel: number of input channels.\n out_channel: number of output channels.\n kernel_size: kernel size to be expanded to 3D.\n padding: padding size to be expanded to 3D.\n mode: mode for the anisotropic kernels:\n\n - 0: ``(k, k, 1)``, ``(1, 1, k)``,\n - 1: ``(k, 1, k)``, ``(1, k, 1)``,\n - 2: ``(1, k, k)``. ``(k, 1, 1)``.\n\n act_name: activation layer type and arguments.\n norm_name: feature normalization type and arguments.\n \"\"\"\n super().__init__()\n self._in_channel = in_channel\n self._out_channel = out_channel\n self._p3dmode = int(mode)\n\n conv_type = Conv[Conv.CONV, 3]\n\n if self._p3dmode == 0: # (k, k, 1), (1, 1, k)\n kernel_size0 = (kernel_size, kernel_size, 1)\n kernel_size1 = (1, 1, kernel_size)\n padding0 = (padding, padding, 0)\n padding1 = (0, 0, padding)\n elif self._p3dmode == 1: # (k, 1, k), (1, k, 1)\n kernel_size0 = (kernel_size, 1, kernel_size)\n kernel_size1 = (1, kernel_size, 1)\n padding0 = (padding, 0, padding)\n padding1 = (0, padding, 0)\n elif self._p3dmode == 2: # (1, k, k), (k, 1, 1)\n kernel_size0 = (1, kernel_size, kernel_size)\n kernel_size1 = (kernel_size, 1, 1)\n padding0 = (0, padding, padding)\n padding1 = (padding, 0, 0)\n else:\n raise ValueError(\"`mode` must be 0, 1, or 2.\")\n\n self.add_module(\"acti\", get_act_layer(name=act_name))\n self.add_module(\n \"conv\",\n conv_type(\n in_channels=self._in_channel,\n out_channels=self._in_channel,\n kernel_size=kernel_size0,\n stride=1,\n padding=padding0,\n groups=1,\n bias=False,\n dilation=1,\n ),\n )\n self.add_module(\n \"conv_1\",\n conv_type(\n in_channels=self._in_channel,\n out_channels=self._out_channel,\n kernel_size=kernel_size1,\n stride=1,\n padding=padding1,\n groups=1,\n bias=False,\n dilation=1,\n ),\n )\n self.add_module(\"norm\", get_norm_layer(name=norm_name, spatial_dims=3, channels=self._out_channel))\n\n\nclass ActiConvNormBlock(torch.nn.Sequential):\n \"\"\"\n -- (Acti) -- (Conv) -- (Norm) --\n \"\"\"\n\n def __init__(\n self,\n in_channel: int,\n out_channel: int,\n kernel_size: int = 3,\n padding: int = 1,\n spatial_dims: int = 3,\n act_name: Union[Tuple, str] = \"RELU\",\n norm_name: Union[Tuple, str] = \"INSTANCE\",\n ):\n \"\"\"\n Args:\n in_channel: number of input channels.\n out_channel: number of output channels.\n kernel_size: kernel size of the convolution.\n padding: padding size of the convolution.\n spatial_dims: number of spatial dimensions.\n act_name: activation layer type and arguments.\n norm_name: feature normalization type and arguments.\n \"\"\"\n super().__init__()\n self._in_channel = in_channel\n self._out_channel = out_channel\n self._spatial_dims = spatial_dims\n\n conv_type = 
Conv[Conv.CONV, self._spatial_dims]\n self.add_module(\"acti\", get_act_layer(name=act_name))\n self.add_module(\n \"conv\",\n conv_type(\n in_channels=self._in_channel,\n out_channels=self._out_channel,\n kernel_size=kernel_size,\n stride=1,\n padding=padding,\n groups=1,\n bias=False,\n dilation=1,\n ),\n )\n self.add_module(\n \"norm\", get_norm_layer(name=norm_name, spatial_dims=self._spatial_dims, channels=self._out_channel)\n )\n", "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom collections import OrderedDict\nfrom typing import TYPE_CHECKING, Dict, Optional, Sequence, Union\n\nimport numpy as np\nimport torch\n\nfrom monai.config import IgniteInfo, KeysCollection, PathLike\nfrom monai.utils import ensure_tuple, look_up_option, min_version, optional_import\n\nidist, _ = optional_import(\"ignite\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"distributed\")\nif TYPE_CHECKING:\n from ignite.engine import Engine\nelse:\n Engine, _ = optional_import(\"ignite.engine\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"Engine\")\n\n__all__ = [\"stopping_fn_from_metric\", \"stopping_fn_from_loss\", \"write_metrics_reports\", \"from_engine\"]\n\n\ndef stopping_fn_from_metric(metric_name: str):\n \"\"\"\n Returns a stopping function for ignite.handlers.EarlyStopping using the given metric name.\n \"\"\"\n\n def stopping_fn(engine: Engine):\n return engine.state.metrics[metric_name]\n\n return stopping_fn\n\n\ndef stopping_fn_from_loss():\n \"\"\"\n Returns a stopping function for ignite.handlers.EarlyStopping using the loss value.\n \"\"\"\n\n def stopping_fn(engine: Engine):\n return -engine.state.output # type:ignore\n\n return stopping_fn\n\n\ndef write_metrics_reports(\n save_dir: PathLike,\n images: Optional[Sequence[str]],\n metrics: Optional[Dict[str, Union[torch.Tensor, np.ndarray]]],\n metric_details: Optional[Dict[str, Union[torch.Tensor, np.ndarray]]],\n summary_ops: Optional[Union[str, Sequence[str]]],\n deli: str = \",\",\n output_type: str = \"csv\",\n):\n \"\"\"\n Utility function to write the metrics into files, contains 3 parts:\n 1. if `metrics` dict is not None, write overall metrics into file, every line is a metric name and value pair.\n 2. if `metric_details` dict is not None, write raw metric data of every image into file, every line for 1 image.\n 3. 
if `summary_ops` is not None, compute summary based on operations on `metric_details` and write to file.\n\n Args:\n save_dir: directory to save all the metrics reports.\n images: name or path of every input image corresponding to the metric_details data.\n if None, will use index number as the filename of every input image.\n metrics: a dictionary of (metric name, metric value) pairs.\n metric_details: a dictionary of (metric name, metric raw values) pairs, usually, it comes from metrics\n computation, for example, the raw value can be the mean_dice of every channel of every input image.\n summary_ops: expected computation operations to generate the summary report.\n it can be: None, \"*\" or list of strings, default to None.\n None - don't generate summary report for every expected metric_details.\n \"*\" - generate summary report for every metric_details with all the supported operations.\n list of strings - generate summary report for every metric_details with specified operations, they\n should be within list: [\"mean\", \"median\", \"max\", \"min\", \"<int>percentile\", \"std\", \"notnans\"].\n the number in \"<int>percentile\" should be [0, 100], like: \"15percentile\". default: \"90percentile\".\n for more details, please check: https://numpy.org/doc/stable/reference/generated/numpy.nanpercentile.html.\n note that: for the overall summary, it computes `nanmean` of all classes for each image first,\n then compute summary. example of the generated summary report::\n\n class mean median max 5percentile 95percentile notnans\n class0 6.0000 6.0000 7.0000 5.1000 6.9000 2.0000\n class1 6.0000 6.0000 6.0000 6.0000 6.0000 1.0000\n mean 6.2500 6.2500 7.0000 5.5750 6.9250 2.0000\n\n deli: the delimiter character in the saved file, default to \",\" as the default output type is `csv`.\n to be consistent with: https://docs.python.org/3/library/csv.html#csv.Dialect.delimiter.\n output_type: expected output file type, supported types: [\"csv\"], default to \"csv\".\n\n \"\"\"\n if output_type.lower() != \"csv\":\n raise ValueError(f\"unsupported output type: {output_type}.\")\n\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n if metrics is not None and len(metrics) > 0:\n with open(os.path.join(save_dir, \"metrics.csv\"), \"w\") as f:\n for k, v in metrics.items():\n f.write(f\"{k}{deli}{str(v)}\\n\")\n if metric_details is not None and len(metric_details) > 0:\n for k, v in metric_details.items():\n if isinstance(v, torch.Tensor):\n v = v.cpu().numpy()\n if v.ndim == 0:\n # reshape to [1, 1] if no batch and class dims\n v = v.reshape((1, 1))\n elif v.ndim == 1:\n # reshape to [N, 1] if no class dim\n v = v.reshape((-1, 1))\n\n # add the average value of all classes to v\n class_labels = [\"class\" + str(i) for i in range(v.shape[1])] + [\"mean\"]\n v = np.concatenate([v, np.nanmean(v, axis=1, keepdims=True)], axis=1)\n\n with open(os.path.join(save_dir, f\"{k}_raw.csv\"), \"w\") as f:\n f.write(f\"filename{deli}{deli.join(class_labels)}\\n\")\n for i, b in enumerate(v):\n f.write(f\"{images[i] if images is not None else str(i)}{deli}{deli.join([str(c) for c in b])}\\n\")\n\n if summary_ops is not None:\n supported_ops = OrderedDict(\n {\n \"mean\": np.nanmean,\n \"median\": np.nanmedian,\n \"max\": np.nanmax,\n \"min\": np.nanmin,\n \"90percentile\": lambda x: np.nanpercentile(x[0], x[1]),\n \"std\": np.nanstd,\n \"notnans\": lambda x: (~np.isnan(x)).sum(),\n }\n )\n ops = ensure_tuple(summary_ops)\n if \"*\" in ops:\n ops = tuple(supported_ops.keys())\n\n def _compute_op(op: str, 
d: np.ndarray):\n if not op.endswith(\"percentile\"):\n c_op = look_up_option(op, supported_ops)\n return c_op(d)\n\n threshold = int(op.split(\"percentile\")[0])\n return supported_ops[\"90percentile\"]((d, threshold)) # type: ignore\n\n with open(os.path.join(save_dir, f\"{k}_summary.csv\"), \"w\") as f:\n f.write(f\"class{deli}{deli.join(ops)}\\n\")\n for i, c in enumerate(np.transpose(v)):\n f.write(f\"{class_labels[i]}{deli}{deli.join([f'{_compute_op(k, c):.4f}' for k in ops])}\\n\")\n\n\ndef from_engine(keys: KeysCollection, first: bool = False):\n \"\"\"\n Utility function to simplify the `batch_transform` or `output_transform` args of ignite components\n when handling dictionary or list of dictionaries(for example: `engine.state.batch` or `engine.state.output`).\n Users only need to set the expected keys, then it will return a callable function to extract data from\n dictionary and construct a tuple respectively.\n\n If data is a list of dictionaries after decollating, extract expected keys and construct lists respectively,\n for example, if data is `[{\"A\": 1, \"B\": 2}, {\"A\": 3, \"B\": 4}]`, from_engine([\"A\", \"B\"]): `([1, 3], [2, 4])`.\n\n It can help avoid a complicated `lambda` function and make the arg of metrics more straight-forward.\n For example, set the first key as the prediction and the second key as label to get the expected data\n from `engine.state.output` for a metric::\n\n from monai.handlers import MeanDice, from_engine\n\n metric = MeanDice(\n include_background=False,\n output_transform=from_engine([\"pred\", \"label\"])\n )\n\n Args:\n keys: specified keys to extract data from dictionary or decollated list of dictionaries.\n first: whether only extract specified keys from the first item if input data is a list of dictionaries,\n it's used to extract the scalar data which doesn't have batch dim and was replicated into every\n dictionary when decollating, like `loss`, etc.\n\n\n \"\"\"\n keys = ensure_tuple(keys)\n\n def _wrapper(data):\n if isinstance(data, dict):\n return tuple(data[k] for k in keys)\n if isinstance(data, list) and isinstance(data[0], dict):\n # if data is a list of dictionaries, extract expected keys and construct lists,\n # if `first=True`, only extract keys from the first item of the list\n ret = [data[0][k] if first else [i[k] for i in data] for k in keys]\n return tuple(ret) if len(ret) > 1 else ret[0]\n\n return _wrapper\n", "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nfrom typing import Tuple\n\nimport numpy as np\nimport torch\nfrom parameterized import parameterized\n\nfrom monai.metrics import HausdorffDistanceMetric\n\n\ndef create_spherical_seg_3d(\n radius: float = 20.0, centre: Tuple[int, int, int] = (49, 49, 49), im_shape: Tuple[int, int, int] = (99, 99, 99)\n) -> np.ndarray:\n \"\"\"\n Return a 3D image with a sphere inside. 
Voxel values will be\n 1 inside the sphere, and 0 elsewhere.\n\n Args:\n radius: radius of sphere (in terms of number of voxels, can be partial)\n centre: location of sphere centre.\n im_shape: shape of image to create\n\n See also:\n :py:meth:`~create_test_image_3d`\n \"\"\"\n # Create image\n image = np.zeros(im_shape, dtype=np.int32)\n spy, spx, spz = np.ogrid[\n -centre[0] : im_shape[0] - centre[0], -centre[1] : im_shape[1] - centre[1], -centre[2] : im_shape[2] - centre[2]\n ]\n circle = (spx * spx + spy * spy + spz * spz) <= radius * radius\n\n image[circle] = 1\n image[~circle] = 0\n return image\n\n\nTEST_CASES = [\n [[create_spherical_seg_3d(), create_spherical_seg_3d(), 1], [0, 0, 0, 0, 0, 0]],\n [\n [\n create_spherical_seg_3d(radius=20, centre=(20, 20, 20)),\n create_spherical_seg_3d(radius=20, centre=(19, 19, 19)),\n ],\n [1.7320508075688772, 1.7320508075688772, 1, 1, 3, 3],\n ],\n [\n [\n create_spherical_seg_3d(radius=33, centre=(19, 33, 22)),\n create_spherical_seg_3d(radius=33, centre=(20, 33, 22)),\n ],\n [1, 1, 1, 1, 1, 1],\n ],\n [\n [\n create_spherical_seg_3d(radius=20, centre=(20, 33, 22)),\n create_spherical_seg_3d(radius=40, centre=(20, 33, 22)),\n ],\n [20.09975124224178, 20.223748416156685, 15, 20, 24, 35],\n ],\n [\n [\n # pred does not have foreground (but gt has), the metric should be inf\n np.zeros([99, 99, 99]),\n create_spherical_seg_3d(radius=40, centre=(20, 33, 22)),\n ],\n [np.inf, np.inf, np.inf, np.inf, np.inf, np.inf],\n ],\n [\n [\n # gt does not have foreground (but pred has), the metric should be inf\n create_spherical_seg_3d(),\n np.zeros([99, 99, 99]),\n ],\n [np.inf, np.inf, np.inf, np.inf, np.inf, np.inf],\n ],\n [\n [\n create_spherical_seg_3d(radius=20, centre=(20, 33, 22)),\n create_spherical_seg_3d(radius=40, centre=(20, 33, 22)),\n 95,\n ],\n [19.924858845171276, 20.09975124224178, 14, 18, 22, 33],\n ],\n]\n\nTEST_CASES_NANS = [\n [\n [\n # both pred and gt do not have foreground, metric and not_nans should be 0\n np.zeros([99, 99, 99]),\n np.zeros([99, 99, 99]),\n ]\n ]\n]\n\n\nclass TestHausdorffDistance(unittest.TestCase):\n @parameterized.expand(TEST_CASES)\n def test_value(self, input_data, expected_value):\n percentile = None\n if len(input_data) == 3:\n [seg_1, seg_2, percentile] = input_data\n else:\n [seg_1, seg_2] = input_data\n ct = 0\n seg_1 = torch.tensor(seg_1)\n seg_2 = torch.tensor(seg_2)\n for metric in [\"euclidean\", \"chessboard\", \"taxicab\"]:\n for directed in [True, False]:\n hd_metric = HausdorffDistanceMetric(\n include_background=False, distance_metric=metric, percentile=percentile, directed=directed\n )\n # shape of seg_1, seg_2 are: HWD, converts to BNHWD\n batch, n_class = 2, 3\n batch_seg_1 = seg_1.unsqueeze(0).unsqueeze(0).repeat([batch, n_class, 1, 1, 1])\n batch_seg_2 = seg_2.unsqueeze(0).unsqueeze(0).repeat([batch, n_class, 1, 1, 1])\n hd_metric(batch_seg_1, batch_seg_2)\n result = hd_metric.aggregate()\n expected_value_curr = expected_value[ct]\n np.testing.assert_allclose(expected_value_curr, result, rtol=1e-7)\n ct += 1\n\n @parameterized.expand(TEST_CASES_NANS)\n def test_nans(self, input_data):\n [seg_1, seg_2] = input_data\n seg_1 = torch.tensor(seg_1)\n seg_2 = torch.tensor(seg_2)\n hd_metric = HausdorffDistanceMetric(include_background=False, get_not_nans=True)\n batch_seg_1 = seg_1.unsqueeze(0).unsqueeze(0)\n batch_seg_2 = seg_2.unsqueeze(0).unsqueeze(0)\n hd_metric(batch_seg_1, batch_seg_2)\n result, not_nans = hd_metric.aggregate()\n np.testing.assert_allclose(0, result, rtol=1e-7)\n 
np.testing.assert_allclose(0, not_nans, rtol=1e-7)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nfrom typing import TYPE_CHECKING\n\nimport torch\nfrom parameterized import parameterized\n\nfrom monai.networks import eval_mode\nfrom monai.networks.nets import resnet10, resnet18, resnet34, resnet50, resnet101, resnet152, resnet200\nfrom monai.utils import optional_import\nfrom tests.utils import test_script_save\n\nif TYPE_CHECKING:\n import torchvision\n\n has_torchvision = True\nelse:\n torchvision, has_torchvision = optional_import(\"torchvision\")\n\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\nTEST_CASE_1 = [ # 3D, batch 3, 2 input channel\n {\"pretrained\": False, \"spatial_dims\": 3, \"n_input_channels\": 2, \"num_classes\": 3},\n (3, 2, 32, 64, 48),\n (3, 3),\n]\n\nTEST_CASE_2 = [ # 2D, batch 2, 1 input channel\n {\"pretrained\": False, \"spatial_dims\": 2, \"n_input_channels\": 1, \"num_classes\": 3},\n (2, 1, 32, 64),\n (2, 3),\n]\n\nTEST_CASE_2_A = [ # 2D, batch 2, 1 input channel, shortcut type A\n {\"pretrained\": False, \"spatial_dims\": 2, \"n_input_channels\": 1, \"num_classes\": 3, \"shortcut_type\": \"A\"},\n (2, 1, 32, 64),\n (2, 3),\n]\n\nTEST_CASE_3 = [ # 1D, batch 1, 2 input channels\n {\"pretrained\": False, \"spatial_dims\": 1, \"n_input_channels\": 2, \"num_classes\": 3},\n (1, 2, 32),\n (1, 3),\n]\n\nTEST_CASE_3_A = [ # 1D, batch 1, 2 input channels\n {\"pretrained\": False, \"spatial_dims\": 1, \"n_input_channels\": 2, \"num_classes\": 3, \"shortcut_type\": \"A\"},\n (1, 2, 32),\n (1, 3),\n]\n\nTEST_CASE_4 = [ # 2D, batch 2, 1 input channel\n {\"pretrained\": False, \"spatial_dims\": 2, \"n_input_channels\": 1, \"num_classes\": 3, \"feed_forward\": False},\n (2, 1, 32, 64),\n ((2, 512), (2, 2048)),\n]\n\nTEST_CASES = []\nfor case in [TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_2_A, TEST_CASE_3_A]:\n for model in [resnet10, resnet18, resnet34, resnet50, resnet101, resnet152, resnet200]:\n TEST_CASES.append([model, *case])\n\nTEST_SCRIPT_CASES = [\n [model, *TEST_CASE_1] for model in [resnet10, resnet18, resnet34, resnet50, resnet101, resnet152, resnet200]\n]\n\n\nclass TestResNet(unittest.TestCase):\n @parameterized.expand(TEST_CASES)\n def test_resnet_shape(self, model, input_param, input_shape, expected_shape):\n net = model(**input_param).to(device)\n with eval_mode(net):\n result = net.forward(torch.randn(input_shape).to(device))\n if input_param.get(\"feed_forward\", True):\n self.assertEqual(result.shape, expected_shape)\n else:\n self.assertTrue(result.shape in expected_shape)\n\n @parameterized.expand(TEST_SCRIPT_CASES)\n def test_script(self, model, input_param, input_shape, expected_shape):\n net = model(**input_param)\n test_data = torch.randn(input_shape)\n test_script_save(net, test_data)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 
(the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom typing import List, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\n\n\ndef compute_fp_tp_probs(\n probs: Union[np.ndarray, torch.Tensor],\n y_coord: Union[np.ndarray, torch.Tensor],\n x_coord: Union[np.ndarray, torch.Tensor],\n evaluation_mask: Union[np.ndarray, torch.Tensor],\n labels_to_exclude: Optional[List] = None,\n resolution_level: int = 0,\n):\n \"\"\"\n This function is modified from the official evaluation code of\n `CAMELYON 16 Challenge <https://camelyon16.grand-challenge.org/>`_, and used to distinguish\n true positive and false positive predictions. A true positive prediction is defined when\n the detection point is within the annotated ground truth region.\n\n Args:\n probs: an array with shape (n,) that represents the probabilities of the detections.\n Where, n is the number of predicted detections.\n y_coord: an array with shape (n,) that represents the Y-coordinates of the detections.\n x_coord: an array with shape (n,) that represents the X-coordinates of the detections.\n evaluation_mask: the ground truth mask for evaluation.\n labels_to_exclude: labels in this list will not be counted for metric calculation.\n resolution_level: the level at which the evaluation mask is made.\n\n Returns:\n fp_probs: an array that contains the probabilities of the false positive detections.\n tp_probs: an array that contains the probabilities of the True positive detections.\n num_targets: the total number of targets (excluding `labels_to_exclude`) for all images under evaluation.\n\n \"\"\"\n if not (probs.shape == y_coord.shape == x_coord.shape):\n raise AssertionError(\"the shapes for coordinates and probabilities should be the same.\")\n\n if isinstance(probs, torch.Tensor):\n probs = probs.detach().cpu().numpy()\n if isinstance(y_coord, torch.Tensor):\n y_coord = y_coord.detach().cpu().numpy()\n if isinstance(x_coord, torch.Tensor):\n x_coord = x_coord.detach().cpu().numpy()\n if isinstance(evaluation_mask, torch.Tensor):\n evaluation_mask = evaluation_mask.detach().cpu().numpy()\n\n if labels_to_exclude is None:\n labels_to_exclude = []\n\n max_label = np.max(evaluation_mask)\n tp_probs = np.zeros((max_label,), dtype=np.float32)\n\n y_coord = (y_coord / pow(2, resolution_level)).astype(int)\n x_coord = (x_coord / pow(2, resolution_level)).astype(int)\n\n hittedlabel = evaluation_mask[y_coord, x_coord]\n fp_probs = probs[np.where(hittedlabel == 0)]\n for i in range(1, max_label + 1):\n if i not in labels_to_exclude and i in hittedlabel:\n tp_probs[i - 1] = probs[np.where(hittedlabel == i)].max()\n\n num_targets = max_label - len(labels_to_exclude)\n return fp_probs, tp_probs, num_targets\n\n\ndef compute_froc_curve_data(\n fp_probs: Union[np.ndarray, torch.Tensor],\n tp_probs: Union[np.ndarray, torch.Tensor],\n num_targets: int,\n num_images: int,\n):\n \"\"\"\n This function is modified from the official evaluation code of\n `CAMELYON 16 Challenge <https://camelyon16.grand-challenge.org/>`_, and used to compute\n the required data for plotting the Free Response 
Operating Characteristic (FROC) curve.\n\n Args:\n fp_probs: an array that contains the probabilities of the false positive detections for all\n images under evaluation.\n tp_probs: an array that contains the probabilities of the True positive detections for all\n images under evaluation.\n num_targets: the total number of targets (excluding `labels_to_exclude`) for all images under evaluation.\n num_images: the number of images under evaluation.\n\n \"\"\"\n if not isinstance(fp_probs, type(tp_probs)):\n raise AssertionError(\"fp and tp probs should have same type.\")\n if isinstance(fp_probs, torch.Tensor):\n fp_probs = fp_probs.detach().cpu().numpy()\n if isinstance(tp_probs, torch.Tensor):\n tp_probs = tp_probs.detach().cpu().numpy()\n\n total_fps, total_tps = [], []\n all_probs = sorted(set(list(fp_probs) + list(tp_probs)))\n for thresh in all_probs[1:]:\n total_fps.append((fp_probs >= thresh).sum())\n total_tps.append((tp_probs >= thresh).sum())\n total_fps.append(0)\n total_tps.append(0)\n fps_per_image = np.asarray(total_fps) / float(num_images)\n total_sensitivity = np.asarray(total_tps) / float(num_targets)\n return fps_per_image, total_sensitivity\n\n\ndef compute_froc_score(\n fps_per_image: np.ndarray, total_sensitivity: np.ndarray, eval_thresholds: Tuple = (0.25, 0.5, 1, 2, 4, 8)\n):\n \"\"\"\n This function is modified from the official evaluation code of\n `CAMELYON 16 Challenge <https://camelyon16.grand-challenge.org/>`_, and used to compute\n the challenge's second evaluation metric, which is defined as the average sensitivity at\n the predefined false positive rates per whole slide image.\n\n Args:\n fps_per_image: the average number of false positives per image for different thresholds.\n total_sensitivity: sensitivities (true positive rates) for different thresholds.\n eval_thresholds: the false positive rates for calculating the average sensitivity. 
Defaults\n to (0.25, 0.5, 1, 2, 4, 8) which is the same as the CAMELYON 16 Challenge.\n\n \"\"\"\n interp_sens = np.interp(eval_thresholds, fps_per_image[::-1], total_sensitivity[::-1])\n return np.mean(interp_sens)\n", "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport json\nfrom typing import Callable, Dict, Hashable, List, Optional, Sequence, Union\n\nimport numpy as np\nimport torch\n\nfrom monai.config import IndexSelection, KeysCollection\nfrom monai.networks.layers import GaussianFilter\nfrom monai.transforms import Resize, SpatialCrop\nfrom monai.transforms.transform import MapTransform, Randomizable, Transform\nfrom monai.transforms.utils import generate_spatial_bounding_box, is_positive\nfrom monai.utils import InterpolateMode, deprecated_arg, ensure_tuple, ensure_tuple_rep, min_version, optional_import\nfrom monai.utils.enums import PostFix\n\nmeasure, _ = optional_import(\"skimage.measure\", \"0.14.2\", min_version)\ndistance_transform_cdt, _ = optional_import(\"scipy.ndimage.morphology\", name=\"distance_transform_cdt\")\n\nDEFAULT_POST_FIX = PostFix.meta()\n\n\n# Transforms to support Training for Deepgrow models\nclass FindAllValidSlicesd(Transform):\n \"\"\"\n Find/List all valid slices in the label.\n Label is assumed to be a 4D Volume with shape CDHW, where C=1.\n\n Args:\n label: key to the label source.\n sids: key to store slices indices having valid label map.\n \"\"\"\n\n def __init__(self, label: str = \"label\", sids: str = \"sids\"):\n self.label = label\n self.sids = sids\n\n def _apply(self, label):\n sids = []\n for sid in range(label.shape[1]): # Assume channel is first\n if np.sum(label[0][sid]) != 0:\n sids.append(sid)\n return np.asarray(sids)\n\n def __call__(self, data):\n d: Dict = dict(data)\n label = d[self.label]\n if label.shape[0] != 1:\n raise ValueError(\"Only supports single channel labels!\")\n\n if len(label.shape) != 4: # only for 3D\n raise ValueError(\"Only supports label with shape CDHW!\")\n\n sids = self._apply(label)\n if sids is not None and len(sids):\n d[self.sids] = sids\n return d\n\n\nclass AddInitialSeedPointd(Randomizable, Transform):\n \"\"\"\n Add random guidance as initial seed point for a given label.\n\n Note that the label is of size (C, D, H, W) or (C, H, W)\n\n The guidance is of size (2, N, # of dims) where N is number of guidance added.\n # of dims = 4 when C, D, H, W; # of dims = 3 when (C, H, W)\n\n Args:\n label: label source.\n guidance: key to store guidance.\n sids: key that represents list of valid slice indices for the given label.\n sid: key that represents the slice to add initial seed point. 
If not present, random sid will be chosen.\n connected_regions: maximum connected regions to use for adding initial points.\n \"\"\"\n\n def __init__(\n self,\n label: str = \"label\",\n guidance: str = \"guidance\",\n sids: str = \"sids\",\n sid: str = \"sid\",\n connected_regions: int = 5,\n ):\n self.label = label\n self.sids_key = sids\n self.sid_key = sid\n self.sid = None\n self.guidance = guidance\n self.connected_regions = connected_regions\n\n def randomize(self, data):\n sid = data.get(self.sid_key, None)\n sids = data.get(self.sids_key, None)\n if sids is not None:\n if sid is None or sid not in sids:\n sid = self.R.choice(sids, replace=False)\n else:\n sid = None\n self.sid = sid\n\n def _apply(self, label, sid):\n dimensions = 3 if len(label.shape) > 3 else 2\n default_guidance = [-1] * (dimensions + 1)\n\n dims = dimensions\n if sid is not None and dimensions == 3:\n dims = 2\n label = label[0][sid][np.newaxis] # Assume channel is first\n\n label = (label > 0.5).astype(np.float32)\n blobs_labels = measure.label(label.astype(int), background=0) if dims == 2 else label\n if np.max(blobs_labels) <= 0:\n raise AssertionError(\"Not a valid Label\")\n\n pos_guidance = []\n for ridx in range(1, 2 if dims == 3 else self.connected_regions + 1):\n if dims == 2:\n label = (blobs_labels == ridx).astype(np.float32)\n if np.sum(label) == 0:\n pos_guidance.append(default_guidance)\n continue\n\n distance = distance_transform_cdt(label).flatten()\n probability = np.exp(distance) - 1.0\n\n idx = np.where(label.flatten() > 0)[0]\n seed = self.R.choice(idx, size=1, p=probability[idx] / np.sum(probability[idx]))\n dst = distance[seed]\n\n g = np.asarray(np.unravel_index(seed, label.shape)).transpose().tolist()[0]\n g[0] = dst[0] # for debug\n if dimensions == 2 or dims == 3:\n pos_guidance.append(g)\n else:\n pos_guidance.append([g[0], sid, g[-2], g[-1]])\n\n return np.asarray([pos_guidance, [default_guidance] * len(pos_guidance)])\n\n def __call__(self, data):\n d = dict(data)\n self.randomize(data)\n d[self.guidance] = json.dumps(self._apply(d[self.label], self.sid).astype(int, copy=False).tolist())\n return d\n\n\nclass AddGuidanceSignald(Transform):\n \"\"\"\n Add Guidance signal for input image.\n\n Based on the \"guidance\" points, apply gaussian to them and add them as new channel for input image.\n\n Args:\n image: key to the image source.\n guidance: key to store guidance.\n sigma: standard deviation for Gaussian kernel.\n number_intensity_ch: channel index.\n\n \"\"\"\n\n def __init__(self, image: str = \"image\", guidance: str = \"guidance\", sigma: int = 2, number_intensity_ch: int = 1):\n self.image = image\n self.guidance = guidance\n self.sigma = sigma\n self.number_intensity_ch = number_intensity_ch\n\n def _get_signal(self, image, guidance):\n dimensions = 3 if len(image.shape) > 3 else 2\n guidance = guidance.tolist() if isinstance(guidance, np.ndarray) else guidance\n guidance = json.loads(guidance) if isinstance(guidance, str) else guidance\n if dimensions == 3:\n signal = np.zeros((len(guidance), image.shape[-3], image.shape[-2], image.shape[-1]), dtype=np.float32)\n else:\n signal = np.zeros((len(guidance), image.shape[-2], image.shape[-1]), dtype=np.float32)\n\n sshape = signal.shape\n for i, g_i in enumerate(guidance):\n for point in g_i:\n if np.any(np.asarray(point) < 0):\n continue\n\n if dimensions == 3:\n p1 = max(0, min(int(point[-3]), sshape[-3] - 1))\n p2 = max(0, min(int(point[-2]), sshape[-2] - 1))\n p3 = max(0, min(int(point[-1]), sshape[-1] - 1))\n signal[i, 
p1, p2, p3] = 1.0\n else:\n p1 = max(0, min(int(point[-2]), sshape[-2] - 1))\n p2 = max(0, min(int(point[-1]), sshape[-1] - 1))\n signal[i, p1, p2] = 1.0\n\n if np.max(signal[i]) > 0:\n signal_tensor = torch.tensor(signal[i])\n pt_gaussian = GaussianFilter(len(signal_tensor.shape), sigma=self.sigma)\n signal_tensor = pt_gaussian(signal_tensor.unsqueeze(0).unsqueeze(0))\n signal_tensor = signal_tensor.squeeze(0).squeeze(0)\n signal[i] = signal_tensor.detach().cpu().numpy()\n signal[i] = (signal[i] - np.min(signal[i])) / (np.max(signal[i]) - np.min(signal[i]))\n return signal\n\n def _apply(self, image, guidance):\n signal = self._get_signal(image, guidance)\n image = image[0 : 0 + self.number_intensity_ch, ...]\n return np.concatenate([image, signal], axis=0)\n\n def __call__(self, data):\n d = dict(data)\n image = d[self.image]\n guidance = d[self.guidance]\n\n d[self.image] = self._apply(image, guidance)\n return d\n\n\nclass FindDiscrepancyRegionsd(Transform):\n \"\"\"\n Find discrepancy between prediction and actual during click interactions during training.\n\n Args:\n label: key to label source.\n pred: key to prediction source.\n discrepancy: key to store discrepancies found between label and prediction.\n\n \"\"\"\n\n def __init__(self, label: str = \"label\", pred: str = \"pred\", discrepancy: str = \"discrepancy\"):\n self.label = label\n self.pred = pred\n self.discrepancy = discrepancy\n\n @staticmethod\n def disparity(label, pred):\n label = (label > 0.5).astype(np.float32)\n pred = (pred > 0.5).astype(np.float32)\n disparity = label - pred\n\n pos_disparity = (disparity > 0).astype(np.float32)\n neg_disparity = (disparity < 0).astype(np.float32)\n return [pos_disparity, neg_disparity]\n\n def _apply(self, label, pred):\n return self.disparity(label, pred)\n\n def __call__(self, data):\n d = dict(data)\n label = d[self.label]\n pred = d[self.pred]\n\n d[self.discrepancy] = self._apply(label, pred)\n return d\n\n\nclass AddRandomGuidanced(Randomizable, Transform):\n \"\"\"\n Add random guidance based on discrepancies that were found between label and prediction.\n input shape is as below:\n Guidance is of shape (2, N, # of dim)\n Discrepancy is of shape (2, C, D, H, W) or (2, C, H, W)\n Probability is of shape (1)\n\n Args:\n guidance: key to guidance source.\n discrepancy: key that represents discrepancies found between label and prediction.\n probability: key that represents click/interaction probability.\n\n \"\"\"\n\n def __init__(self, guidance: str = \"guidance\", discrepancy: str = \"discrepancy\", probability: str = \"probability\"):\n self.guidance = guidance\n self.discrepancy = discrepancy\n self.probability = probability\n self._will_interact = None\n\n def randomize(self, data=None):\n probability = data[self.probability]\n self._will_interact = self.R.choice([True, False], p=[probability, 1.0 - probability])\n\n def find_guidance(self, discrepancy):\n distance = distance_transform_cdt(discrepancy).flatten()\n probability = np.exp(distance) - 1.0\n idx = np.where(discrepancy.flatten() > 0)[0]\n\n if np.sum(discrepancy > 0) > 0:\n seed = self.R.choice(idx, size=1, p=probability[idx] / np.sum(probability[idx]))\n dst = distance[seed]\n\n g = np.asarray(np.unravel_index(seed, discrepancy.shape)).transpose().tolist()[0]\n g[0] = dst[0]\n return g\n return None\n\n def add_guidance(self, discrepancy, will_interact):\n if not will_interact:\n return None, None\n\n pos_discr = discrepancy[0]\n neg_discr = discrepancy[1]\n\n can_be_positive = np.sum(pos_discr) > 0\n 
can_be_negative = np.sum(neg_discr) > 0\n correct_pos = np.sum(pos_discr) >= np.sum(neg_discr)\n\n if correct_pos and can_be_positive:\n return self.find_guidance(pos_discr), None\n\n if not correct_pos and can_be_negative:\n return None, self.find_guidance(neg_discr)\n return None, None\n\n def _apply(self, guidance, discrepancy):\n guidance = guidance.tolist() if isinstance(guidance, np.ndarray) else guidance\n guidance = json.loads(guidance) if isinstance(guidance, str) else guidance\n pos, neg = self.add_guidance(discrepancy, self._will_interact)\n if pos:\n guidance[0].append(pos)\n guidance[1].append([-1] * len(pos))\n if neg:\n guidance[0].append([-1] * len(neg))\n guidance[1].append(neg)\n\n return json.dumps(np.asarray(guidance, dtype=int).tolist())\n\n def __call__(self, data):\n d = dict(data)\n guidance = d[self.guidance]\n discrepancy = d[self.discrepancy]\n\n self.randomize(data)\n d[self.guidance] = self._apply(guidance, discrepancy)\n return d\n\n\nclass SpatialCropForegroundd(MapTransform):\n \"\"\"\n Crop only the foreground object of the expected images.\n\n Difference VS :py:class:`monai.transforms.CropForegroundd`:\n\n 1. If the bounding box is smaller than spatial size in all dimensions then this transform will crop the\n object using box's center and spatial_size.\n\n 2. This transform will set \"start_coord_key\", \"end_coord_key\", \"original_shape_key\" and \"cropped_shape_key\"\n in data[{key}_{meta_key_postfix}]\n\n The typical usage is to help training and evaluation if the valid part is small in the whole medical image.\n The valid part can be determined by any field in the data with `source_key`, for example:\n\n - Select values > 0 in image field as the foreground and crop on all fields specified by `keys`.\n - Select label = 3 in label field as the foreground to crop on all fields specified by `keys`.\n - Select label > 0 in the third channel of a One-Hot label field as the foreground to crop all `keys` fields.\n\n Users can define arbitrary function to select expected foreground from the whole source image or specified\n channels. And it can also add margin to every dim of the bounding box of foreground object.\n\n Args:\n keys: keys of the corresponding items to be transformed.\n See also: :py:class:`monai.transforms.MapTransform`\n source_key: data source to generate the bounding box of foreground, can be image or label, etc.\n spatial_size: minimal spatial size of the image patch e.g. [128, 128, 128] to fit in.\n select_fn: function to select expected foreground, default is to select values > 0.\n channel_indices: if defined, select foreground only on the specified channels\n of image. 
if None, select foreground on the whole image.\n margin: add margin value to spatial dims of the bounding box, if only 1 value provided, use it for all dims.\n meta_keys: explicitly indicate the key of the corresponding meta data dictionary.\n for example, for data with key `image`, the metadata by default is in `image_meta_dict`.\n the meta data is a dictionary object which contains: filename, original_shape, etc.\n it can be a sequence of string, map to the `keys`.\n if None, will try to construct meta_keys by `key_{meta_key_postfix}`.\n meta_key_postfix: if meta_keys is None, use `{key}_{meta_key_postfix}` to fetch/store the meta data according\n to the key data, default is `meta_dict`, the meta data is a dictionary object.\n For example, to handle key `image`, read/write affine matrices from the\n metadata `image_meta_dict` dictionary's `affine` field.\n start_coord_key: key to record the start coordinate of spatial bounding box for foreground.\n end_coord_key: key to record the end coordinate of spatial bounding box for foreground.\n original_shape_key: key to record original shape for foreground.\n cropped_shape_key: key to record cropped shape for foreground.\n allow_missing_keys: don't raise exception if key is missing.\n \"\"\"\n\n def __init__(\n self,\n keys: KeysCollection,\n source_key: str,\n spatial_size: Union[Sequence[int], np.ndarray],\n select_fn: Callable = is_positive,\n channel_indices: Optional[IndexSelection] = None,\n margin: int = 0,\n meta_keys: Optional[KeysCollection] = None,\n meta_key_postfix=DEFAULT_POST_FIX,\n start_coord_key: str = \"foreground_start_coord\",\n end_coord_key: str = \"foreground_end_coord\",\n original_shape_key: str = \"foreground_original_shape\",\n cropped_shape_key: str = \"foreground_cropped_shape\",\n allow_missing_keys: bool = False,\n ) -> None:\n super().__init__(keys, allow_missing_keys)\n\n self.source_key = source_key\n self.spatial_size = list(spatial_size)\n self.select_fn = select_fn\n self.channel_indices = channel_indices\n self.margin = margin\n self.meta_keys = ensure_tuple_rep(None, len(self.keys)) if meta_keys is None else ensure_tuple(meta_keys)\n if len(self.keys) != len(self.meta_keys):\n raise ValueError(\"meta_keys should have the same length as keys.\")\n self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))\n self.start_coord_key = start_coord_key\n self.end_coord_key = end_coord_key\n self.original_shape_key = original_shape_key\n self.cropped_shape_key = cropped_shape_key\n\n def __call__(self, data):\n d = dict(data)\n box_start, box_end = generate_spatial_bounding_box(\n d[self.source_key], self.select_fn, self.channel_indices, self.margin\n )\n\n center = list(np.mean([box_start, box_end], axis=0).astype(int, copy=False))\n current_size = list(np.subtract(box_end, box_start).astype(int, copy=False))\n\n if np.all(np.less(current_size, self.spatial_size)):\n cropper = SpatialCrop(roi_center=center, roi_size=self.spatial_size)\n box_start = np.array([s.start for s in cropper.slices])\n box_end = np.array([s.stop for s in cropper.slices])\n else:\n cropper = SpatialCrop(roi_start=box_start, roi_end=box_end)\n\n for key, meta_key, meta_key_postfix in self.key_iterator(d, self.meta_keys, self.meta_key_postfix):\n meta_key = meta_key or f\"{key}_{meta_key_postfix}\"\n d[meta_key][self.start_coord_key] = box_start\n d[meta_key][self.end_coord_key] = box_end\n d[meta_key][self.original_shape_key] = d[key].shape\n\n image = cropper(d[key])\n d[meta_key][self.cropped_shape_key] = image.shape\n 
d[key] = image\n return d\n\n\n# Transforms to support Inference for Deepgrow models\nclass AddGuidanceFromPointsd(Transform):\n \"\"\"\n Add guidance based on user clicks.\n\n We assume the input is loaded by LoadImaged and has the shape of (H, W, D) originally.\n Clicks always specify the coordinates in (H, W, D)\n\n If depth_first is True:\n\n Input is now of shape (D, H, W), will return guidance that specifies the coordinates in (D, H, W)\n\n else:\n\n Input is now of shape (H, W, D), will return guidance that specifies the coordinates in (H, W, D)\n\n Args:\n ref_image: key to reference image to fetch current and original image details.\n guidance: output key to store guidance.\n foreground: key that represents user foreground (+ve) clicks.\n background: key that represents user background (-ve) clicks.\n axis: axis that represents slices in 3D volume. (axis to Depth)\n depth_first: if depth (slices) is positioned at first dimension.\n spatial_dims: dimensions based on model used for deepgrow (2D vs 3D).\n slice_key: key that represents applicable slice to add guidance.\n meta_keys: explicitly indicate the key of the meta data dictionary of `ref_image`.\n for example, for data with key `image`, the metadata by default is in `image_meta_dict`.\n the meta data is a dictionary object which contains: filename, original_shape, etc.\n if None, will try to construct meta_keys by `{ref_image}_{meta_key_postfix}`.\n meta_key_postfix: if meta_key is None, use `{ref_image}_{meta_key_postfix}` to fetch the meta data according\n to the key data, default is `meta_dict`, the meta data is a dictionary object.\n For example, to handle key `image`, read/write affine matrices from the\n metadata `image_meta_dict` dictionary's `affine` field.\n\n .. deprecated:: 0.6.0\n ``dimensions`` is deprecated, use ``spatial_dims`` instead.\n\n \"\"\"\n\n @deprecated_arg(name=\"dimensions\", since=\"0.6\", msg_suffix=\"Please use `spatial_dims` instead.\")\n def __init__(\n self,\n ref_image,\n guidance: str = \"guidance\",\n foreground: str = \"foreground\",\n background: str = \"background\",\n axis: int = 0,\n depth_first: bool = True,\n spatial_dims: int = 2,\n slice_key: str = \"slice\",\n meta_keys: Optional[str] = None,\n meta_key_postfix: str = DEFAULT_POST_FIX,\n dimensions: Optional[int] = None,\n ):\n self.ref_image = ref_image\n self.guidance = guidance\n self.foreground = foreground\n self.background = background\n self.axis = axis\n self.depth_first = depth_first\n self.dimensions = spatial_dims if dimensions is None else dimensions\n self.slice = slice_key\n self.meta_keys = meta_keys\n self.meta_key_postfix = meta_key_postfix\n\n def _apply(self, pos_clicks, neg_clicks, factor, slice_num):\n pos = neg = []\n\n if self.dimensions == 2:\n points = list(pos_clicks)\n points.extend(neg_clicks)\n points = np.array(points)\n\n slices = list(np.unique(points[:, self.axis]))\n slice_idx = slices[0] if slice_num is None else next(x for x in slices if x == slice_num)\n\n if len(pos_clicks):\n pos_clicks = np.array(pos_clicks)\n pos = (pos_clicks[np.where(pos_clicks[:, self.axis] == slice_idx)] * factor)[:, 1:].astype(int).tolist()\n if len(neg_clicks):\n neg_clicks = np.array(neg_clicks)\n neg = (neg_clicks[np.where(neg_clicks[:, self.axis] == slice_idx)] * factor)[:, 1:].astype(int).tolist()\n\n guidance = [pos, neg, slice_idx]\n else:\n if len(pos_clicks):\n pos = np.multiply(pos_clicks, factor).astype(int, copy=False).tolist()\n if len(neg_clicks):\n neg = np.multiply(neg_clicks, factor).astype(int, 
copy=False).tolist()\n guidance = [pos, neg]\n return guidance\n\n def __call__(self, data):\n d = dict(data)\n meta_dict_key = self.meta_keys or f\"{self.ref_image}_{self.meta_key_postfix}\"\n if meta_dict_key not in d:\n raise RuntimeError(f\"Missing meta_dict {meta_dict_key} in data!\")\n if \"spatial_shape\" not in d[meta_dict_key]:\n raise RuntimeError('Missing \"spatial_shape\" in meta_dict!')\n original_shape = d[meta_dict_key][\"spatial_shape\"]\n current_shape = list(d[self.ref_image].shape)\n\n if self.depth_first:\n if self.axis != 0:\n raise RuntimeError(\"Depth first means the depth axis should be 0.\")\n # in here we assume the depth dimension was in the last dimension of \"original_shape\"\n original_shape = np.roll(original_shape, 1)\n\n factor = np.array(current_shape) / original_shape\n\n fg_bg_clicks = []\n for key in [self.foreground, self.background]:\n clicks = d[key]\n clicks = list(np.array(clicks, dtype=int))\n if self.depth_first:\n for i in range(len(clicks)):\n clicks[i] = list(np.roll(clicks[i], 1))\n fg_bg_clicks.append(clicks)\n d[self.guidance] = self._apply(fg_bg_clicks[0], fg_bg_clicks[1], factor, d.get(self.slice))\n return d\n\n\nclass SpatialCropGuidanced(MapTransform):\n \"\"\"\n Crop image based on guidance with minimal spatial size.\n\n - If the bounding box is smaller than spatial size in all dimensions then this transform will crop the\n object using box's center and spatial_size.\n\n - This transform will set \"start_coord_key\", \"end_coord_key\", \"original_shape_key\" and \"cropped_shape_key\"\n in data[{key}_{meta_key_postfix}]\n\n Input data is of shape (C, spatial_1, [spatial_2, ...])\n\n Args:\n keys: keys of the corresponding items to be transformed.\n guidance: key to the guidance. It is used to generate the bounding box of foreground\n spatial_size: minimal spatial size of the image patch e.g. 
[128, 128, 128] to fit in.\n margin: add margin value to spatial dims of the bounding box, if only 1 value provided, use it for all dims.\n meta_keys: explicitly indicate the key of the corresponding meta data dictionary.\n for example, for data with key `image`, the metadata by default is in `image_meta_dict`.\n the meta data is a dictionary object which contains: filename, original_shape, etc.\n it can be a sequence of string, map to the `keys`.\n if None, will try to construct meta_keys by `key_{meta_key_postfix}`.\n meta_key_postfix: if meta_keys is None, use `key_{postfix}` to fetch the meta data according\n to the key data, default is `meta_dict`, the meta data is a dictionary object.\n For example, to handle key `image`, read/write affine matrices from the\n metadata `image_meta_dict` dictionary's `affine` field.\n start_coord_key: key to record the start coordinate of spatial bounding box for foreground.\n end_coord_key: key to record the end coordinate of spatial bounding box for foreground.\n original_shape_key: key to record original shape for foreground.\n cropped_shape_key: key to record cropped shape for foreground.\n allow_missing_keys: don't raise exception if key is missing.\n \"\"\"\n\n def __init__(\n self,\n keys: KeysCollection,\n guidance: str,\n spatial_size,\n margin=20,\n meta_keys: Optional[KeysCollection] = None,\n meta_key_postfix=DEFAULT_POST_FIX,\n start_coord_key: str = \"foreground_start_coord\",\n end_coord_key: str = \"foreground_end_coord\",\n original_shape_key: str = \"foreground_original_shape\",\n cropped_shape_key: str = \"foreground_cropped_shape\",\n allow_missing_keys: bool = False,\n ) -> None:\n super().__init__(keys, allow_missing_keys)\n\n self.guidance = guidance\n self.spatial_size = list(spatial_size)\n self.margin = margin\n self.meta_keys = ensure_tuple_rep(None, len(self.keys)) if meta_keys is None else ensure_tuple(meta_keys)\n if len(self.keys) != len(self.meta_keys):\n raise ValueError(\"meta_keys should have the same length as keys.\")\n self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))\n self.start_coord_key = start_coord_key\n self.end_coord_key = end_coord_key\n self.original_shape_key = original_shape_key\n self.cropped_shape_key = cropped_shape_key\n\n def bounding_box(self, points, img_shape):\n ndim = len(img_shape)\n margin = ensure_tuple_rep(self.margin, ndim)\n for m in margin:\n if m < 0:\n raise ValueError(\"margin value should not be negative number.\")\n\n box_start = [0] * ndim\n box_end = [0] * ndim\n\n for di in range(ndim):\n dt = points[..., di]\n min_d = max(min(dt - margin[di]), 0)\n max_d = min(img_shape[di], max(dt + margin[di] + 1))\n box_start[di], box_end[di] = min_d, max_d\n return box_start, box_end\n\n def __call__(self, data):\n d: Dict = dict(data)\n first_key: Union[Hashable, List] = self.first_key(d)\n if first_key == []:\n return d\n\n guidance = d[self.guidance]\n original_spatial_shape = d[first_key].shape[1:]\n box_start, box_end = self.bounding_box(np.array(guidance[0] + guidance[1]), original_spatial_shape)\n center = list(np.mean([box_start, box_end], axis=0).astype(int, copy=False))\n spatial_size = self.spatial_size\n\n box_size = list(np.subtract(box_end, box_start).astype(int, copy=False))\n spatial_size = spatial_size[-len(box_size) :]\n\n if len(spatial_size) < len(box_size):\n # If the data is in 3D and spatial_size is specified as 2D [256,256]\n # Then we will get all slices in such case\n diff = len(box_size) - len(spatial_size)\n spatial_size = 
list(original_spatial_shape[1 : (1 + diff)]) + spatial_size\n\n if np.all(np.less(box_size, spatial_size)):\n if len(center) == 3:\n # 3D Deepgrow: set center to be middle of the depth dimension (D)\n center[0] = spatial_size[0] // 2\n cropper = SpatialCrop(roi_center=center, roi_size=spatial_size)\n else:\n cropper = SpatialCrop(roi_start=box_start, roi_end=box_end)\n\n # update bounding box in case it was corrected by the SpatialCrop constructor\n box_start = np.array([s.start for s in cropper.slices])\n box_end = np.array([s.stop for s in cropper.slices])\n for key, meta_key, meta_key_postfix in self.key_iterator(d, self.meta_keys, self.meta_key_postfix):\n if not np.array_equal(d[key].shape[1:], original_spatial_shape):\n raise RuntimeError(\"All the images specified in keys should have the same spatial shape\")\n meta_key = meta_key or f\"{key}_{meta_key_postfix}\"\n d[meta_key][self.start_coord_key] = box_start\n d[meta_key][self.end_coord_key] = box_end\n d[meta_key][self.original_shape_key] = d[key].shape\n\n image = cropper(d[key])\n d[meta_key][self.cropped_shape_key] = image.shape\n d[key] = image\n\n pos_clicks, neg_clicks = guidance[0], guidance[1]\n pos = np.subtract(pos_clicks, box_start).tolist() if len(pos_clicks) else []\n neg = np.subtract(neg_clicks, box_start).tolist() if len(neg_clicks) else []\n\n d[self.guidance] = [pos, neg]\n return d\n\n\nclass ResizeGuidanced(Transform):\n \"\"\"\n Resize the guidance based on cropped vs resized image.\n\n This transform assumes that the images have been cropped and resized, and that the shape after cropping is stored inside\n the meta dict of the ref image.\n\n Args:\n guidance: key to guidance\n ref_image: key to reference image to fetch current and original image details\n meta_keys: explicitly indicate the key of the meta data dictionary of `ref_image`.\n for example, for data with key `image`, the metadata by default is in `image_meta_dict`.\n the meta data is a dictionary object which contains: filename, original_shape, etc.\n if None, will try to construct meta_keys by `{ref_image}_{meta_key_postfix}`.\n meta_key_postfix: if meta_key is None, use `{ref_image}_{meta_key_postfix}` to fetch the meta data according\n to the key data, default is `meta_dict`, the meta data is a dictionary object.\n For example, to handle key `image`, read/write affine matrices from the\n metadata `image_meta_dict` dictionary's `affine` field.\n cropped_shape_key: key that records cropped shape for foreground.\n \"\"\"\n\n def __init__(\n self,\n guidance: str,\n ref_image: str,\n meta_keys: Optional[str] = None,\n meta_key_postfix: str = DEFAULT_POST_FIX,\n cropped_shape_key: str = \"foreground_cropped_shape\",\n ) -> None:\n self.guidance = guidance\n self.ref_image = ref_image\n self.meta_keys = meta_keys\n self.meta_key_postfix = meta_key_postfix\n self.cropped_shape_key = cropped_shape_key\n\n def __call__(self, data):\n d = dict(data)\n guidance = d[self.guidance]\n meta_dict: Dict = d[self.meta_keys or f\"{self.ref_image}_{self.meta_key_postfix}\"]\n current_shape = d[self.ref_image].shape[1:]\n cropped_shape = meta_dict[self.cropped_shape_key][1:]\n factor = np.divide(current_shape, cropped_shape)\n\n pos_clicks, neg_clicks = guidance[0], guidance[1]\n pos = np.multiply(pos_clicks, factor).astype(int, copy=False).tolist() if len(pos_clicks) else []\n neg = np.multiply(neg_clicks, factor).astype(int, copy=False).tolist() if len(neg_clicks) else []\n\n d[self.guidance] = [pos, neg]\n return d\n\n\nclass RestoreLabeld(MapTransform):\n \"\"\"\n Restores 
label based on the ref image.\n\n The ref_image is assumed to have gone through the following transforms:\n\n 1. Fetch2DSliced (If 2D)\n 2. Spacingd\n 3. SpatialCropGuidanced\n 4. Resized\n\n And its shape is assumed to be (C, D, H, W)\n\n This transform tries to undo these operations so that the resulting label can be overlaid on the original volume.\n It does the following operations:\n\n 1. Undo Resized\n 2. Undo SpatialCropGuidanced\n 3. Undo Spacingd\n 4. Undo Fetch2DSliced\n\n The resulting label is of shape (D, H, W)\n\n Args:\n keys: keys of the corresponding items to be transformed.\n ref_image: reference image to fetch current and original image details\n slice_only: apply only to an applicable slice, in case of 2D model/prediction\n mode: {``\"constant\"``, ``\"edge\"``, ``\"linear_ramp\"``, ``\"maximum\"``, ``\"mean\"``,\n ``\"median\"``, ``\"minimum\"``, ``\"reflect\"``, ``\"symmetric\"``, ``\"wrap\"``, ``\"empty\"``}\n One of the listed string values or a user supplied function for padding. Defaults to ``\"constant\"``.\n See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html\n align_corners: Geometrically, we consider the pixels of the input as squares rather than points.\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html\n It also can be a sequence of bool, each element corresponds to a key in ``keys``.\n meta_keys: explicitly indicate the key of the corresponding meta data dictionary.\n for example, for data with key `image`, the metadata by default is in `image_meta_dict`.\n the meta data is a dictionary object which contains: filename, original_shape, etc.\n it can be a sequence of string, map to the `keys`.\n if None, will try to construct meta_keys by `key_{meta_key_postfix}`.\n meta_key_postfix: if meta_key is None, use `key_{meta_key_postfix}` to fetch the meta data according\n to the key data, default is `meta_dict`, the meta data is a dictionary object.\n For example, to handle key `image`, read/write affine matrices from the\n metadata `image_meta_dict` dictionary's `affine` field.\n start_coord_key: key that records the start coordinate of spatial bounding box for foreground.\n end_coord_key: key that records the end coordinate of spatial bounding box for foreground.\n original_shape_key: key that records original shape for foreground.\n cropped_shape_key: key that records cropped shape for foreground.\n allow_missing_keys: don't raise exception if key is missing.\n \"\"\"\n\n def __init__(\n self,\n keys: KeysCollection,\n ref_image: str,\n slice_only: bool = False,\n mode: Union[Sequence[Union[InterpolateMode, str]], InterpolateMode, str] = InterpolateMode.NEAREST,\n align_corners: Union[Sequence[Optional[bool]], Optional[bool]] = None,\n meta_keys: Optional[str] = None,\n meta_key_postfix: str = DEFAULT_POST_FIX,\n start_coord_key: str = \"foreground_start_coord\",\n end_coord_key: str = \"foreground_end_coord\",\n original_shape_key: str = \"foreground_original_shape\",\n cropped_shape_key: str = \"foreground_cropped_shape\",\n allow_missing_keys: bool = False,\n ) -> None:\n super().__init__(keys, allow_missing_keys)\n self.ref_image = ref_image\n self.slice_only = slice_only\n self.mode = ensure_tuple_rep(mode, len(self.keys))\n self.align_corners = ensure_tuple_rep(align_corners, len(self.keys))\n self.meta_keys = ensure_tuple_rep(None, len(self.keys)) if meta_keys is None else ensure_tuple(meta_keys)\n if len(self.keys) != len(self.meta_keys):\n raise ValueError(\"meta_keys should have the same length as 
keys.\")\n self.meta_key_postfix = meta_key_postfix\n self.start_coord_key = start_coord_key\n self.end_coord_key = end_coord_key\n self.original_shape_key = original_shape_key\n self.cropped_shape_key = cropped_shape_key\n\n def __call__(self, data):\n d = dict(data)\n meta_dict: Dict = d[f\"{self.ref_image}_{self.meta_key_postfix}\"]\n\n for key, mode, align_corners, meta_key in self.key_iterator(d, self.mode, self.align_corners, self.meta_keys):\n image = d[key]\n\n # Undo Resize\n current_shape = image.shape\n cropped_shape = meta_dict[self.cropped_shape_key]\n if np.any(np.not_equal(current_shape, cropped_shape)):\n resizer = Resize(spatial_size=cropped_shape[1:], mode=mode)\n image = resizer(image, mode=mode, align_corners=align_corners)\n\n # Undo Crop\n original_shape = meta_dict[self.original_shape_key]\n result = np.zeros(original_shape, dtype=np.float32)\n box_start = meta_dict[self.start_coord_key]\n box_end = meta_dict[self.end_coord_key]\n\n spatial_dims = min(len(box_start), len(image.shape[1:]))\n slices = [slice(None)] + [slice(s, e) for s, e in zip(box_start[:spatial_dims], box_end[:spatial_dims])]\n slices = tuple(slices)\n result[slices] = image\n\n # Undo Spacing\n current_size = result.shape[1:]\n # change spatial_shape from HWD to DHW\n spatial_shape = list(np.roll(meta_dict[\"spatial_shape\"], 1))\n spatial_size = spatial_shape[-len(current_size) :]\n\n if np.any(np.not_equal(current_size, spatial_size)):\n resizer = Resize(spatial_size=spatial_size, mode=mode)\n result = resizer(result, mode=mode, align_corners=align_corners)\n\n # Undo Slicing\n slice_idx = meta_dict.get(\"slice_idx\")\n if slice_idx is None or self.slice_only:\n final_result = result if len(result.shape) <= 3 else result[0]\n else:\n slice_idx = meta_dict[\"slice_idx\"][0]\n final_result = np.zeros(tuple(spatial_shape))\n final_result[slice_idx] = result\n d[key] = final_result\n\n meta_key = meta_key or f\"{key}_{self.meta_key_postfix}\"\n meta = d.get(meta_key)\n if meta is None:\n meta = dict()\n d[meta_key] = meta\n meta[\"slice_idx\"] = slice_idx\n meta[\"affine\"] = meta_dict[\"original_affine\"]\n return d\n\n\nclass Fetch2DSliced(MapTransform):\n \"\"\"\n Fetch one slice in case of a 3D volume.\n\n The volume only contains spatial coordinates.\n\n Args:\n keys: keys of the corresponding items to be transformed.\n guidance: key that represents guidance.\n axis: axis that represents slice in 3D volume.\n meta_keys: explicitly indicate the key of the corresponding meta data dictionary.\n for example, for data with key `image`, the metadata by default is in `image_meta_dict`.\n the meta data is a dictionary object which contains: filename, original_shape, etc.\n it can be a sequence of string, map to the `keys`.\n if None, will try to construct meta_keys by `key_{meta_key_postfix}`.\n meta_key_postfix: use `key_{meta_key_postfix}` to fetch the meta data according to the key data,\n default is `meta_dict`, the meta data is a dictionary object.\n For example, to handle key `image`, read/write affine matrices from the\n metadata `image_meta_dict` dictionary's `affine` field.\n allow_missing_keys: don't raise exception if key is missing.\n \"\"\"\n\n def __init__(\n self,\n keys,\n guidance=\"guidance\",\n axis: int = 0,\n meta_keys: Optional[KeysCollection] = None,\n meta_key_postfix: str = DEFAULT_POST_FIX,\n allow_missing_keys: bool = False,\n ):\n super().__init__(keys, allow_missing_keys)\n self.guidance = guidance\n self.axis = axis\n self.meta_keys = ensure_tuple_rep(None, len(self.keys)) 
if meta_keys is None else ensure_tuple(meta_keys)\n if len(self.keys) != len(self.meta_keys):\n raise ValueError(\"meta_keys should have the same length as keys.\")\n self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))\n\n def _apply(self, image, guidance):\n slice_idx = guidance[2] # (pos, neg, slice_idx)\n idx = []\n for i, size_i in enumerate(image.shape):\n idx.append(slice_idx if i == self.axis else slice(0, size_i))\n\n idx = tuple(idx)\n return image[idx], idx\n\n def __call__(self, data):\n d = dict(data)\n guidance = d[self.guidance]\n if len(guidance) < 3:\n raise RuntimeError(\"Guidance does not contain slice_idx!\")\n for key, meta_key, meta_key_postfix in self.key_iterator(d, self.meta_keys, self.meta_key_postfix):\n img_slice, idx = self._apply(d[key], guidance)\n d[key] = img_slice\n d[meta_key or f\"{key}_{meta_key_postfix}\"][\"slice_idx\"] = idx\n return d\n" ]
[ [ "torch.nn.Upsample" ], [ "numpy.isnan", "numpy.nanpercentile", "numpy.nanmean", "numpy.transpose" ], [ "numpy.zeros", "numpy.testing.assert_allclose", "torch.tensor" ], [ "torch.randn", "torch.cuda.is_available" ], [ "numpy.asarray", "numpy.max", "numpy.mean", "numpy.interp", "numpy.zeros", "numpy.where" ], [ "numpy.asarray", "numpy.concatenate", "numpy.max", "numpy.mean", "numpy.exp", "numpy.where", "numpy.roll", "numpy.divide", "numpy.unique", "numpy.less", "numpy.subtract", "torch.tensor", "numpy.unravel_index", "numpy.zeros", "numpy.multiply", "numpy.min", "numpy.not_equal", "numpy.array", "numpy.sum", "numpy.array_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
awsm-research/LineVul
[ "246baf18c1932094564a10c9b81efb21914b2978" ]
[ "bow_rf/rf_main.py" ]
[ "import pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import f1_score, precision_score, recall_score\nimport pickle\nimport numpy as np\n\n\n# load train, val data\ntrain = pd.read_csv('../data/big-vul_dataset/train.csv')\nval = pd.read_csv('../data/big-vul_dataset/val.csv')\n# use train + val data to fit the model\ntrain_data = pd.concat([train, val])\n# load test data\ntest_data = pd.read_csv('../data/big-vul_dataset/test.csv')\n# textual code data\nX_train = train_data[\"processed_func\"]\nX_test = test_data[\"processed_func\"]\n# labels\ny_train = train_data[\"target\"]\ny_test = test_data[\"target\"]\n# apply BoW feature extraction\nvectorizer = TfidfVectorizer(norm='l2', max_features=1000)\nvectorizer = vectorizer.fit(X_train)\nX_train = vectorizer.transform(X_train).todense()\nX_test = vectorizer.transform(X_test).todense()\n# train the model\nrf = RandomForestClassifier(n_estimators=1000,\n n_jobs=-1,\n verbose=1)\nrf.fit(X_train, y_train)\npreds = rf.predict(X_test)\nf1 = f1_score(y_true=y_test, y_pred=preds)\nprecision = precision_score(y_true=y_test, y_pred=preds)\nrecall = recall_score(y_true=y_test, y_pred=preds)\nprint(f\"F1 Score: {f1}\")\nprint(f\"Precision: {precision}\")\nprint(f\"Recall: {recall}\")\n\nwith open('./saved_models/best_f1_rf.pkl', 'wb') as f:\n pickle.dump(rf, f)\n\nprint(\"done\")\n" ]
[ [ "pandas.concat", "pandas.read_csv", "sklearn.ensemble.RandomForestClassifier", "sklearn.metrics.precision_score", "sklearn.metrics.f1_score", "sklearn.metrics.recall_score", "sklearn.feature_extraction.text.TfidfVectorizer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
robcalon/PyETM
[ "323418ad57b2df7d47f2495919c943db28ca55cc" ]
[ "pyETM/curves/hourly_hydrogen_curves.py" ]
[ "import io\nimport pandas\n\nclass HourlyHydrogenCurves:\n \n @property\n def hourly_hydrogen_curves(self):\n \n # get hourly hydrogen curves\n if self._hourly_hydrogen_curves is None:\n self.get_hourly_hydrogen_curves()\n \n return self._hourly_hydrogen_curves\n \n def get_hourly_hydrogen_curves(self):\n \"\"\"get the hourly hydrogen curves\"\"\"\n \n # raise without scenario id\n self._raise_scenario_id()\n \n # prepare post\n headers = {'Connection':'close'}\n post = f'/scenarios/{self.scenario_id}/curves/hydrogen'\n \n # request response and extract data\n resp = self.get(post, headers=headers)\n data = io.StringIO(resp)\n \n # convert data to dataframe and set DateTime\n curves = pandas.read_csv(data, index_col='Time', \n parse_dates=True).asfreq('H')\n curves.index.name = 'DateTime'\n \n # set corresponsing parameter property\n self._hourly_hydrogen_curves = curves\n \n return curves" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
mengwanguc/torchvision-meng
[ "395942756b0b29053d25b50cdef86f709601453e", "09ce6758ad6ab3177f3fdec3b1bb7c7033b16712" ]
[ "torchvision/models/squeezenet.py", "torchvision/transforms/transforms.py" ]
[ "import time\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\nfrom .utils import load_state_dict_from_url\nfrom typing import Any\n\n__all__ = ['SqueezeNet', 'squeezenet1_0', 'squeezenet1_1']\n\nmodel_urls = {\n 'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',\n 'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',\n}\n\n\nclass Fire(nn.Module):\n\n def __init__(\n self,\n inplanes: int,\n squeeze_planes: int,\n expand1x1_planes: int,\n expand3x3_planes: int\n ) -> None:\n super(Fire, self).__init__()\n self.inplanes = inplanes\n self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)\n self.squeeze_activation = nn.ReLU(inplace=True)\n self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes,\n kernel_size=1)\n self.expand1x1_activation = nn.ReLU(inplace=True)\n self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes,\n kernel_size=3, padding=1)\n self.expand3x3_activation = nn.ReLU(inplace=True)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.squeeze_activation(self.squeeze(x))\n return torch.cat([\n self.expand1x1_activation(self.expand1x1(x)),\n self.expand3x3_activation(self.expand3x3(x))\n ], 1)\n\n\nclass SqueezeNet(nn.Module):\n\n def __init__(\n self,\n version: str = '1_0',\n num_classes: int = 1000\n ) -> None:\n super(SqueezeNet, self).__init__()\n self.num_classes = num_classes\n if version == '1_0':\n self.features = nn.Sequential(\n nn.Conv2d(3, 96, kernel_size=7, stride=2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),\n Fire(96, 16, 64, 64),\n Fire(128, 16, 64, 64),\n Fire(128, 32, 128, 128),\n nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),\n Fire(256, 32, 128, 128),\n Fire(256, 48, 192, 192),\n Fire(384, 48, 192, 192),\n Fire(384, 64, 256, 256),\n nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),\n Fire(512, 64, 256, 256),\n )\n elif version == '1_1':\n self.features = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=3, stride=2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),\n Fire(64, 16, 64, 64),\n Fire(128, 16, 64, 64),\n nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),\n Fire(128, 32, 128, 128),\n Fire(256, 32, 128, 128),\n nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),\n Fire(256, 48, 192, 192),\n Fire(384, 48, 192, 192),\n Fire(384, 64, 256, 256),\n Fire(512, 64, 256, 256),\n )\n else:\n # FIXME: Is this needed? 
SqueezeNet should only be called from the\n # FIXME: squeezenet1_x() functions\n # FIXME: This checking is not done for the other models\n raise ValueError(\"Unsupported SqueezeNet version {version}:\"\n \"1_0 or 1_1 expected\".format(version=version))\n\n # Final convolution is initialized differently from the rest\n final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1)\n self.classifier = nn.Sequential(\n nn.Dropout(p=0.5),\n final_conv,\n nn.ReLU(inplace=True),\n nn.AdaptiveAvgPool2d((1, 1))\n )\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n if m is final_conv:\n init.normal_(m.weight, mean=0.0, std=0.01)\n else:\n init.kaiming_uniform_(m.weight)\n if m.bias is not None:\n init.constant_(m.bias, 0)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n end = time.time()\n x = self.features(x)\n x = self.classifier(x)\n res = torch.flatten(x, 1)\n single_forward_time = time.time() - end\n print(\"squeezenet single_forward_time: {}\".format(single_forward_time))\n return res\n\ndef _squeezenet(version: str, pretrained: bool, progress: bool, **kwargs: Any) -> SqueezeNet:\n model = SqueezeNet(version, **kwargs)\n if pretrained:\n arch = 'squeezenet' + version\n state_dict = load_state_dict_from_url(model_urls[arch],\n progress=progress)\n model.load_state_dict(state_dict)\n return model\n\n\ndef squeezenet1_0(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> SqueezeNet:\n r\"\"\"SqueezeNet model architecture from the `\"SqueezeNet: AlexNet-level\n accuracy with 50x fewer parameters and <0.5MB model size\"\n <https://arxiv.org/abs/1602.07360>`_ paper.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _squeezenet('1_0', pretrained, progress, **kwargs)\n\n\ndef squeezenet1_1(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> SqueezeNet:\n r\"\"\"SqueezeNet 1.1 model from the `official SqueezeNet repo\n <https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.\n SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters\n than SqueezeNet 1.0, without sacrificing accuracy.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _squeezenet('1_1', pretrained, progress, **kwargs)\n", "import math\nimport numbers\nimport random\nimport warnings\nfrom collections.abc import Sequence\nfrom typing import Tuple, List, Optional\n\nimport torch\nfrom torch import Tensor\n\ntry:\n import accimage\nexcept ImportError:\n accimage = None\n\nfrom . 
import functional as F\nfrom .functional import InterpolationMode, _interpolation_modes_from_int\n\n\n__all__ = [\"Compose\", \"ToTensor\", \"PILToTensor\", \"ConvertImageDtype\", \"ToPILImage\", \"Normalize\", \"Resize\", \"Scale\",\n \"CenterCrop\", \"Pad\", \"Lambda\", \"RandomApply\", \"RandomChoice\", \"RandomOrder\", \"RandomCrop\",\n \"RandomHorizontalFlip\", \"RandomVerticalFlip\", \"RandomResizedCrop\", \"RandomSizedCrop\", \"FiveCrop\", \"TenCrop\",\n \"LinearTransformation\", \"ColorJitter\", \"RandomRotation\", \"RandomAffine\", \"Grayscale\", \"RandomGrayscale\",\n \"RandomPerspective\", \"RandomErasing\", \"GaussianBlur\", \"InterpolationMode\", \"RandomInvert\", \"RandomPosterize\",\n \"RandomSolarize\", \"RandomAdjustSharpness\", \"RandomAutocontrast\", \"RandomEqualize\"]\n\n\nclass Compose:\n \"\"\"Composes several transforms together. This transform does not support torchscript.\n Please, see the note below.\n\n Args:\n transforms (list of ``Transform`` objects): list of transforms to compose.\n\n Example:\n >>> transforms.Compose([\n >>> transforms.CenterCrop(10),\n >>> transforms.ToTensor(),\n >>> ])\n\n .. note::\n In order to script the transformations, please use ``torch.nn.Sequential`` as below.\n\n >>> transforms = torch.nn.Sequential(\n >>> transforms.CenterCrop(10),\n >>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n >>> )\n >>> scripted_transforms = torch.jit.script(transforms)\n\n Make sure to use only scriptable transformations, i.e. that work with ``torch.Tensor``, does not require\n `lambda` functions or ``PIL.Image``.\n\n \"\"\"\n\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, img):\n for t in self.transforms:\n img = t(img)\n return img\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n for t in self.transforms:\n format_string += '\\n'\n format_string += ' {0}'.format(t)\n format_string += '\\n)'\n return format_string\n\n\nclass ToTensor:\n \"\"\"Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor. This transform does not support torchscript.\n\n Converts a PIL Image or numpy.ndarray (H x W x C) in the range\n [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]\n if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)\n or if the numpy.ndarray has dtype = np.uint8\n\n In the other cases, tensors are returned without scaling.\n\n .. note::\n Because the input image is scaled to [0.0, 1.0], this transformation should not be used when\n transforming target image masks. See the `references`_ for implementing the transforms for image masks.\n\n .. _references: https://github.com/pytorch/vision/tree/master/references/segmentation\n \"\"\"\n\n def __call__(self, pic):\n \"\"\"\n Args:\n pic (PIL Image or numpy.ndarray): Image to be converted to tensor.\n\n Returns:\n Tensor: Converted image.\n \"\"\"\n return F.to_tensor(pic)\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\n\nclass PILToTensor:\n \"\"\"Convert a ``PIL Image`` to a tensor of the same type. 
This transform does not support torchscript.\n\n Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).\n \"\"\"\n\n def __call__(self, pic):\n \"\"\"\n Args:\n pic (PIL Image): Image to be converted to tensor.\n\n Returns:\n Tensor: Converted image.\n \"\"\"\n return F.pil_to_tensor(pic)\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\n\nclass ConvertImageDtype(torch.nn.Module):\n \"\"\"Convert a tensor image to the given ``dtype`` and scale the values accordingly\n This function does not support PIL Image.\n\n Args:\n dtype (torch.dtype): Desired data type of the output\n\n .. note::\n\n When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.\n If converted back and forth, this mismatch has no effect.\n\n Raises:\n RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as\n well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to\n overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range\n of the integer ``dtype``.\n \"\"\"\n\n def __init__(self, dtype: torch.dtype) -> None:\n super().__init__()\n self.dtype = dtype\n\n def forward(self, image):\n return F.convert_image_dtype(image, self.dtype)\n\n\nclass ToPILImage:\n \"\"\"Convert a tensor or an ndarray to PIL Image. This transform does not support torchscript.\n\n Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape\n H x W x C to a PIL Image while preserving the value range.\n\n Args:\n mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).\n If ``mode`` is ``None`` (default) there are some assumptions made about the input data:\n - If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.\n - If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.\n - If the input has 2 channels, the ``mode`` is assumed to be ``LA``.\n - If the input has 1 channel, the ``mode`` is determined by the data type (i.e ``int``, ``float``,\n ``short``).\n\n .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes\n \"\"\"\n def __init__(self, mode=None):\n self.mode = mode\n\n def __call__(self, pic):\n \"\"\"\n Args:\n pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.\n\n Returns:\n PIL Image: Image converted to PIL Image.\n\n \"\"\"\n return F.to_pil_image(pic, self.mode)\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n if self.mode is not None:\n format_string += 'mode={0}'.format(self.mode)\n format_string += ')'\n return format_string\n\n\nclass Normalize(torch.nn.Module):\n \"\"\"Normalize a tensor image with mean and standard deviation.\n This transform does not support PIL Image.\n Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],..,std[n])`` for ``n``\n channels, this transform will normalize each channel of the input\n ``torch.*Tensor`` i.e.,\n ``output[channel] = (input[channel] - mean[channel]) / std[channel]``\n\n .. 
note::\n This transform acts out of place, i.e., it does not mutate the input tensor.\n\n Args:\n mean (sequence): Sequence of means for each channel.\n std (sequence): Sequence of standard deviations for each channel.\n inplace(bool,optional): Bool to make this operation in-place.\n\n \"\"\"\n\n def __init__(self, mean, std, inplace=False):\n super().__init__()\n self.mean = mean\n self.std = std\n self.inplace = inplace\n\n def forward(self, tensor: Tensor) -> Tensor:\n \"\"\"\n Args:\n tensor (Tensor): Tensor image to be normalized.\n\n Returns:\n Tensor: Normalized Tensor image.\n \"\"\"\n return F.normalize(tensor, self.mean, self.std, self.inplace)\n\n def __repr__(self):\n return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)\n\n\nclass Resize(torch.nn.Module):\n \"\"\"Resize the input image to the given size.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions\n\n Args:\n size (sequence or int): Desired output size. If size is a sequence like\n (h, w), output size will be matched to this. If size is an int,\n smaller edge of the image will be matched to this number.\n i.e, if height > width, then image will be rescaled to\n (size * height / width, size).\n In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.\n interpolation (InterpolationMode): Desired interpolation enum defined by\n :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.\n If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` and\n ``InterpolationMode.BICUBIC`` are supported.\n For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.\n\n \"\"\"\n\n def __init__(self, size, interpolation=InterpolationMode.BILINEAR):\n super().__init__()\n if not isinstance(size, (int, Sequence)):\n raise TypeError(\"Size should be int or sequence. Got {}\".format(type(size)))\n if isinstance(size, Sequence) and len(size) not in (1, 2):\n raise ValueError(\"If size is a sequence, it should have 1 or 2 values\")\n self.size = size\n\n # Backward compatibility with integer value\n if isinstance(interpolation, int):\n warnings.warn(\n \"Argument interpolation should be of type InterpolationMode instead of int. \"\n \"Please, use InterpolationMode enum.\"\n )\n interpolation = _interpolation_modes_from_int(interpolation)\n\n self.interpolation = interpolation\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be scaled.\n\n Returns:\n PIL Image or Tensor: Rescaled image.\n \"\"\"\n return F.resize(img, self.size, self.interpolation)\n\n def __repr__(self):\n interpolate_str = self.interpolation.value\n return self.__class__.__name__ + '(size={0}, interpolation={1})'.format(self.size, interpolate_str)\n\n\nclass Scale(Resize):\n \"\"\"\n Note: This transform is deprecated in favor of Resize.\n \"\"\"\n def __init__(self, *args, **kwargs):\n warnings.warn(\"The use of the transforms.Scale transform is deprecated, \" +\n \"please use transforms.Resize instead.\")\n super(Scale, self).__init__(*args, **kwargs)\n\n\nclass CenterCrop(torch.nn.Module):\n \"\"\"Crops the given image at the center.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions\n\n Args:\n size (sequence or int): Desired output size of the crop. 
If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).\n \"\"\"\n\n def __init__(self, size):\n super().__init__()\n self.size = _setup_size(size, error_msg=\"Please provide only two dimensions (h, w) for size.\")\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped.\n\n Returns:\n PIL Image or Tensor: Cropped image.\n \"\"\"\n return F.center_crop(img, self.size)\n\n def __repr__(self):\n return self.__class__.__name__ + '(size={0})'.format(self.size)\n\n\nclass Pad(torch.nn.Module):\n \"\"\"Pad the given image on all sides with the given \"pad\" value.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means at most 2 leading dimensions for mode reflect and symmetric,\n at most 3 leading dimensions for mode edge,\n and an arbitrary number of leading dimensions for mode constant\n\n Args:\n padding (int or sequence): Padding on each border. If a single int is provided this\n is used to pad all borders. If sequence of length 2 is provided this is the padding\n on left/right and top/bottom respectively. If a sequence of length 4 is provided\n this is the padding for the left, top, right and bottom borders respectively.\n In torchscript mode padding as single int is not supported, use a sequence of length 1: ``[padding, ]``.\n fill (number or str or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of\n length 3, it is used to fill R, G, B channels respectively.\n This value is only used when the padding_mode is constant.\n Only number is supported for torch Tensor.\n Only int or str or tuple value is supported for PIL Image.\n padding_mode (str): Type of padding. 
Should be: constant, edge, reflect or symmetric.\n Default is constant.\n\n - constant: pads with a constant value, this value is specified with fill\n\n - edge: pads with the last value at the edge of the image,\n if input a 5D torch Tensor, the last 3 dimensions will be padded instead of the last 2\n\n - reflect: pads with reflection of image without repeating the last value on the edge\n\n For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode\n will result in [3, 2, 1, 2, 3, 4, 3, 2]\n\n - symmetric: pads with reflection of image repeating the last value on the edge\n\n For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode\n will result in [2, 1, 1, 2, 3, 4, 4, 3]\n \"\"\"\n\n def __init__(self, padding, fill=0, padding_mode=\"constant\"):\n super().__init__()\n if not isinstance(padding, (numbers.Number, tuple, list)):\n raise TypeError(\"Got inappropriate padding arg\")\n\n if not isinstance(fill, (numbers.Number, str, tuple)):\n raise TypeError(\"Got inappropriate fill arg\")\n\n if padding_mode not in [\"constant\", \"edge\", \"reflect\", \"symmetric\"]:\n raise ValueError(\"Padding mode should be either constant, edge, reflect or symmetric\")\n\n if isinstance(padding, Sequence) and len(padding) not in [1, 2, 4]:\n raise ValueError(\"Padding must be an int or a 1, 2, or 4 element tuple, not a \" +\n \"{} element tuple\".format(len(padding)))\n\n self.padding = padding\n self.fill = fill\n self.padding_mode = padding_mode\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be padded.\n\n Returns:\n PIL Image or Tensor: Padded image.\n \"\"\"\n return F.pad(img, self.padding, self.fill, self.padding_mode)\n\n def __repr__(self):\n return self.__class__.__name__ + '(padding={0}, fill={1}, padding_mode={2})'.\\\n format(self.padding, self.fill, self.padding_mode)\n\n\nclass Lambda:\n \"\"\"Apply a user-defined lambda as a transform. This transform does not support torchscript.\n\n Args:\n lambd (function): Lambda/function to be used for transform.\n \"\"\"\n\n def __init__(self, lambd):\n if not callable(lambd):\n raise TypeError(\"Argument lambd should be callable, got {}\".format(repr(type(lambd).__name__)))\n self.lambd = lambd\n\n def __call__(self, img):\n return self.lambd(img)\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\n\nclass RandomTransforms:\n \"\"\"Base class for a list of transformations with randomness\n\n Args:\n transforms (sequence): list of transformations\n \"\"\"\n\n def __init__(self, transforms):\n if not isinstance(transforms, Sequence):\n raise TypeError(\"Argument transforms should be a sequence\")\n self.transforms = transforms\n\n def __call__(self, *args, **kwargs):\n raise NotImplementedError()\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n for t in self.transforms:\n format_string += '\\n'\n format_string += ' {0}'.format(t)\n format_string += '\\n)'\n return format_string\n\n\nclass RandomApply(torch.nn.Module):\n \"\"\"Apply randomly a list of transformations with a given probability.\n\n .. note::\n In order to script the transformation, please use ``torch.nn.ModuleList`` as input instead of list/tuple of\n transforms as shown below:\n\n >>> transforms = transforms.RandomApply(torch.nn.ModuleList([\n >>> transforms.ColorJitter(),\n >>> ]), p=0.3)\n >>> scripted_transforms = torch.jit.script(transforms)\n\n Make sure to use only scriptable transformations, i.e. 
that work with ``torch.Tensor``, does not require\n `lambda` functions or ``PIL.Image``.\n\n Args:\n transforms (sequence or torch.nn.Module): list of transformations\n p (float): probability\n \"\"\"\n\n def __init__(self, transforms, p=0.5):\n super().__init__()\n self.transforms = transforms\n self.p = p\n\n def forward(self, img):\n if self.p < torch.rand(1):\n return img\n for t in self.transforms:\n img = t(img)\n return img\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n format_string += '\\n p={}'.format(self.p)\n for t in self.transforms:\n format_string += '\\n'\n format_string += ' {0}'.format(t)\n format_string += '\\n)'\n return format_string\n\n\nclass RandomOrder(RandomTransforms):\n \"\"\"Apply a list of transformations in a random order. This transform does not support torchscript.\n \"\"\"\n def __call__(self, img):\n order = list(range(len(self.transforms)))\n random.shuffle(order)\n for i in order:\n img = self.transforms[i](img)\n return img\n\n\nclass RandomChoice(RandomTransforms):\n \"\"\"Apply single transformation randomly picked from a list. This transform does not support torchscript.\n \"\"\"\n def __call__(self, img):\n t = random.choice(self.transforms)\n return t(img)\n\n\nclass RandomCrop(torch.nn.Module):\n \"\"\"Crop the given image at a random location.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions,\n but if non-constant padding is used, the input is expected to have at most 2 leading dimensions\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).\n padding (int or sequence, optional): Optional padding on each border\n of the image. Default is None. If a single int is provided this\n is used to pad all borders. If sequence of length 2 is provided this is the padding\n on left/right and top/bottom respectively. If a sequence of length 4 is provided\n this is the padding for the left, top, right and bottom borders respectively.\n In torchscript mode padding as single int is not supported, use a sequence of length 1: ``[padding, ]``.\n pad_if_needed (boolean): It will pad the image if smaller than the\n desired size to avoid raising an exception. Since cropping is done\n after padding, the padding seems to be done at a random offset.\n fill (number or str or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of\n length 3, it is used to fill R, G, B channels respectively.\n This value is only used when the padding_mode is constant.\n Only number is supported for torch Tensor.\n Only int or str or tuple value is supported for PIL Image.\n padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric. 
Default is constant.\n\n - constant: pads with a constant value, this value is specified with fill\n\n - edge: pads with the last value on the edge of the image\n\n - reflect: pads with reflection of image (without repeating the last value on the edge)\n\n padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode\n will result in [3, 2, 1, 2, 3, 4, 3, 2]\n\n - symmetric: pads with reflection of image (repeating the last value on the edge)\n\n padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode\n will result in [2, 1, 1, 2, 3, 4, 4, 3]\n\n \"\"\"\n\n @staticmethod\n def get_params(img: Tensor, output_size: Tuple[int, int]) -> Tuple[int, int, int, int]:\n \"\"\"Get parameters for ``crop`` for a random crop.\n\n Args:\n img (PIL Image or Tensor): Image to be cropped.\n output_size (tuple): Expected output size of the crop.\n\n Returns:\n tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.\n \"\"\"\n w, h = F._get_image_size(img)\n th, tw = output_size\n\n if h + 1 < th or w + 1 < tw:\n raise ValueError(\n \"Required crop size {} is larger than input image size {}\".format((th, tw), (h, w))\n )\n\n if w == tw and h == th:\n return 0, 0, h, w\n\n i = torch.randint(0, h - th + 1, size=(1, )).item()\n j = torch.randint(0, w - tw + 1, size=(1, )).item()\n return i, j, th, tw\n\n def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode=\"constant\"):\n super().__init__()\n\n self.size = tuple(_setup_size(\n size, error_msg=\"Please provide only two dimensions (h, w) for size.\"\n ))\n\n self.padding = padding\n self.pad_if_needed = pad_if_needed\n self.fill = fill\n self.padding_mode = padding_mode\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped.\n\n Returns:\n PIL Image or Tensor: Cropped image.\n \"\"\"\n if self.padding is not None:\n img = F.pad(img, self.padding, self.fill, self.padding_mode)\n\n width, height = F._get_image_size(img)\n # pad the width if needed\n if self.pad_if_needed and width < self.size[1]:\n padding = [self.size[1] - width, 0]\n img = F.pad(img, padding, self.fill, self.padding_mode)\n # pad the height if needed\n if self.pad_if_needed and height < self.size[0]:\n padding = [0, self.size[0] - height]\n img = F.pad(img, padding, self.fill, self.padding_mode)\n\n i, j, h, w = self.get_params(img, self.size)\n\n return F.crop(img, i, j, h, w)\n\n def __repr__(self):\n return self.__class__.__name__ + \"(size={0}, padding={1})\".format(self.size, self.padding)\n\n\nclass RandomHorizontalFlip(torch.nn.Module):\n \"\"\"Horizontally flip the given image randomly with a given probability.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading\n dimensions\n\n Args:\n p (float): probability of the image being flipped. Default value is 0.5\n \"\"\"\n\n def __init__(self, p=0.5):\n super().__init__()\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be flipped.\n\n Returns:\n PIL Image or Tensor: Randomly flipped image.\n \"\"\"\n if torch.rand(1) < self.p:\n return F.hflip(img)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nclass RandomVerticalFlip(torch.nn.Module):\n \"\"\"Vertically flip the given image randomly with a given probability.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... 
means an arbitrary number of leading\n dimensions\n\n Args:\n p (float): probability of the image being flipped. Default value is 0.5\n \"\"\"\n\n def __init__(self, p=0.5):\n super().__init__()\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be flipped.\n\n Returns:\n PIL Image or Tensor: Randomly flipped image.\n \"\"\"\n if torch.rand(1) < self.p:\n return F.vflip(img)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nclass RandomPerspective(torch.nn.Module):\n \"\"\"Performs a random perspective transformation of the given image with a given probability.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.\n\n Args:\n distortion_scale (float): argument to control the degree of distortion and ranges from 0 to 1.\n Default is 0.5.\n p (float): probability of the image being transformed. Default is 0.5.\n interpolation (InterpolationMode): Desired interpolation enum defined by\n :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.\n If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.\n For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.\n fill (sequence or number): Pixel fill value for the area outside the transformed\n image. Default is ``0``. If given a number, the value is used for all bands respectively.\n If input is PIL Image, the options is only available for ``Pillow>=5.0.0``.\n \"\"\"\n\n def __init__(self, distortion_scale=0.5, p=0.5, interpolation=InterpolationMode.BILINEAR, fill=0):\n super().__init__()\n self.p = p\n\n # Backward compatibility with integer value\n if isinstance(interpolation, int):\n warnings.warn(\n \"Argument interpolation should be of type InterpolationMode instead of int. 
\"\n \"Please, use InterpolationMode enum.\"\n )\n interpolation = _interpolation_modes_from_int(interpolation)\n\n self.interpolation = interpolation\n self.distortion_scale = distortion_scale\n\n if fill is None:\n fill = 0\n elif not isinstance(fill, (Sequence, numbers.Number)):\n raise TypeError(\"Fill should be either a sequence or a number.\")\n\n self.fill = fill\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be Perspectively transformed.\n\n Returns:\n PIL Image or Tensor: Randomly transformed image.\n \"\"\"\n\n fill = self.fill\n if isinstance(img, Tensor):\n if isinstance(fill, (int, float)):\n fill = [float(fill)] * F._get_image_num_channels(img)\n else:\n fill = [float(f) for f in fill]\n\n if torch.rand(1) < self.p:\n width, height = F._get_image_size(img)\n startpoints, endpoints = self.get_params(width, height, self.distortion_scale)\n return F.perspective(img, startpoints, endpoints, self.interpolation, fill)\n return img\n\n @staticmethod\n def get_params(width: int, height: int, distortion_scale: float) -> Tuple[List[List[int]], List[List[int]]]:\n \"\"\"Get parameters for ``perspective`` for a random perspective transform.\n\n Args:\n width (int): width of the image.\n height (int): height of the image.\n distortion_scale (float): argument to control the degree of distortion and ranges from 0 to 1.\n\n Returns:\n List containing [top-left, top-right, bottom-right, bottom-left] of the original image,\n List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.\n \"\"\"\n half_height = height // 2\n half_width = width // 2\n topleft = [\n int(torch.randint(0, int(distortion_scale * half_width) + 1, size=(1, )).item()),\n int(torch.randint(0, int(distortion_scale * half_height) + 1, size=(1, )).item())\n ]\n topright = [\n int(torch.randint(width - int(distortion_scale * half_width) - 1, width, size=(1, )).item()),\n int(torch.randint(0, int(distortion_scale * half_height) + 1, size=(1, )).item())\n ]\n botright = [\n int(torch.randint(width - int(distortion_scale * half_width) - 1, width, size=(1, )).item()),\n int(torch.randint(height - int(distortion_scale * half_height) - 1, height, size=(1, )).item())\n ]\n botleft = [\n int(torch.randint(0, int(distortion_scale * half_width) + 1, size=(1, )).item()),\n int(torch.randint(height - int(distortion_scale * half_height) - 1, height, size=(1, )).item())\n ]\n startpoints = [[0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]]\n endpoints = [topleft, topright, botright, botleft]\n return startpoints, endpoints\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nclass RandomResizedCrop(torch.nn.Module):\n \"\"\"Crop the given image to random size and aspect ratio.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions\n\n A crop of random size (default: of 0.08 to 1.0) of the original size and a random\n aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop\n is finally resized to given size.\n This is popularly used to train the Inception networks.\n\n Args:\n size (int or sequence): expected output size of each edge. If size is an\n int instead of sequence like (h, w), a square output size ``(size, size)`` is\n made. 
If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).\n In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.\n scale (tuple of float): scale range of the cropped image before resizing, relatively to the origin image.\n ratio (tuple of float): aspect ratio range of the cropped image before resizing.\n interpolation (InterpolationMode): Desired interpolation enum defined by\n :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.\n If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` and\n ``InterpolationMode.BICUBIC`` are supported.\n For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.\n\n \"\"\"\n\n def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=InterpolationMode.BILINEAR):\n super().__init__()\n self.size = _setup_size(size, error_msg=\"Please provide only two dimensions (h, w) for size.\")\n\n if not isinstance(scale, Sequence):\n raise TypeError(\"Scale should be a sequence\")\n if not isinstance(ratio, Sequence):\n raise TypeError(\"Ratio should be a sequence\")\n if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):\n warnings.warn(\"Scale and ratio should be of kind (min, max)\")\n\n # Backward compatibility with integer value\n if isinstance(interpolation, int):\n warnings.warn(\n \"Argument interpolation should be of type InterpolationMode instead of int. \"\n \"Please, use InterpolationMode enum.\"\n )\n interpolation = _interpolation_modes_from_int(interpolation)\n\n self.interpolation = interpolation\n self.scale = scale\n self.ratio = ratio\n\n @staticmethod\n def get_params(\n img: Tensor, scale: List[float], ratio: List[float]\n ) -> Tuple[int, int, int, int]:\n \"\"\"Get parameters for ``crop`` for a random sized crop.\n\n Args:\n img (PIL Image or Tensor): Input image.\n scale (list): range of scale of the origin size cropped\n ratio (list): range of aspect ratio of the origin aspect ratio cropped\n\n Returns:\n tuple: params (i, j, h, w) to be passed to ``crop`` for a random\n sized crop.\n \"\"\"\n width, height = F._get_image_size(img)\n area = height * width\n\n for _ in range(10):\n target_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()\n log_ratio = torch.log(torch.tensor(ratio))\n aspect_ratio = torch.exp(\n torch.empty(1).uniform_(log_ratio[0], log_ratio[1])\n ).item()\n\n w = int(round(math.sqrt(target_area * aspect_ratio)))\n h = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if 0 < w <= width and 0 < h <= height:\n i = torch.randint(0, height - h + 1, size=(1,)).item()\n j = torch.randint(0, width - w + 1, size=(1,)).item()\n return i, j, h, w\n\n # Fallback to central crop\n in_ratio = float(width) / float(height)\n if in_ratio < min(ratio):\n w = width\n h = int(round(w / min(ratio)))\n elif in_ratio > max(ratio):\n h = height\n w = int(round(h * max(ratio)))\n else: # whole image\n w = width\n h = height\n i = (height - h) // 2\n j = (width - w) // 2\n return i, j, h, w\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped and resized.\n\n Returns:\n PIL Image or Tensor: Randomly cropped and resized image.\n \"\"\"\n i, j, h, w = self.get_params(img, self.scale, self.ratio)\n return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)\n\n def __repr__(self):\n interpolate_str = self.interpolation.value\n format_string = self.__class__.__name__ + 
'(size={0}'.format(self.size)\n format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))\n format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))\n format_string += ', interpolation={0})'.format(interpolate_str)\n return format_string\n\n\nclass RandomSizedCrop(RandomResizedCrop):\n \"\"\"\n Note: This transform is deprecated in favor of RandomResizedCrop.\n \"\"\"\n def __init__(self, *args, **kwargs):\n warnings.warn(\"The use of the transforms.RandomSizedCrop transform is deprecated, \" +\n \"please use transforms.RandomResizedCrop instead.\")\n super(RandomSizedCrop, self).__init__(*args, **kwargs)\n\n\nclass FiveCrop(torch.nn.Module):\n \"\"\"Crop the given image into four corners and the central crop.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading\n dimensions\n\n .. Note::\n This transform returns a tuple of images and there may be a mismatch in the number of\n inputs and targets your Dataset returns. See below for an example of how to deal with\n this.\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an ``int``\n instead of sequence like (h, w), a square crop of size (size, size) is made.\n If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).\n\n Example:\n >>> transform = Compose([\n >>> FiveCrop(size), # this is a list of PIL Images\n >>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor\n >>> ])\n >>> #In your test loop you can do the following:\n >>> input, target = batch # input is a 5d tensor, target is 2d\n >>> bs, ncrops, c, h, w = input.size()\n >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops\n >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops\n \"\"\"\n\n def __init__(self, size):\n super().__init__()\n self.size = _setup_size(size, error_msg=\"Please provide only two dimensions (h, w) for size.\")\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped.\n\n Returns:\n tuple of 5 images. Image can be PIL Image or Tensor\n \"\"\"\n return F.five_crop(img, self.size)\n\n def __repr__(self):\n return self.__class__.__name__ + '(size={0})'.format(self.size)\n\n\nclass TenCrop(torch.nn.Module):\n \"\"\"Crop the given image into four corners and the central crop plus the flipped version of\n these (horizontal flipping is used by default).\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading\n dimensions\n\n .. Note::\n This transform returns a tuple of images and there may be a mismatch in the number of\n inputs and targets your Dataset returns. See below for an example of how to deal with\n this.\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made. 
If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).\n vertical_flip (bool): Use vertical flipping instead of horizontal\n\n Example:\n >>> transform = Compose([\n >>> TenCrop(size), # this is a list of PIL Images\n >>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor\n >>> ])\n >>> #In your test loop you can do the following:\n >>> input, target = batch # input is a 5d tensor, target is 2d\n >>> bs, ncrops, c, h, w = input.size()\n >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops\n >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops\n \"\"\"\n\n def __init__(self, size, vertical_flip=False):\n super().__init__()\n self.size = _setup_size(size, error_msg=\"Please provide only two dimensions (h, w) for size.\")\n self.vertical_flip = vertical_flip\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped.\n\n Returns:\n tuple of 10 images. Image can be PIL Image or Tensor\n \"\"\"\n return F.ten_crop(img, self.size, self.vertical_flip)\n\n def __repr__(self):\n return self.__class__.__name__ + '(size={0}, vertical_flip={1})'.format(self.size, self.vertical_flip)\n\n\nclass LinearTransformation(torch.nn.Module):\n \"\"\"Transform a tensor image with a square transformation matrix and a mean_vector computed\n offline.\n This transform does not support PIL Image.\n Given transformation_matrix and mean_vector, will flatten the torch.*Tensor and\n subtract mean_vector from it which is then followed by computing the dot\n product with the transformation matrix and then reshaping the tensor to its\n original shape.\n\n Applications:\n whitening transformation: Suppose X is a column vector zero-centered data.\n Then compute the data covariance matrix [D x D] with torch.mm(X.t(), X),\n perform SVD on this matrix and pass it as transformation_matrix.\n\n Args:\n transformation_matrix (Tensor): tensor [D x D], D = C x H x W\n mean_vector (Tensor): tensor [D], D = C x H x W\n \"\"\"\n\n def __init__(self, transformation_matrix, mean_vector):\n super().__init__()\n if transformation_matrix.size(0) != transformation_matrix.size(1):\n raise ValueError(\"transformation_matrix should be square. Got \" +\n \"[{} x {}] rectangular matrix.\".format(*transformation_matrix.size()))\n\n if mean_vector.size(0) != transformation_matrix.size(0):\n raise ValueError(\"mean_vector should have the same length {}\".format(mean_vector.size(0)) +\n \" as any one of the dimensions of the transformation_matrix [{}]\"\n .format(tuple(transformation_matrix.size())))\n\n if transformation_matrix.device != mean_vector.device:\n raise ValueError(\"Input tensors should be on the same device. 
Got {} and {}\"\n .format(transformation_matrix.device, mean_vector.device))\n\n self.transformation_matrix = transformation_matrix\n self.mean_vector = mean_vector\n\n def forward(self, tensor: Tensor) -> Tensor:\n \"\"\"\n Args:\n tensor (Tensor): Tensor image to be whitened.\n\n Returns:\n Tensor: Transformed image.\n \"\"\"\n shape = tensor.shape\n n = shape[-3] * shape[-2] * shape[-1]\n if n != self.transformation_matrix.shape[0]:\n raise ValueError(\"Input tensor and transformation matrix have incompatible shape. \" +\n \"[{} x {} x {}] != \".format(shape[-3], shape[-2], shape[-1]) +\n \"{}\".format(self.transformation_matrix.shape[0]))\n\n if tensor.device.type != self.mean_vector.device.type:\n raise ValueError(\"Input tensor should be on the same device as transformation matrix and mean vector. \"\n \"Got {} vs {}\".format(tensor.device, self.mean_vector.device))\n\n flat_tensor = tensor.view(-1, n) - self.mean_vector\n transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)\n tensor = transformed_tensor.view(shape)\n return tensor\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '(transformation_matrix='\n format_string += str(self.transformation_matrix.tolist())\n format_string += (\", mean_vector=\" + str(self.mean_vector.tolist()) + ')')\n return format_string\n\n\nclass ColorJitter(torch.nn.Module):\n \"\"\"Randomly change the brightness, contrast, saturation and hue of an image.\n If the image is torch Tensor, it is expected\n to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions.\n If img is PIL Image, mode \"1\", \"L\", \"I\", \"F\" and modes with transparency (alpha channel) are not supported.\n\n Args:\n brightness (float or tuple of float (min, max)): How much to jitter brightness.\n brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]\n or the given [min, max]. Should be non negative numbers.\n contrast (float or tuple of float (min, max)): How much to jitter contrast.\n contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]\n or the given [min, max]. Should be non negative numbers.\n saturation (float or tuple of float (min, max)): How much to jitter saturation.\n saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]\n or the given [min, max]. 
Should be non negative numbers.\n hue (float or tuple of float (min, max)): How much to jitter hue.\n hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].\n Should have 0 <= hue <= 0.5 or -0.5 <= min <= max <= 0.5.\n \"\"\"\n\n def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):\n super().__init__()\n self.brightness = self._check_input(brightness, 'brightness')\n self.contrast = self._check_input(contrast, 'contrast')\n self.saturation = self._check_input(saturation, 'saturation')\n self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),\n clip_first_on_zero=False)\n\n @torch.jit.unused\n def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):\n if isinstance(value, numbers.Number):\n if value < 0:\n raise ValueError(\"If {} is a single number, it must be non negative.\".format(name))\n value = [center - float(value), center + float(value)]\n if clip_first_on_zero:\n value[0] = max(value[0], 0.0)\n elif isinstance(value, (tuple, list)) and len(value) == 2:\n if not bound[0] <= value[0] <= value[1] <= bound[1]:\n raise ValueError(\"{} values should be between {}\".format(name, bound))\n else:\n raise TypeError(\"{} should be a single number or a list/tuple with length 2.\".format(name))\n\n # if value is 0 or (1., 1.) for brightness/contrast/saturation\n # or (0., 0.) for hue, do nothing\n if value[0] == value[1] == center:\n value = None\n return value\n\n @staticmethod\n def get_params(brightness: Optional[List[float]],\n contrast: Optional[List[float]],\n saturation: Optional[List[float]],\n hue: Optional[List[float]]\n ) -> Tuple[Tensor, Optional[float], Optional[float], Optional[float], Optional[float]]:\n \"\"\"Get the parameters for the randomized transform to be applied on image.\n\n Args:\n brightness (tuple of float (min, max), optional): The range from which the brightness_factor is chosen\n uniformly. Pass None to turn off the transformation.\n contrast (tuple of float (min, max), optional): The range from which the contrast_factor is chosen\n uniformly. Pass None to turn off the transformation.\n saturation (tuple of float (min, max), optional): The range from which the saturation_factor is chosen\n uniformly. 
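\n\n Example (an illustrative sketch of how the sampled parameters are consumed; mirrors the ``forward`` method below):\n\n >>> fn_idx, b, c, s, h = ColorJitter.get_params([0.6, 1.4], None, None, None)\n >>> # fn_idx is a random permutation of 0..3; b is a brightness factor,\n >>> # while c, s and h are None because those jitters were turned off\n\n 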
Pass None to turn off the transformation.\n hue (tuple of float (min, max), optional): The range from which the hue_factor is chosen uniformly.\n Pass None to turn off the transformation.\n\n Returns:\n tuple: The parameters used to apply the randomized transform\n along with their random order.\n \"\"\"\n fn_idx = torch.randperm(4)\n\n b = None if brightness is None else float(torch.empty(1).uniform_(brightness[0], brightness[1]))\n c = None if contrast is None else float(torch.empty(1).uniform_(contrast[0], contrast[1]))\n s = None if saturation is None else float(torch.empty(1).uniform_(saturation[0], saturation[1]))\n h = None if hue is None else float(torch.empty(1).uniform_(hue[0], hue[1]))\n\n return fn_idx, b, c, s, h\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Input image.\n\n Returns:\n PIL Image or Tensor: Color jittered image.\n \"\"\"\n fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor = \\\n self.get_params(self.brightness, self.contrast, self.saturation, self.hue)\n\n for fn_id in fn_idx:\n if fn_id == 0 and brightness_factor is not None:\n img = F.adjust_brightness(img, brightness_factor)\n elif fn_id == 1 and contrast_factor is not None:\n img = F.adjust_contrast(img, contrast_factor)\n elif fn_id == 2 and saturation_factor is not None:\n img = F.adjust_saturation(img, saturation_factor)\n elif fn_id == 3 and hue_factor is not None:\n img = F.adjust_hue(img, hue_factor)\n\n return img\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n format_string += 'brightness={0}'.format(self.brightness)\n format_string += ', contrast={0}'.format(self.contrast)\n format_string += ', saturation={0}'.format(self.saturation)\n format_string += ', hue={0})'.format(self.hue)\n return format_string\n\n\nclass RandomRotation(torch.nn.Module):\n \"\"\"Rotate the image by angle.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.\n\n Args:\n degrees (sequence or number): Range of degrees to select from.\n If degrees is a number instead of sequence like (min, max), the range of degrees\n will be (-degrees, +degrees).\n interpolation (InterpolationMode): Desired interpolation enum defined by\n :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.\n If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.\n For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.\n expand (bool, optional): Optional expansion flag.\n If true, expands the output to make it large enough to hold the entire rotated image.\n If false or omitted, make the output image the same size as the input image.\n Note that the expand flag assumes rotation around the center and no translation.\n center (sequence, optional): Optional center of rotation, (x, y). Origin is the upper left corner.\n Default is the center of the image.\n fill (sequence or number): Pixel fill value for the area outside the rotated\n image. Default is ``0``. If given a number, the value is used for all bands respectively.\n If input is PIL Image, the options is only available for ``Pillow>=5.2.0``.\n resample (int, optional): deprecated argument and will be removed since v0.10.0.\n Please use the ``interpolation`` parameter instead.\n\n .. 
_filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters\n\n \"\"\"\n\n def __init__(\n self, degrees, interpolation=InterpolationMode.NEAREST, expand=False, center=None, fill=0, resample=None\n ):\n super().__init__()\n if resample is not None:\n warnings.warn(\n \"Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead\"\n )\n interpolation = _interpolation_modes_from_int(resample)\n\n # Backward compatibility with integer value\n if isinstance(interpolation, int):\n warnings.warn(\n \"Argument interpolation should be of type InterpolationMode instead of int. \"\n \"Please, use InterpolationMode enum.\"\n )\n interpolation = _interpolation_modes_from_int(interpolation)\n\n self.degrees = _setup_angle(degrees, name=\"degrees\", req_sizes=(2, ))\n\n if center is not None:\n _check_sequence_input(center, \"center\", req_sizes=(2, ))\n\n self.center = center\n\n self.resample = self.interpolation = interpolation\n self.expand = expand\n\n if fill is None:\n fill = 0\n elif not isinstance(fill, (Sequence, numbers.Number)):\n raise TypeError(\"Fill should be either a sequence or a number.\")\n\n self.fill = fill\n\n @staticmethod\n def get_params(degrees: List[float]) -> float:\n \"\"\"Get parameters for ``rotate`` for a random rotation.\n\n Returns:\n float: angle parameter to be passed to ``rotate`` for random rotation.\n \"\"\"\n angle = float(torch.empty(1).uniform_(float(degrees[0]), float(degrees[1])).item())\n return angle\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be rotated.\n\n Returns:\n PIL Image or Tensor: Rotated image.\n \"\"\"\n fill = self.fill\n if isinstance(img, Tensor):\n if isinstance(fill, (int, float)):\n fill = [float(fill)] * F._get_image_num_channels(img)\n else:\n fill = [float(f) for f in fill]\n angle = self.get_params(self.degrees)\n\n return F.rotate(img, angle, self.resample, self.expand, self.center, fill)\n\n def __repr__(self):\n interpolate_str = self.interpolation.value\n format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees)\n format_string += ', interpolation={0}'.format(interpolate_str)\n format_string += ', expand={0}'.format(self.expand)\n if self.center is not None:\n format_string += ', center={0}'.format(self.center)\n if self.fill is not None:\n format_string += ', fill={0}'.format(self.fill)\n format_string += ')'\n return format_string\n\n\nclass RandomAffine(torch.nn.Module):\n \"\"\"Random affine transformation of the image keeping center invariant.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.\n\n Args:\n degrees (sequence or number): Range of degrees to select from.\n If degrees is a number instead of sequence like (min, max), the range of degrees\n will be (-degrees, +degrees). Set to 0 to deactivate rotations.\n translate (tuple, optional): tuple of maximum absolute fraction for horizontal\n and vertical translations. For example translate=(a, b), then horizontal shift\n is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is\n randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default.\n scale (tuple, optional): scaling factor interval, e.g (a, b), then scale is\n randomly sampled from the range a <= scale <= b. 
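\n\n Example (a minimal usage sketch added for illustration; ``img`` is assumed to be created elsewhere):\n\n >>> aff = RandomAffine(degrees=10, translate=(0.1, 0.1), scale=(0.9, 1.1))\n >>> out = aff(img) # randomly rotated, shifted and scaled, center kept invariant\n\n 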
Will keep original scale by default.\n shear (sequence or number, optional): Range of degrees to select from.\n If shear is a number, a shear parallel to the x axis in the range (-shear, +shear)\n will be applied. Else if shear is a sequence of 2 values a shear parallel to the x axis in the\n range (shear[0], shear[1]) will be applied. Else if shear is a sequence of 4 values,\n a x-axis shear in (shear[0], shear[1]) and y-axis shear in (shear[2], shear[3]) will be applied.\n Will not apply shear by default.\n interpolation (InterpolationMode): Desired interpolation enum defined by\n :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.\n If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.\n For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.\n fill (sequence or number): Pixel fill value for the area outside the transformed\n image. Default is ``0``. If given a number, the value is used for all bands respectively.\n If input is PIL Image, the options is only available for ``Pillow>=5.0.0``.\n fillcolor (sequence or number, optional): deprecated argument and will be removed since v0.10.0.\n Please use the ``fill`` parameter instead.\n resample (int, optional): deprecated argument and will be removed since v0.10.0.\n Please use the ``interpolation`` parameter instead.\n\n .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters\n\n \"\"\"\n\n def __init__(\n self, degrees, translate=None, scale=None, shear=None, interpolation=InterpolationMode.NEAREST, fill=0,\n fillcolor=None, resample=None\n ):\n super().__init__()\n if resample is not None:\n warnings.warn(\n \"Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead\"\n )\n interpolation = _interpolation_modes_from_int(resample)\n\n # Backward compatibility with integer value\n if isinstance(interpolation, int):\n warnings.warn(\n \"Argument interpolation should be of type InterpolationMode instead of int. \"\n \"Please, use InterpolationMode enum.\"\n )\n interpolation = _interpolation_modes_from_int(interpolation)\n\n if fillcolor is not None:\n warnings.warn(\n \"Argument fillcolor is deprecated and will be removed since v0.10.0. 
Please, use fill instead\"\n )\n fill = fillcolor\n\n self.degrees = _setup_angle(degrees, name=\"degrees\", req_sizes=(2, ))\n\n if translate is not None:\n _check_sequence_input(translate, \"translate\", req_sizes=(2, ))\n for t in translate:\n if not (0.0 <= t <= 1.0):\n raise ValueError(\"translation values should be between 0 and 1\")\n self.translate = translate\n\n if scale is not None:\n _check_sequence_input(scale, \"scale\", req_sizes=(2, ))\n for s in scale:\n if s <= 0:\n raise ValueError(\"scale values should be positive\")\n self.scale = scale\n\n if shear is not None:\n self.shear = _setup_angle(shear, name=\"shear\", req_sizes=(2, 4))\n else:\n self.shear = shear\n\n self.resample = self.interpolation = interpolation\n\n if fill is None:\n fill = 0\n elif not isinstance(fill, (Sequence, numbers.Number)):\n raise TypeError(\"Fill should be either a sequence or a number.\")\n\n self.fillcolor = self.fill = fill\n\n @staticmethod\n def get_params(\n degrees: List[float],\n translate: Optional[List[float]],\n scale_ranges: Optional[List[float]],\n shears: Optional[List[float]],\n img_size: List[int]\n ) -> Tuple[float, Tuple[int, int], float, Tuple[float, float]]:\n \"\"\"Get parameters for affine transformation\n\n Returns:\n params to be passed to the affine transformation\n \"\"\"\n angle = float(torch.empty(1).uniform_(float(degrees[0]), float(degrees[1])).item())\n if translate is not None:\n max_dx = float(translate[0] * img_size[0])\n max_dy = float(translate[1] * img_size[1])\n tx = int(round(torch.empty(1).uniform_(-max_dx, max_dx).item()))\n ty = int(round(torch.empty(1).uniform_(-max_dy, max_dy).item()))\n translations = (tx, ty)\n else:\n translations = (0, 0)\n\n if scale_ranges is not None:\n scale = float(torch.empty(1).uniform_(scale_ranges[0], scale_ranges[1]).item())\n else:\n scale = 1.0\n\n shear_x = shear_y = 0.0\n if shears is not None:\n shear_x = float(torch.empty(1).uniform_(shears[0], shears[1]).item())\n if len(shears) == 4:\n shear_y = float(torch.empty(1).uniform_(shears[2], shears[3]).item())\n\n shear = (shear_x, shear_y)\n\n return angle, translations, scale, shear\n\n def forward(self, img):\n \"\"\"\n img (PIL Image or Tensor): Image to be transformed.\n\n Returns:\n PIL Image or Tensor: Affine transformed image.\n \"\"\"\n fill = self.fill\n if isinstance(img, Tensor):\n if isinstance(fill, (int, float)):\n fill = [float(fill)] * F._get_image_num_channels(img)\n else:\n fill = [float(f) for f in fill]\n\n img_size = F._get_image_size(img)\n\n ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, img_size)\n\n return F.affine(img, *ret, interpolation=self.interpolation, fill=fill)\n\n def __repr__(self):\n s = '{name}(degrees={degrees}'\n if self.translate is not None:\n s += ', translate={translate}'\n if self.scale is not None:\n s += ', scale={scale}'\n if self.shear is not None:\n s += ', shear={shear}'\n if self.interpolation != InterpolationMode.NEAREST:\n s += ', interpolation={interpolation}'\n if self.fill != 0:\n s += ', fill={fill}'\n s += ')'\n d = dict(self.__dict__)\n d['interpolation'] = self.interpolation.value\n return s.format(name=self.__class__.__name__, **d)\n\n\nclass Grayscale(torch.nn.Module):\n \"\"\"Convert image to grayscale.\n If the image is torch Tensor, it is expected\n to have [..., 3, H, W] shape, where ... 
means an arbitrary number of leading dimensions\n\n Args:\n num_output_channels (int): (1 or 3) number of channels desired for output image\n\n Returns:\n PIL Image: Grayscale version of the input.\n - If ``num_output_channels == 1`` : returned image is single channel\n - If ``num_output_channels == 3`` : returned image is 3 channel with r == g == b\n\n \"\"\"\n\n def __init__(self, num_output_channels=1):\n super().__init__()\n self.num_output_channels = num_output_channels\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be converted to grayscale.\n\n Returns:\n PIL Image or Tensor: Grayscaled image.\n \"\"\"\n return F.rgb_to_grayscale(img, num_output_channels=self.num_output_channels)\n\n def __repr__(self):\n return self.__class__.__name__ + '(num_output_channels={0})'.format(self.num_output_channels)\n\n\nclass RandomGrayscale(torch.nn.Module):\n \"\"\"Randomly convert image to grayscale with a probability of p (default 0.1).\n If the image is torch Tensor, it is expected\n to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions\n\n Args:\n p (float): probability that image should be converted to grayscale.\n\n Returns:\n PIL Image or Tensor: Grayscale version of the input image with probability p and unchanged\n with probability (1-p).\n - If input image is 1 channel: grayscale version is 1 channel\n - If input image is 3 channel: grayscale version is 3 channel with r == g == b\n\n \"\"\"\n\n def __init__(self, p=0.1):\n super().__init__()\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be converted to grayscale.\n\n Returns:\n PIL Image or Tensor: Randomly grayscaled image.\n \"\"\"\n num_output_channels = F._get_image_num_channels(img)\n if torch.rand(1) < self.p:\n return F.rgb_to_grayscale(img, num_output_channels=num_output_channels)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={0})'.format(self.p)\n\n\nclass RandomErasing(torch.nn.Module):\n \"\"\" Randomly selects a rectangle region in an torch Tensor image and erases its pixels.\n This transform does not support PIL Image.\n 'Random Erasing Data Augmentation' by Zhong et al. See https://arxiv.org/abs/1708.04896\n\n Args:\n p: probability that the random erasing operation will be performed.\n scale: range of proportion of erased area against input image.\n ratio: range of aspect ratio of erased area.\n value: erasing value. Default is 0. If a single int, it is used to\n erase all pixels. If a tuple of length 3, it is used to erase\n R, G, B channels respectively.\n If a str of 'random', erasing each pixel with random values.\n inplace: boolean to make this transform inplace. 
Default set to False.\n\n Returns:\n Erased Image.\n\n Example:\n >>> transform = transforms.Compose([\n >>> transforms.RandomHorizontalFlip(),\n >>> transforms.ToTensor(),\n >>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n >>> transforms.RandomErasing(),\n >>> ])\n \"\"\"\n\n def __init__(self, p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0, inplace=False):\n super().__init__()\n if not isinstance(value, (numbers.Number, str, tuple, list)):\n raise TypeError(\"Argument value should be either a number or str or a sequence\")\n if isinstance(value, str) and value != \"random\":\n raise ValueError(\"If value is str, it should be 'random'\")\n if not isinstance(scale, (tuple, list)):\n raise TypeError(\"Scale should be a sequence\")\n if not isinstance(ratio, (tuple, list)):\n raise TypeError(\"Ratio should be a sequence\")\n if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):\n warnings.warn(\"Scale and ratio should be of kind (min, max)\")\n if scale[0] < 0 or scale[1] > 1:\n raise ValueError(\"Scale should be between 0 and 1\")\n if p < 0 or p > 1:\n raise ValueError(\"Random erasing probability should be between 0 and 1\")\n\n self.p = p\n self.scale = scale\n self.ratio = ratio\n self.value = value\n self.inplace = inplace\n\n @staticmethod\n def get_params(\n img: Tensor, scale: Tuple[float, float], ratio: Tuple[float, float], value: Optional[List[float]] = None\n ) -> Tuple[int, int, int, int, Tensor]:\n \"\"\"Get parameters for ``erase`` for a random erasing.\n\n Args:\n img (Tensor): Tensor image to be erased.\n scale (sequence): range of proportion of erased area against input image.\n ratio (sequence): range of aspect ratio of erased area.\n value (list, optional): erasing value. If None, it is interpreted as \"random\"\n (erasing each pixel with random values). If ``len(value)`` is 1, it is interpreted as a number,\n i.e. 
``value[0]``.\n\n Returns:\n tuple: params (i, j, h, w, v) to be passed to ``erase`` for random erasing.\n \"\"\"\n img_c, img_h, img_w = img.shape[-3], img.shape[-2], img.shape[-1]\n area = img_h * img_w\n\n for _ in range(10):\n erase_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()\n aspect_ratio = torch.empty(1).uniform_(ratio[0], ratio[1]).item()\n\n h = int(round(math.sqrt(erase_area * aspect_ratio)))\n w = int(round(math.sqrt(erase_area / aspect_ratio)))\n if not (h < img_h and w < img_w):\n continue\n\n if value is None:\n v = torch.empty([img_c, h, w], dtype=torch.float32).normal_()\n else:\n v = torch.tensor(value)[:, None, None]\n\n i = torch.randint(0, img_h - h + 1, size=(1, )).item()\n j = torch.randint(0, img_w - w + 1, size=(1, )).item()\n return i, j, h, w, v\n\n # Return original image\n return 0, 0, img_h, img_w, img\n\n def forward(self, img):\n \"\"\"\n Args:\n img (Tensor): Tensor image to be erased.\n\n Returns:\n img (Tensor): Erased Tensor image.\n \"\"\"\n if torch.rand(1) < self.p:\n\n # cast self.value to script acceptable type\n if isinstance(self.value, (int, float)):\n value = [self.value, ]\n elif isinstance(self.value, str):\n value = None\n elif isinstance(self.value, tuple):\n value = list(self.value)\n else:\n value = self.value\n\n if value is not None and not (len(value) in (1, img.shape[-3])):\n raise ValueError(\n \"If value is a sequence, it should have either a single value or \"\n \"{} (number of input channels)\".format(img.shape[-3])\n )\n\n x, y, h, w, v = self.get_params(img, scale=self.scale, ratio=self.ratio, value=value)\n return F.erase(img, x, y, h, w, v, self.inplace)\n return img\n\n\nclass GaussianBlur(torch.nn.Module):\n \"\"\"Blurs image with randomly chosen Gaussian blur.\n If the image is torch Tensor, it is expected\n to have [..., C, H, W] shape, where ... means an arbitrary number of leading dimensions.\n\n Args:\n kernel_size (int or sequence): Size of the Gaussian kernel.\n sigma (float or tuple of float (min, max)): Standard deviation to be used for\n creating kernel to perform blurring. If float, sigma is fixed. If it is tuple\n of float (min, max), sigma is chosen uniformly at random to lie in the\n given range.\n\n Returns:\n PIL Image or Tensor: Gaussian blurred version of the input image.\n\n \"\"\"\n\n def __init__(self, kernel_size, sigma=(0.1, 2.0)):\n super().__init__()\n self.kernel_size = _setup_size(kernel_size, \"Kernel size should be a tuple/list of two integers\")\n for ks in self.kernel_size:\n if ks <= 0 or ks % 2 == 0:\n raise ValueError(\"Kernel size value should be an odd and positive number.\")\n\n if isinstance(sigma, numbers.Number):\n if sigma <= 0:\n raise ValueError(\"If sigma is a single number, it must be positive.\")\n sigma = (sigma, sigma)\n elif isinstance(sigma, Sequence) and len(sigma) == 2:\n if not 0. 
< sigma[0] <= sigma[1]:\n raise ValueError(\"sigma values should be positive and of the form (min, max).\")\n else:\n raise ValueError(\"sigma should be a single number or a list/tuple with length 2.\")\n\n self.sigma = sigma\n\n @staticmethod\n def get_params(sigma_min: float, sigma_max: float) -> float:\n \"\"\"Choose sigma for random gaussian blurring.\n\n Args:\n sigma_min (float): Minimum standard deviation that can be chosen for blurring kernel.\n sigma_max (float): Maximum standard deviation that can be chosen for blurring kernel.\n\n Returns:\n float: Standard deviation to be passed to calculate kernel for gaussian blurring.\n \"\"\"\n return torch.empty(1).uniform_(sigma_min, sigma_max).item()\n\n def forward(self, img: Tensor) -> Tensor:\n \"\"\"\n Args:\n img (PIL Image or Tensor): image to be blurred.\n\n Returns:\n PIL Image or Tensor: Gaussian blurred image\n \"\"\"\n sigma = self.get_params(self.sigma[0], self.sigma[1])\n return F.gaussian_blur(img, self.kernel_size, [sigma, sigma])\n\n def __repr__(self):\n s = '(kernel_size={}, '.format(self.kernel_size)\n s += 'sigma={})'.format(self.sigma)\n return self.__class__.__name__ + s\n\n\ndef _setup_size(size, error_msg):\n if isinstance(size, numbers.Number):\n return int(size), int(size)\n\n if isinstance(size, Sequence) and len(size) == 1:\n return size[0], size[0]\n\n if len(size) != 2:\n raise ValueError(error_msg)\n\n return size\n\n\ndef _check_sequence_input(x, name, req_sizes):\n msg = req_sizes[0] if len(req_sizes) < 2 else \" or \".join([str(s) for s in req_sizes])\n if not isinstance(x, Sequence):\n raise TypeError(\"{} should be a sequence of length {}.\".format(name, msg))\n if len(x) not in req_sizes:\n raise ValueError(\"{} should be sequence of length {}.\".format(name, msg))\n\n\ndef _setup_angle(x, name, req_sizes=(2, )):\n if isinstance(x, numbers.Number):\n if x < 0:\n raise ValueError(\"If {} is a single number, it must be positive.\".format(name))\n x = [-x, x]\n else:\n _check_sequence_input(x, name, req_sizes)\n\n return [float(d) for d in x]\n\n\nclass RandomInvert(torch.nn.Module):\n \"\"\"Inverts the colors of the given image randomly with a given probability.\n If img is a Tensor, it is expected to be in [..., 1 or 3, H, W] format,\n where ... means it can have an arbitrary number of leading dimensions.\n If img is PIL Image, it is expected to be in mode \"L\" or \"RGB\".\n\n Args:\n p (float): probability of the image being color inverted. Default value is 0.5\n \"\"\"\n\n def __init__(self, p=0.5):\n super().__init__()\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be inverted.\n\n Returns:\n PIL Image or Tensor: Randomly color inverted image.\n \"\"\"\n if torch.rand(1).item() < self.p:\n return F.invert(img)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nclass RandomPosterize(torch.nn.Module):\n \"\"\"Posterize the image randomly with a given probability by reducing the\n number of bits for each color channel. If the image is torch Tensor, it should be of type torch.uint8,\n and it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.\n If img is PIL Image, it is expected to be in mode \"L\" or \"RGB\".\n\n Args:\n bits (int): number of bits to keep for each channel (0-8)\n p (float): probability of the image being color inverted. 
Default value is 0.5\n \"\"\"\n\n def __init__(self, bits, p=0.5):\n super().__init__()\n self.bits = bits\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be posterized.\n\n Returns:\n PIL Image or Tensor: Randomly posterized image.\n \"\"\"\n if torch.rand(1).item() < self.p:\n return F.posterize(img, self.bits)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(bits={},p={})'.format(self.bits, self.p)\n\n\nclass RandomSolarize(torch.nn.Module):\n \"\"\"Solarize the image randomly with a given probability by inverting all pixel\n values above a threshold. If img is a Tensor, it is expected to be in [..., 1 or 3, H, W] format,\n where ... means it can have an arbitrary number of leading dimensions.\n If img is PIL Image, it is expected to be in mode \"L\" or \"RGB\".\n\n Args:\n threshold (float): all pixels equal or above this value are inverted.\n p (float): probability of the image being color inverted. Default value is 0.5\n \"\"\"\n\n def __init__(self, threshold, p=0.5):\n super().__init__()\n self.threshold = threshold\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be solarized.\n\n Returns:\n PIL Image or Tensor: Randomly solarized image.\n \"\"\"\n if torch.rand(1).item() < self.p:\n return F.solarize(img, self.threshold)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(threshold={},p={})'.format(self.threshold, self.p)\n\n\nclass RandomAdjustSharpness(torch.nn.Module):\n \"\"\"Adjust the sharpness of the image randomly with a given probability. If the image is torch Tensor,\n it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.\n\n Args:\n sharpness_factor (float): How much to adjust the sharpness. Can be\n any non negative number. 0 gives a blurred image, 1 gives the\n original image while 2 increases the sharpness by a factor of 2.\n p (float): probability of the image being color inverted. Default value is 0.5\n \"\"\"\n\n def __init__(self, sharpness_factor, p=0.5):\n super().__init__()\n self.sharpness_factor = sharpness_factor\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be sharpened.\n\n Returns:\n PIL Image or Tensor: Randomly sharpened image.\n \"\"\"\n if torch.rand(1).item() < self.p:\n return F.adjust_sharpness(img, self.sharpness_factor)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(sharpness_factor={},p={})'.format(self.sharpness_factor, self.p)\n\n\nclass RandomAutocontrast(torch.nn.Module):\n \"\"\"Autocontrast the pixels of the given image randomly with a given probability.\n If the image is torch Tensor, it is expected\n to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.\n If img is PIL Image, it is expected to be in mode \"L\" or \"RGB\".\n\n Args:\n p (float): probability of the image being autocontrasted. 
Default value is 0.5\n \"\"\"\n\n def __init__(self, p=0.5):\n super().__init__()\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be autocontrasted.\n\n Returns:\n PIL Image or Tensor: Randomly autocontrasted image.\n \"\"\"\n if torch.rand(1).item() < self.p:\n return F.autocontrast(img)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nclass RandomEqualize(torch.nn.Module):\n \"\"\"Equalize the histogram of the given image randomly with a given probability.\n If the image is torch Tensor, it is expected\n to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.\n If img is PIL Image, it is expected to be in mode \"P\", \"L\" or \"RGB\".\n\n Args:\n p (float): probability of the image being equalized. Default value is 0.5\n \"\"\"\n\n def __init__(self, p=0.5):\n super().__init__()\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be equalized.\n\n Returns:\n PIL Image or Tensor: Randomly equalized image.\n \"\"\"\n if torch.rand(1).item() < self.p:\n return F.equalize(img)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n" ]
[ [ "torch.nn.Dropout", "torch.nn.init.constant_", "torch.nn.Conv2d", "torch.nn.init.kaiming_uniform_", "torch.nn.MaxPool2d", "torch.nn.AdaptiveAvgPool2d", "torch.nn.init.normal_", "torch.flatten", "torch.nn.ReLU" ], [ "torch.mm", "torch.randint", "torch.empty", "torch.randperm", "torch.tensor", "torch.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ncilfone/mabwiser
[ "329125d4110312d6001e9486e1cb3490a90565c4" ]
[ "tests/test_lints.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport datetime\nimport math\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\n\nfrom mabwiser.mab import LearningPolicy\nfrom tests.test_base import BaseTest\n\n\nclass LinTSTest(BaseTest):\n\n def test_alpha0_0001(self):\n arm, mab = self.predict(arms=[1, 2, 3],\n decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3, 1],\n rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],\n learning_policy=LearningPolicy.LinTS(alpha=0.0001, scale=True),\n context_history=np.array([[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]]),\n contexts=np.array([[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]]),\n seed=123456,\n num_run=3,\n is_predict=True)\n\n self.assertEqual(len(arm), 3)\n self.assertEqual(arm, [[2, 3], [2, 3], [3, 3]])\n\n def test_alpha0_0001_expectations(self):\n exps, mab = self.predict(arms=[1, 2, 3],\n decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3, 1],\n rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],\n learning_policy=LearningPolicy.LinTS(alpha=0.0001, scale=True),\n context_history=np.array([[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]]),\n contexts=np.array([[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]]),\n seed=123456,\n num_run=1,\n is_predict=False)\n\n self.assertListAlmostEqual(exps[0].values(),\n [-0.23459369004297587, 0.0002702455674444537, 4.588547880979047e-05])\n self.assertListAlmostEqual(exps[1].values(),\n [-0.192811601170233, -2.3415795345448245e-05, 0.00016619626256880228])\n\n def test_alpha1(self):\n arm, mab = self.predict(arms=[1, 2, 3],\n decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],\n rewards=[0, 0, 1, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=LearningPolicy.LinTS(alpha=1),\n context_history=np.array([[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]]),\n contexts=np.array([[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]]),\n seed=123456,\n num_run=1,\n is_predict=True)\n self.assertEqual(len(arm), 2)\n self.assertEqual(arm, [2, 3])\n\n def test_alpha1_expectations(self):\n exps, mab = self.predict(arms=[1, 2, 3],\n decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],\n rewards=[0, 0, 1, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=LearningPolicy.LinTS(alpha=1),\n context_history=np.array([[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]]),\n contexts=np.array([[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]]),\n seed=123456,\n num_run=1,\n is_predict=False)\n self.assertListAlmostEqual(exps[0].values(), [-0.6029872358950072, 3.105765259323796, 1.0208598325762464])\n self.assertListAlmostEqual(exps[1].values(), [0.572141413757231, 0.45473267178654997, 1.773376616755168])\n\n def test_np(self):\n\n arm, mab = self.predict(arms=[1, 2, 3],\n decisions=np.asarray([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),\n rewards=np.asarray([0, 0, 1, 0, 0, 0, 0, 1, 1, 1]),\n learning_policy=LearningPolicy.LinTS(alpha=1),\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=3,\n is_predict=True)\n\n self.assertEqual(len(arm), 3)\n 
self.assertEqual(arm, [[2, 3], [2, 1], [3, 1]])\n\n def test_df(self):\n\n df = pd.DataFrame({'decisions': [1, 1, 1, 2, 2, 3, 3, 3, 3, 3],\n 'rewards': [0, 0, 1, 0, 0, 0, 0, 1, 1, 1]})\n\n arm, mab = self.predict(arms=[1, 2, 3],\n decisions=df['decisions'],\n rewards=df['rewards'],\n learning_policy=LearningPolicy.LinTS(alpha=1),\n context_history=pd.DataFrame([[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]]),\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=3,\n is_predict=True)\n\n self.assertEqual(len(arm), 3)\n self.assertEqual(arm, [[2, 3], [2, 1], [3, 1]])\n\n def test_df_list(self):\n\n df = pd.DataFrame({'decisions': [1, 1, 1, 2, 2, 3, 3, 3, 3, 3],\n 'rewards': [0, 0, 1, 0, 0, 0, 0, 1, 1, 1]})\n\n arm, mab = self.predict(arms=[1, 2, 3],\n decisions=df['decisions'],\n rewards=[0, 0, 1, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=LearningPolicy.LinTS(alpha=1),\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=3,\n is_predict=True)\n\n self.assertEqual(len(arm), 3)\n self.assertEqual(arm, [[2, 3], [2, 1], [3, 1]])\n\n def test_lints_t1(self):\n\n arm, mab = self.predict(arms=[1, 2, 3],\n decisions=[1, 1, 1, 3, 2, 2, 3, 1, 3, 1],\n rewards=[0, 1, 1, 0, 1, 0, 1, 1, 1, 1],\n learning_policy=LearningPolicy.LinTS(alpha=0.24),\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=4,\n is_predict=True)\n\n self.assertEqual(len(arm), 4)\n self.assertEqual(arm, [[2, 1], [2, 1], [2, 1], [1, 2]])\n\n def test_lints_t2(self):\n\n arm, mab = self.predict(arms=[1, 2, 3],\n decisions=[1, 1, 1, 3, 2, 2, 3, 1, 3, 1],\n rewards=[0, 1, 1, 0, 1, 0, 1, 1, 1, 1],\n learning_policy=LearningPolicy.LinTS(alpha=1.5),\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=71,\n num_run=4,\n is_predict=True)\n\n self.assertEqual(len(arm), 4)\n self.assertEqual(arm, [[3, 1], [2, 1], [2, 3], [3, 3]])\n\n def test_lints_t3(self):\n\n arm, mab = self.predict(arms=[1, 2, 4],\n decisions=[1, 1, 4, 4, 2, 2, 1, 1, 4, 2, 1, 4, 1, 2, 4, 1],\n rewards=[7, 9, 10, 20, 2, 5, 8, 15, 17, 11, 0, 5, 2, 9, 3, 1],\n learning_policy=LearningPolicy.LinTS(alpha=1.25),\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0], [0, 1, 4, 3, 5], [0, 1, 2, 4, 5],\n [1, 2, 1, 1, 3], [0, 2, 1, 0, 0], [0, 2, 2, 3, 5], [1, 3, 1, 1, 1]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=4,\n is_predict=True)\n\n self.assertEqual(len(arm), 4)\n self.assertEqual(arm, [[4, 4], [4, 4], [1, 4], [1, 4]])\n\n def test_lints_t4(self):\n\n arm, mab = self.predict(arms=[1, 2, 4],\n decisions=[1, 1, 4, 4, 2, 2, 1, 1, 4, 2, 1, 4, 1, 2, 4, 1],\n rewards=[7, 9, 10, 20, 2, 5, 8, 15, 17, 11, 0, 5, 2, 9, 3, 1],\n 
learning_policy=LearningPolicy.LinTS(alpha=2),\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0], [0, 1, 4, 3, 5], [0, 1, 2, 4, 5],\n [1, 2, 1, 1, 3], [0, 2, 1, 0, 0], [0, 2, 2, 3, 5], [1, 3, 1, 1, 1]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=23,\n num_run=4,\n is_predict=True)\n\n self.assertEqual(len(arm), 4)\n self.assertEqual(arm, [[4, 4], [1, 4], [4, 4], [4, 4]])\n\n def test_lints_t5(self):\n\n arm, mab = self.predict(arms=['one', 'two', 'three'],\n decisions=['one', 'one', 'one', 'three', 'two', 'two', 'three', 'one', 'three', 'two'],\n rewards=[1, 0, 1, 0, 1, 0, 1, 1, 1, 0],\n learning_policy=LearningPolicy.LinTS(alpha=1),\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=23,\n num_run=4,\n is_predict=True)\n\n self.assertEqual(len(arm), 4)\n self.assertEqual(arm, [['two', 'two'], ['three', 'two'], ['two', 'two'], ['one', 'two']])\n\n def test_lints_t6(self):\n\n arm, mab = self.predict(arms=['one', 'two', 'three'],\n decisions=['one', 'one', 'one', 'three', 'two', 'two', 'three', 'one', 'three', 'two',\n 'one'],\n rewards=[2, 7, 7, 9, 1, 3, 1, 2, 6, 4, 1],\n learning_policy=LearningPolicy.LinTS(alpha=1.25),\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0], [0, 1, 4, 3, 5]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=17,\n num_run=4,\n is_predict=True)\n\n self.assertEqual(len(arm), 4)\n self.assertEqual(arm, [['three', 'one'], ['two', 'one'], ['two', 'one'], ['three', 'one']])\n\n def test_lints_t7(self):\n\n arm, mab = self.predict(arms=['a', 'b', 'c'],\n decisions=['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a', 'a'],\n rewards=[-1.25, 12, 0.7, 10, 12, 9.2, -1, -10, 4, 0, 1],\n learning_policy=LearningPolicy.UCB1(alpha=1.25),\n seed=123456,\n num_run=4,\n is_predict=True)\n\n self.assertEqual(len(arm), 4)\n self.assertEqual(arm, ['b', 'b', 'b', 'b'])\n\n def test_lints_t8(self):\n\n arm, mab = self.predict(arms=['a', 'b', 'c'],\n decisions=['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a'],\n rewards=[-1.25, 0.7, 12, 10, 12, 9.2, -1, -10, 4, 0],\n learning_policy=LearningPolicy.LinTS(alpha=0.5),\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=9,\n num_run=4,\n is_predict=True)\n\n self.assertEqual(len(arm), 4)\n self.assertEqual(arm, [['c', 'c'], ['c', 'c'], ['c', 'c'], ['c', 'c']])\n\n def test_lints_t9(self):\n\n # Dates to test\n a = datetime.datetime(2018, 1, 1)\n b = datetime.datetime(2017, 7, 31)\n c = datetime.datetime(2018, 9, 15)\n\n arm, mab = self.predict(arms=[a, b, c],\n decisions=[a, b, c, a, b, c, a, b, c, a],\n rewards=[1.25, 0.7, 12, 10, 1.43, 0.2, -1, -10, 4, 0],\n learning_policy=LearningPolicy.LinTS(alpha=0.25),\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n 
seed=123456,\n num_run=4,\n is_predict=True)\n\n self.assertEqual(len(arm), 4)\n self.assertEqual(arm, [[c, c], [c, c], [c, c], [c, c]])\n\n def test_lints_t10(self):\n\n # Dates to test\n a = datetime.datetime(2018, 1, 1)\n b = datetime.datetime(2017, 7, 31)\n c = datetime.datetime(2018, 9, 15)\n\n arm, mab = self.predict(arms=[a, b, c],\n decisions=[a, b, c, a, b, c, a, b, c, a, b, b, a],\n rewards=[7, 12, 1, -10, 5, 1, 2, 9, 3, 3, 6, 7, 1],\n learning_policy=LearningPolicy.LinTS(alpha=1),\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0], [0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=71,\n num_run=4,\n is_predict=True)\n\n self.assertEqual(len(arm), 4)\n self.assertEqual(arm, [[b, b], [b, b], [b, b], [b, b]])\n\n def test_unused_arm_scale(self):\n\n arms, mab = self.predict(arms=[1, 2, 3, 4],\n decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],\n rewards=[0, 0, 1, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=LearningPolicy.LinTS(alpha=1, scale=True),\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n self.assertEqual(arms, [4, 3])\n\n def test_unused_arm(self):\n\n exps, mab = self.predict(arms=[1, 2, 3, 4],\n decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],\n rewards=[0, 0, 1, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=LearningPolicy.LinTS(alpha=1),\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=False)\n\n self.assertListAlmostEqual(exps[0].values(), [-0.6029872358950072, 3.10576525932379,\n 1.0208598325762497, 4.45334163892619])\n self.assertListAlmostEqual(exps[1].values(), [0.5721414137572303, 0.4547326717865491,\n 1.773376616755162, -1.4333556875425306])\n\n def test_unused_arm2(self):\n\n arms, mab = self.predict(arms=[1, 2, 3, 4],\n decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],\n rewards=[0, 0, 1, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=LearningPolicy.LinTS(alpha=1),\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n self.assertEqual(arms, [4, 3])\n\n def test_unused_arm_scaled(self):\n\n context_history = np.array([[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]], dtype='float64')\n scaler = StandardScaler()\n scaled_contexts = scaler.fit_transform(context_history)\n scaled_predict = scaler.transform(np.array([[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]], dtype='float64'))\n\n exps, mab = self.predict(arms=[1, 2, 3, 4],\n decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],\n rewards=[0, 0, 1, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=LearningPolicy.LinTS(alpha=1),\n context_history=scaled_contexts,\n contexts=scaled_predict,\n seed=123456,\n num_run=1,\n is_predict=False)\n\n 
self.assertListAlmostEqual(exps[0].values(), [-0.6846042491588905, 1.8728586982060706,\n 0.39597711947956443, 2.326370889902805])\n self.assertListAlmostEqual(exps[1].values(), [-0.9156881567627143, -1.01000793116177,\n 1.6774048483779203, 0.6624211256038636])\n\n def test_unused_arm_scaled2(self):\n\n context_history = np.array([[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]], dtype='float64')\n scaler = StandardScaler()\n scaled_contexts = scaler.fit_transform(context_history)\n scaled_predict = scaler.transform(np.array([[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]], dtype='float64'))\n\n arms, mab = self.predict(arms=[1, 2, 3, 4],\n decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],\n rewards=[0, 0, 1, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=LearningPolicy.LinTS(alpha=1),\n context_history=scaled_contexts,\n contexts=scaled_predict,\n seed=7,\n num_run=1,\n is_predict=True)\n\n self.assertEqual(arms, [3, 3])\n\n def test_fit_twice(self):\n\n arm, mab = self.predict(arms=[1, 2, 3, 4],\n decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],\n rewards=[0, 0, 1, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=LearningPolicy.LinTS(alpha=1),\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n self.assertEqual(arm, [4, 3])\n\n b_1 = mab._imp.arm_to_model[1].beta\n self.assertTrue(math.isclose(-0.0825688, b_1[0], abs_tol=0.00001))\n\n b_3 = mab._imp.arm_to_model[3].beta\n self.assertTrue(math.isclose(0.023696, b_3[0], abs_tol=0.00001))\n\n self.assertTrue(4 in mab._imp.arm_to_model.keys())\n\n # Fit again\n decisions2 = [1, 3, 4]\n rewards2 = [0, 1, 1]\n context_history2 = [[0, 1, 1, 1, 1], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0]]\n mab.fit(decisions2, rewards2, context_history2)\n\n b_1 = mab._imp.arm_to_model[1].beta\n self.assertEqual(b_1[0], 0)\n\n b_2 = mab._imp.arm_to_model[2].beta\n self.assertEqual(b_2[0], 0)\n\n b_3 = mab._imp.arm_to_model[3].beta\n self.assertTrue(math.isclose(b_3[0], 0.16667, abs_tol=0.00001))\n\n b_4 = mab._imp.arm_to_model[4].beta\n self.assertEqual(b_4[0], 0)\n\n def test_partial_fit(self):\n\n arm, mab = self.predict(arms=[1, 2, 3, 4],\n decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],\n rewards=[0, 0, 1, 0, 0, 0, 0, 1, 1, 1],\n learning_policy=LearningPolicy.LinTS(alpha=1),\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=1,\n is_predict=True)\n\n self.assertEqual(arm, [4, 3])\n\n b_1 = mab._imp.arm_to_model[1].beta\n self.assertTrue(math.isclose(-0.0825688, b_1[0], abs_tol=0.00001))\n\n b_3 = mab._imp.arm_to_model[3].beta\n self.assertTrue(math.isclose(0.023696, b_3[0], abs_tol=0.00001))\n\n self.assertTrue(4 in mab._imp.arm_to_model.keys())\n\n # Fit again\n decisions2 = [1, 3, 4]\n rewards2 = [0, 1, 1]\n context_history2 = [[0, 1, 1, 1, 1], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0]]\n mab.partial_fit(decisions2, rewards2, context_history2)\n\n b_1 = mab._imp.arm_to_model[1].beta\n self.assertTrue(math.isclose(-0.05142857, b_1[0], abs_tol=0.00001))\n b_2 = mab._imp.arm_to_model[2].beta\n self.assertEqual(b_2[0], 0)\n\n b_3 = 
mab._imp.arm_to_model[3].beta\n self.assertTrue(math.isclose(b_3[0], 0.22099152, abs_tol=0.00001))\n\n b_4 = mab._imp.arm_to_model[4].beta\n self.assertEqual(b_4[0], 0)\n\n def test_add_arm(self):\n arm, mab = self.predict(arms=[1, 2, 3],\n decisions=[1, 1, 1, 3, 2, 2, 3, 1, 3, 1],\n rewards=[0, 1, 1, 0, 1, 0, 1, 1, 1, 1],\n learning_policy=LearningPolicy.LinTS(alpha=0.24),\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=4,\n is_predict=True)\n mab.add_arm(4)\n self.assertTrue(4 in mab.arms)\n self.assertTrue(4 in mab._imp.arms)\n self.assertTrue(4 in mab._imp.arm_to_expectation.keys())\n self.assertTrue(mab._imp.arm_to_model[4] is not None)\n\n def test_remove_arm(self):\n arm, mab = self.predict(arms=[1, 2, 3],\n decisions=[1, 1, 1, 3, 2, 2, 3, 1, 3, 1],\n rewards=[0, 1, 1, 0, 1, 0, 1, 1, 1, 1],\n learning_policy=LearningPolicy.LinTS(alpha=0.24),\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=4,\n is_predict=True)\n mab.remove_arm(3)\n self.assertTrue(3 not in mab.arms)\n self.assertTrue(3 not in mab._imp.arms)\n self.assertTrue(3 not in mab._imp.arm_to_expectation)\n self.assertTrue(3 not in mab._imp.arm_to_model)\n\n def test_warm_start(self):\n _, mab = self.predict(arms=[1, 2, 3],\n decisions=[1, 1, 1, 1, 2, 2, 2, 1, 2, 1],\n rewards=[0, 1, 1, 0, 1, 0, 1, 1, 1, 1],\n learning_policy=LearningPolicy.LinTS(alpha=0.24),\n context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],\n [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],\n [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],\n [0, 2, 1, 0, 0]],\n contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],\n seed=123456,\n num_run=4,\n is_predict=True)\n\n # Before warm start\n self.assertEqual(mab._imp.trained_arms, [1, 2])\n self.assertDictEqual(mab._imp.arm_to_expectation, {1: 0.0, 2: 0.0, 3: 0.0})\n self.assertListAlmostEqual(mab._imp.arm_to_model[1].beta, [0.19635284, 0.11556404, 0.57675997, 0.30597964, -0.39100933])\n self.assertListAlmostEqual(mab._imp.arm_to_model[3].beta, [0, 0, 0, 0, 0])\n\n # Warm start\n mab.warm_start(arm_to_features={1: [0, 1], 2: [0, 0], 3: [0.5, 0.5]}, distance_quantile=0.5)\n self.assertListAlmostEqual(mab._imp.arm_to_model[3].beta, [0.19635284, 0.11556404, 0.57675997, 0.30597964, -0.39100933])\n" ]
[ [ "numpy.asarray", "sklearn.preprocessing.StandardScaler", "numpy.array", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
BrianOfrim/boja
[ "6571fbbfb7f015e96e80e822d9dc96b4636b4119" ]
[ "vision/predict/predict_spin.py" ]
[ "import os\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport PySpin\nimport torch\nimport torchvision.transforms.functional as F\n\nfrom .._file_utils import get_highest_numbered_file\nfrom .._image_utils import RGB8Image, draw_bboxes\nfrom .. import _models\nfrom .._s3_utils import s3_bucket_exists, s3_download_highest_numbered_file\nfrom .._settings import (\n DEFAULT_LOCAL_DATA_DIR,\n DEFAULT_S3_DATA_DIR,\n LABEL_FILE_NAME,\n MODEL_STATE_DIR_NAME,\n MODEL_STATE_FILE_TYPE,\n NETWORKS,\n)\n\n\nmatplotlib.use(\"TKAgg\")\n\nINFERENCE_WINDOW_NAME = \"Inference\"\n\n\ndef get_newest_saved_model_path(model_dir_path: str, filter_keyword=None) -> str:\n return get_highest_numbered_file(\n model_dir_path, MODEL_STATE_FILE_TYPE, filter_keyword\n )\n\n\ndef get_newest_image(cam, pixel_format):\n try:\n spinnaker_image = cam.GetNextImage()\n retrieved_image = RGB8Image(\n spinnaker_image.GetWidth(),\n spinnaker_image.GetHeight(),\n pixel_format,\n spinnaker_image.GetData().copy(),\n )\n spinnaker_image.Release()\n return retrieved_image\n except ValueError as err:\n print(err)\n return None\n\n\ndef key_press(event, continue_streaming):\n\n if event.key == \"escape\":\n continue_streaming[0] = False\n\n\ndef display_images(\n cam, labels, network_type, saved_model_file_path, threshold=0.5\n) -> None:\n device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\n # get the model using our helper function\n model = _models.__dict__[network_type](\n len(labels),\n box_score_thresh=threshold,\n min_size=600,\n max_size=800,\n box_nms_thresh=0.3,\n )\n\n print(\"Loading model state from: %s\" % saved_model_file_path)\n\n checkpoint = torch.load(saved_model_file_path, map_location=device)\n model.load_state_dict(checkpoint[\"model\"])\n\n # move model to the right device\n model.to(device)\n\n model.eval()\n\n # create plots\n fig, inference_ax = plt.subplots()\n\n fig.canvas.set_window_title(\"Predict\")\n\n continue_streaming = [True]\n\n fig.canvas.mpl_connect(\n \"key_press_event\", lambda event: key_press(event, continue_streaming)\n )\n\n print(\"Model state loaded\")\n\n label_colors = plt.get_cmap(\"hsv\")(np.linspace(0, 0.9, len(labels)))\n\n print(\"Starting inference\")\n\n print(\"Starting live stream.\")\n cam.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous)\n cam.BeginAcquisition()\n\n pixel_format = cam.PixelFormat.GetCurrentEntry().GetSymbolic()\n\n while continue_streaming[0]:\n retrieved_image = get_newest_image(cam, pixel_format)\n\n if retrieved_image is None:\n break\n\n image_data = RGB8Image.to_bgr(retrieved_image.get_data())\n\n tensor_image = F.to_tensor(image_data)\n tensor_image = tensor_image.to(device)\n\n outputs = []\n with torch.no_grad():\n outputs = model([tensor_image])\n\n outputs = [\n {k: v.to(torch.device(\"cpu\")) for k, v in t.items()} for t in outputs\n ]\n\n # filter out the background labels and scores bellow threshold\n filtered_output = [\n (outputs[0][\"boxes\"][j], outputs[0][\"labels\"][j], outputs[0][\"scores\"][j],)\n for j in range(len(outputs[0][\"boxes\"]))\n if outputs[0][\"scores\"][j] > threshold and outputs[0][\"labels\"][j] > 0\n ]\n\n inference_boxes, inference_labels, inference_scores = (\n zip(*filtered_output) if len(filtered_output) > 0 else ([], [], [])\n )\n\n inference_ax.clear()\n\n inference_ax.imshow(image_data)\n\n draw_bboxes(\n inference_ax,\n inference_boxes,\n inference_labels,\n labels,\n label_colors,\n inference_scores,\n )\n\n plt.pause(0.001)\n\n 
print(\"Ending live stream\")\n cam.EndAcquisition()\n\n\ndef apply_camera_settings(cam, framerate=30.0) -> None:\n # Configure newest only buffer handling\n s_node_map = cam.GetTLStreamNodeMap()\n\n # Retrieve Buffer Handling Mode Information\n handling_mode = PySpin.CEnumerationPtr(\n s_node_map.GetNode(\"StreamBufferHandlingMode\")\n )\n handling_mode_entry = handling_mode.GetEntryByName(\"NewestOnly\")\n handling_mode.SetIntValue(handling_mode_entry.GetValue())\n\n # Set stream buffer Count Mode to manual\n stream_buffer_count_mode = PySpin.CEnumerationPtr(\n s_node_map.GetNode(\"StreamBufferCountMode\")\n )\n stream_buffer_count_mode_manual = PySpin.CEnumEntryPtr(\n stream_buffer_count_mode.GetEntryByName(\"Manual\")\n )\n stream_buffer_count_mode.SetIntValue(stream_buffer_count_mode_manual.GetValue())\n\n # Retrieve and modify Stream Buffer Count\n buffer_count = PySpin.CIntegerPtr(s_node_map.GetNode(\"StreamBufferCountManual\"))\n\n buffer_count.SetValue(3)\n\n # Display Buffer Info\n print(\"Buffer Handling Mode: %s\" % handling_mode_entry.GetDisplayName())\n print(\"Buffer Count: %d\" % buffer_count.GetValue())\n print(\"Maximum Buffer Count: %d\" % buffer_count.GetMax())\n\n # Configure frame rate\n cam.AcquisitionFrameRateEnable.SetValue(True)\n cam.AcquisitionFrameRate.SetValue(min(framerate, cam.AcquisitionFrameRate.GetMax()))\n print(\"Acquisition frame rate set to: %3.1f\" % cam.AcquisitionFrameRate.GetValue())\n\n\ndef main(args):\n\n use_s3 = True if args.s3_bucket_name is not None else False\n\n if use_s3:\n if not s3_bucket_exists(args.s3_bucket_name):\n use_s3 = False\n print(\n \"Bucket: %s either does not exist or you do not have access to it\"\n % args.s3_bucket_name\n )\n else:\n print(\"Bucket: %s exists and you have access to it\" % args.s3_bucket_name)\n\n if use_s3:\n # Get the newest model\n s3_download_highest_numbered_file(\n args.s3_bucket_name,\n \"/\".join([args.s3_data_dir, MODEL_STATE_DIR_NAME]),\n os.path.join(args.local_data_dir, MODEL_STATE_DIR_NAME),\n MODEL_STATE_FILE_TYPE,\n args.network,\n )\n\n label_file_path = os.path.join(args.local_data_dir, LABEL_FILE_NAME)\n if not os.path.isfile(label_file_path):\n print(\"Missing file %s\" % label_file_path)\n return\n\n # read in the category labels\n labels = open(label_file_path).read().splitlines()\n\n if len(labels) == 0:\n print(\"No label categories found in %s\" % label_file_path)\n return\n\n # Add the background as the first class\n labels.insert(0, \"background\")\n\n print(\"Labels found:\")\n print(labels)\n\n saved_model_file_path = (\n args.model_path\n if args.model_path is not None\n else get_newest_saved_model_path(\n os.path.join(args.local_data_dir, MODEL_STATE_DIR_NAME), args.network,\n )\n )\n\n if saved_model_file_path is None:\n print(\"No saved model state found\")\n return\n\n # Retrieve singleton reference to system object\n system = PySpin.System.GetInstance()\n\n # Retrieve list of cameras from the system\n cam_list = system.GetCameras()\n\n num_cameras = cam_list.GetSize()\n\n print(\"Number of cameras detected: %d\" % num_cameras)\n # Finish if there are no cameras\n if num_cameras == 0:\n # Clear camera list before releasing system\n cam_list.Clear()\n\n # Release system instance\n system.ReleaseInstance()\n\n print(\"Not enough cameras!\")\n input(\"Done! 
Press Enter to exit...\")\n return\n\n cam = cam_list.GetByIndex(0)\n\n cam.Init()\n\n apply_camera_settings(cam)\n\n display_images(cam, labels, args.network, saved_model_file_path, args.threshold)\n\n cam.DeInit()\n\n del cam\n cam_list.Clear()\n system.ReleaseInstance()\n print(\"Exiting.\")\n\n\nif __name__ == \"__main__\":\n\n import argparse\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--s3_bucket_name\", type=str)\n parser.add_argument(\n \"--s3_data_dir\",\n type=str,\n default=DEFAULT_S3_DATA_DIR,\n help=\"Prefix of the s3 data objects\",\n )\n parser.add_argument(\n \"--local_data_dir\", type=str, default=DEFAULT_LOCAL_DATA_DIR,\n )\n parser.add_argument(\"--model_path\", type=str, help=\"The model to load\")\n parser.add_argument(\n \"--network\",\n type=str,\n choices=NETWORKS,\n default=NETWORKS[0],\n help=\"The neural network to use for object detection\",\n )\n parser.add_argument(\n \"--threshold\",\n type=float,\n default=0.5,\n help=\"The threshold above which to display predicted bounding boxes\",\n )\n parser.add_argument(\n \"--frame_rate\", type=float, default=30.0,\n )\n\n args = parser.parse_args()\n\n main(args)\n\n" ]
[ [ "torch.load", "matplotlib.use", "matplotlib.pyplot.subplots", "matplotlib.pyplot.get_cmap", "torch.no_grad", "torch.cuda.is_available", "torch.device", "matplotlib.pyplot.pause" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tolleybot/tensorflow-face-detection
[ "97ddd30107efa87184e1d26d61a747b7a58cf0f8" ]
[ "server.py" ]
[ "from imagezmq import imagezmq\nimport argparse\nimport numpy as np\nimport tensorflow as tf\nimport cv2\nimport time\n\nfrom utils import label_map_util\nfrom utils import visualization_utils_color as vis_util\n\n# Path to frozen detection graph. This is the actual model that is used for the object detection.\nPATH_TO_CKPT = './model/frozen_inference_graph_face.pb'\n\n# List of the strings that is used to add correct label for each box.\nPATH_TO_LABELS = './protos/face_label_map.pbtxt'\n\nNUM_CLASSES = 2\n\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,\n use_display_name=True)\ncategory_index = label_map_util.create_category_index(categories)\n\n\nclass TensoflowFaceDector(object):\n def __init__(self, PATH_TO_CKPT):\n \"\"\"Tensorflow detector\n \"\"\"\n\n self.detection_graph = tf.Graph()\n with self.detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n with self.detection_graph.as_default():\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n self.sess = tf.Session(graph=self.detection_graph, config=config)\n self.windowNotSet = True\n\n def run(self, image):\n \"\"\"image: bgr image\n return (boxes, scores, classes, num_detections)\n \"\"\"\n\n image_np = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n\n # the array based representation of the image will be used later in order to prepare the\n # result image with boxes and labels on it.\n # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image_np, axis=0)\n image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')\n # Each box represents a part of the image where a particular object was detected.\n boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')\n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n scores = self.detection_graph.get_tensor_by_name('detection_scores:0')\n classes = self.detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')\n # Actual detection.\n start_time = time.time()\n (boxes, scores, classes, num_detections) = self.sess.run(\n [boxes, scores, classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n elapsed_time = time.time() - start_time\n print('inference time cost: {}'.format(elapsed_time))\n\n return (boxes, scores, classes, num_detections)\n\ndef predict(model_data_path, port):\n \"\"\" starts are server\"\"\"\n\n tDetector = TensoflowFaceDector(model_data_path)\n\n # setup our server\n image_hub = imagezmq.ImageHub(open_port='tcp://*:' + port)\n\n print(\"Server Started on port {}..\\n\".format(port))\n\n while True:\n _, image = image_hub.recv_image()\n\n (boxes, scores, classes, num_detections) = tDetector.run(image)\n\n vis_util.visualize_boxes_and_labels_on_image_array(\n image,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n category_index,\n use_normalized_coordinates=True,\n line_thickness=4)\n\n image_hub.send_image('OK', image)\n\n\ndef main():\n # Parse arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('--port', help='Directory of images to 
predict', default='5555', type=str)\n\n args = parser.parse_args()\n\n # if args.test:\n print(\"Starting up server..\\n\")\n # serversample(args.port)\n # else:\n predict(PATH_TO_CKPT, args.port)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.Graph", "numpy.expand_dims", "tensorflow.import_graph_def", "tensorflow.gfile.GFile", "numpy.squeeze", "tensorflow.ConfigProto", "tensorflow.Session", "tensorflow.GraphDef" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
chrisburr/hist
[ "d10132ab8d03f41152f0b934a18291ce699453b2" ]
[ "src/hist/basehist.py" ]
[ "# -*- coding: utf-8 -*-\nfrom .axestuple import NamedAxesTuple\nfrom .quick_construct import MetaConstructor\nfrom .utils import set_family, HIST_FAMILY\nfrom .storage import Storage\n\nimport warnings\nimport functools\nimport operator\nimport histoprint\n\nimport numpy as np\nimport boost_histogram as bh\n\nfrom typing import Callable, Optional, Tuple, Union, Dict, Any, TYPE_CHECKING\nfrom .svgplots import html_hist, svg_hist_1d, svg_hist_1d_c, svg_hist_2d, svg_hist_nd\n\n\nif TYPE_CHECKING:\n from mplhep.plot import Hist1DArtists, Hist2DArtists\n import matplotlib.axes\n\n\n@set_family(HIST_FAMILY)\nclass BaseHist(bh.Histogram, metaclass=MetaConstructor):\n __slots__ = ()\n\n def __init__(self, *args, storage: Optional[Storage] = None, metadata=None):\n \"\"\"\n Initialize BaseHist object. Axis params can contain the names.\n \"\"\"\n self._hist: Any = None\n self.axes: NamedAxesTuple\n\n if len(args):\n if isinstance(storage, type):\n msg = (\n f\"Please use '{storage.__name__}()' instead of '{storage.__name__}'\"\n )\n warnings.warn(msg)\n storage = storage()\n super().__init__(*args, storage=storage, metadata=metadata)\n valid_names = [ax.name for ax in self.axes if ax.name]\n if len(valid_names) != len(set(valid_names)):\n raise KeyError(\n f\"{self.__class__.__name__} instance cannot contain axes with duplicated names\"\n )\n for i, ax in enumerate(self.axes):\n # label will return name if label is not set, so this is safe\n if not ax.label:\n ax.label = f\"Axis {i}\"\n\n def _generate_axes_(self) -> NamedAxesTuple:\n \"\"\"\n This is called to fill in the axes. Subclasses can override it if they need\n to change the axes tuple.\n \"\"\"\n\n return NamedAxesTuple(self._axis(i) for i in range(self.ndim))\n\n def _repr_html_(self):\n if self.ndim == 1:\n if self.axes[0].options.circular:\n return str(html_hist(self, svg_hist_1d_c))\n else:\n return str(html_hist(self, svg_hist_1d))\n elif self.ndim == 2:\n return str(html_hist(self, svg_hist_2d))\n elif self.ndim > 2:\n return str(html_hist(self, svg_hist_nd))\n return str(self)\n\n def _name_to_index(self, name: str) -> int:\n \"\"\"\n Transform axis name to axis index, given axis name, return axis \\\n index.\n \"\"\"\n for index, axis in enumerate(self.axes):\n if name == axis.name:\n return index\n\n raise ValueError(\"The axis names could not be found\")\n\n def project(self, *args: Union[int, str]):\n \"\"\"\n Projection of axis idx.\n \"\"\"\n int_args = [self._name_to_index(a) if isinstance(a, str) else a for a in args]\n return super().project(*int_args)\n\n def fill(\n self, *args, weight=None, sample=None, threads: Optional[int] = None, **kwargs\n ):\n \"\"\"\n Insert data into the histogram using names and indices, return\n a Hist object.\n \"\"\"\n\n data_dict = {\n self._name_to_index(k) if isinstance(k, str) else k: v\n for k, v in kwargs.items()\n }\n\n if set(data_dict) != set(range(len(args), self.ndim)):\n raise TypeError(\"All axes must be accounted for in fill\")\n\n data = (data_dict[i] for i in range(len(args), self.ndim))\n\n total_data = tuple(args) + tuple(data) # Python 2 can't unpack twice\n return super().fill(*total_data, weight=weight, sample=sample, threads=threads)\n\n def _loc_shortcut(self, x):\n \"\"\"\n Convert some specific indices to location.\n \"\"\"\n\n if isinstance(x, slice):\n return slice(\n self._loc_shortcut(x.start),\n self._loc_shortcut(x.stop),\n self._step_shortcut(x.step),\n )\n elif isinstance(x, complex):\n if x.real % 1 != 0:\n raise ValueError(\"The real part should be an 
integer\")\n else:\n return bh.loc(x.imag, int(x.real))\n elif isinstance(x, str):\n return bh.loc(x)\n else:\n return x\n\n def _step_shortcut(self, x):\n \"\"\"\n Convert some specific indices to step.\n \"\"\"\n\n if isinstance(x, complex):\n if x.real != 0:\n raise ValueError(\"The step should not have real part\")\n elif x.imag % 1 != 0:\n raise ValueError(\"The imaginary part should be an integer\")\n else:\n return bh.rebin(int(x.imag))\n else:\n return x\n\n def _index_transform(self, index):\n \"\"\"\n Auxiliary function for __getitem__ and __setitem__.\n \"\"\"\n\n if isinstance(index, dict):\n new_indices = {\n (\n self._name_to_index(k) if isinstance(k, str) else k\n ): self._loc_shortcut(v)\n for k, v in index.items()\n }\n if len(new_indices) != len(index):\n raise ValueError(\n \"Duplicate index keys, numbers and names cannot overlap\"\n )\n return new_indices\n\n elif not hasattr(index, \"__iter__\"):\n index = (index,)\n\n return tuple(self._loc_shortcut(v) for v in index)\n\n def __getitem__(self, index):\n \"\"\"\n Get histogram item.\n \"\"\"\n\n return super().__getitem__(self._index_transform(index))\n\n def __setitem__(self, index, value):\n \"\"\"\n Set histogram item.\n \"\"\"\n\n return super().__setitem__(self._index_transform(index), value)\n\n def density(self) -> np.ndarray:\n \"\"\"\n Density numpy array.\n \"\"\"\n total = self.sum() * functools.reduce(operator.mul, self.axes.widths)\n return self.view() / np.where(total > 0, total, 1)\n\n def show(self, **kwargs):\n \"\"\"\n Pretty print histograms to the console.\n \"\"\"\n\n return histoprint.print_hist(self, **kwargs)\n\n def plot(self, *args, **kwargs) -> \"Union[Hist1DArtists, Hist2DArtists]\":\n \"\"\"\n Plot method for BaseHist object.\n \"\"\"\n if self.ndim == 1:\n return self.plot1d(*args, **kwargs)\n elif self.ndim == 2:\n return self.plot2d(*args, **kwargs)\n else:\n raise NotImplementedError(\"Please project to 1D or 2D before calling plot\")\n\n def plot1d(\n self,\n *,\n ax: \"Optional[matplotlib.axes.Axes]\" = None,\n **kwargs,\n ) -> \"Hist1DArtists\":\n \"\"\"\n Plot1d method for BaseHist object.\n \"\"\"\n\n import hist.plot\n\n return hist.plot.histplot(self, ax=ax, **kwargs)\n\n def plot2d(\n self,\n *,\n ax: \"Optional[matplotlib.axes.Axes]\" = None,\n **kwargs,\n ) -> \"Hist2DArtists\":\n \"\"\"\n Plot2d method for BaseHist object.\n \"\"\"\n\n import hist.plot\n\n return hist.plot.hist2dplot(self, ax=ax, **kwargs)\n\n def plot2d_full(\n self,\n *,\n ax_dict: \"Optional[Dict[str, matplotlib.axes.Axes]]\" = None,\n **kwargs,\n ) -> \"Tuple[Hist2DArtists, Hist1DArtists, Hist1DArtists]\":\n \"\"\"\n Plot2d_full method for BaseHist object.\n\n Pass a dict of axes to ``ax_dict``, otherwise, the current figure will be used.\n \"\"\"\n # Type judgement\n\n import hist.plot\n\n return hist.plot.plot2d_full(self, ax_dict=ax_dict, **kwargs)\n\n def plot_pull(\n self,\n func: Callable,\n *,\n ax_dict: \"Optional[Dict[str, matplotlib.axes.Axes]]\" = None,\n **kwargs,\n ) -> \"Tuple[matplotlib.axes.Axes, matplotlib.axes.Axes]\":\n \"\"\"\n Plot_pull method for BaseHist object.\n \"\"\"\n\n import hist.plot\n\n return hist.plot.plot_pull(self, func, ax_dict=ax_dict, **kwargs)\n" ]
[ [ "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GZHermit/video_analyst
[ "6233b19320e3d07b95fb1f782efd89b052a8cf4e", "6233b19320e3d07b95fb1f782efd89b052a8cf4e" ]
[ "demo/main/video/sot_video.py", "videoanalyst/engine/tester/tester_impl/lasot.py" ]
[ "# -*- coding: utf-8 -*\n\nfrom paths import ROOT_PATH # isort:skip\nfrom videoanalyst.config.config import cfg\nfrom videoanalyst.config.config import specify_task\nfrom videoanalyst.model import builder as model_builder\nfrom videoanalyst.pipeline import builder as pipeline_builder\nfrom videoanalyst.utils import complete_path_wt_root_in_cfg\nfrom videoanalyst.pipeline.utils.bbox import xywh2xyxy, xyxy2xywh\n\nimport argparse\nfrom loguru import logger\n\nimport cv2\nimport numpy as np\nimport time\nimport torch\n\nfont_size = 0.5\nfont_width = 1\n\n\ndef make_parser():\n parser = argparse.ArgumentParser(\n description=\"press s to select the target box,\\n \\\n then press enter or space to confirm it or press c to cancel it,\\n \\\n press c to stop track and press q to exit program\")\n parser.add_argument(\n \"-cfg\",\n \"--config\",\n default=\"experiments/siamfcpp/test/got10k/siamfcpp_alexnet-got.yaml\",\n type=str,\n help='experiment configuration')\n parser.add_argument(\"-d\",\n \"--device\",\n default=\"cpu\",\n type=str,\n help=\"torch.device, cuda or cpu\")\n parser.add_argument(\"-v\",\n \"--video\",\n type=str,\n default=\"webcam\",\n help=\"path to input video file, default is webcam\")\n parser.add_argument(\"-o\",\n \"--output\",\n type=str,\n default=\"\",\n help=\"path to dump the track video\")\n return parser\n\n\ndef main(args):\n root_cfg = cfg\n root_cfg.merge_from_file(args.config)\n logger.info(\"Load experiment configuration at: %s\" % args.config)\n\n # resolve config\n root_cfg = complete_path_wt_root_in_cfg(root_cfg, ROOT_PATH)\n root_cfg = root_cfg.test\n task, task_cfg = specify_task(root_cfg)\n task_cfg.freeze()\n window_name = task_cfg.exp_name\n # build model\n model = model_builder.build(task, task_cfg.model)\n # build pipeline\n pipeline = pipeline_builder.build(task, task_cfg.pipeline, model)\n dev = torch.device(args.device)\n pipeline.set_device(dev)\n init_box = None\n template = None\n vw = None\n\n if args.video == \"webcam\":\n logger.info(\"[INFO] starting video stream...\")\n vs = cv2.VideoCapture(0)\n vs.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))\n else:\n vs = cv2.VideoCapture(args.video)\n if args.output:\n fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n width, height = vs.get(3), vs.get(4)\n vw = cv2.VideoWriter(args.output, fourcc, 25, (int(width), int(height)))\n while vs.isOpened():\n ret, frame = vs.read()\n if ret:\n if init_box is not None:\n time_a = time.time()\n rect_pred = pipeline.update(frame)\n show_frame = frame.copy()\n time_cost = time.time() - time_a\n bbox_pred = xywh2xyxy(rect_pred)\n bbox_pred = tuple(map(int, bbox_pred))\n cv2.putText(show_frame,\n \"track cost: {:.4f} s\".format(time_cost), (128, 20),\n cv2.FONT_HERSHEY_COMPLEX, font_size, (0, 0, 255),\n font_width)\n cv2.rectangle(show_frame, bbox_pred[:2], bbox_pred[2:],\n (0, 255, 0))\n if template is not None:\n show_frame[:128, :128] = template\n else:\n show_frame = frame\n cv2.imshow(window_name, show_frame)\n if vw is not None:\n vw.write(show_frame)\n key = cv2.waitKey(30) & 0xFF\n if key == ord(\"q\"):\n break\n # if the 's' key is selected, we are going to \"select\" a bounding\n # box to track\n elif key == ord(\"s\"):\n # select the bounding box of the object we want to track (make\n # sure you press ENTER or SPACE after selecting the ROI)\n box = cv2.selectROI(window_name,\n frame,\n fromCenter=False,\n showCrosshair=True)\n if box[2] > 0 and box[3] > 0:\n init_box = box\n template = cv2.resize(\n frame[box[1]:box[1] + box[3], 
box[0]:box[0] + box[2]],\n (128, 128))\n pipeline.init(frame, init_box)\n elif key == ord(\"c\"):\n init_box = None\n template = None\n vs.release()\n if vw is not None:\n vw.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == \"__main__\":\n parser = make_parser()\n args = parser.parse_args()\n main(args)\n", "# -*- coding: utf-8 -*\r\nimport copy\r\nfrom loguru import logger\r\nimport os.path as osp\r\n\r\nfrom yacs.config import CfgNode\r\n\r\nimport torch\r\n\r\nfrom videoanalyst.evaluation import got_benchmark\r\nfrom videoanalyst.evaluation.got_benchmark.experiments import ExperimentLaSOT\r\n\r\nfrom ..tester_base import TRACK_TESTERS, TesterBase\r\nfrom .utils.got_benchmark_helper import PipelineTracker\r\n\r\n\r\n@TRACK_TESTERS.register\r\nclass LaSOTTester(TesterBase):\r\n r\"\"\"LaSOT tester\r\n \r\n Hyper-parameters\r\n ----------------\r\n device_num: int\r\n number of gpus. If set to non-positive number, then use cpu\r\n data_root: str\r\n path to got-10k root\r\n subsets: List[str]\r\n list of subsets name (val|test)\r\n \"\"\"\r\n extra_hyper_params = dict(\r\n device_num=1,\r\n data_root=\"datasets/LaSOT\",\r\n subsets=[\"test\"], # (train|test|train_test)\r\n )\r\n\r\n def __init__(self, *args, **kwargs):\r\n super(LaSOTTester, self).__init__(*args, **kwargs)\r\n # self._experiment = None\r\n\r\n def update_params(self):\r\n # set device state\r\n num_gpu = self._hyper_params[\"device_num\"]\r\n if num_gpu > 0:\r\n all_devs = [torch.device(\"cuda:%d\" % i) for i in range(num_gpu)]\r\n else:\r\n all_devs = [torch.device(\"cpu\")]\r\n self._state[\"all_devs\"] = all_devs\r\n\r\n def test(self, ):\r\n tracker_name = self._hyper_params[\"exp_name\"]\r\n all_devs = self._state[\"all_devs\"]\r\n dev = all_devs[0]\r\n self._pipeline.set_device(dev)\r\n pipeline_tracker = PipelineTracker(tracker_name, self._pipeline)\r\n\r\n for subset in self._hyper_params[\"subsets\"]:\r\n root_dir = self._hyper_params[\"data_root\"]\r\n dataset_name = \"GOT-Benchmark\"\r\n save_root_dir = osp.join(self._hyper_params[\"exp_save\"],\r\n dataset_name)\r\n result_dir = osp.join(save_root_dir, \"result\")\r\n report_dir = osp.join(save_root_dir, \"report\")\r\n\r\n experiment = ExperimentLaSOT(root_dir,\r\n subset=subset,\r\n result_dir=result_dir,\r\n report_dir=report_dir)\r\n experiment.run(pipeline_tracker)\r\n experiment.report([tracker_name], plot_curves=False)\r\n\r\n\r\nLaSOTTester.default_hyper_params = copy.deepcopy(\r\n LaSOTTester.default_hyper_params)\r\nLaSOTTester.default_hyper_params.update(LaSOTTester.extra_hyper_params)\r\n" ]
[ [ "torch.device" ], [ "torch.device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jarekczek/codeforces
[ "6214d75991979c12e6e315a3eff7de21a57fae56" ]
[ "python/plot_colors.py" ]
[ "from datetime import date\nimport dateutil.parser\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndf = pd.read_csv('colors_2010_2022_6.txt', delimiter=\"\\t\")\ndf = df[df['reportDate'] >= '2018']\nprint(df.head())\n#df = df.set_index('reportDate')\ndates = sorted(df['reportDate'].unique())\nprint(dates)\n\ndf2 = pd.DataFrame(index=dates)\ndf2['total'] = df[df['color'] == 'total'].set_index('reportDate')['count']\nprint(df2.head())\n\ndf3 = df[df['color'] == 'total']\n#print(df3.head())\ncolors = sorted(df['color'].unique())\nfor color in colors:\n print(color)\n df2[color] = df[df['color'] == color].set_index('reportDate')['count']\n df2[color + '_rel'] = 100.0 * df2[color] / df2['total']\n\ndf2[colors[0] + '_cumul'] = df2[colors[0] + '_rel']\nfor i in range(1, len(colors)):\n print(colors[i])\n df2[colors[i] + '_cumul'] = df2[colors[i] + '_rel'] + df2[colors[i-1] + '_cumul']\n \ndef dateToNumber(strDate):\n d = dateutil.parser.parse(strDate)\n dJan = date(d.year, 1, 1)\n return d.year + (d.toordinal() - dJan.toordinal()) / 365.0\n \ndf2['dateNumber'] = df2.apply(lambda row: dateToNumber(row.name), axis=1)\nprint(df2.head())\n \nplt.clf()\nplt.plot(df2['dateNumber'], df2['total'], linestyle='dotted', color='black', markevery=1)\nplt.plot(df2['dateNumber'], df2['0000_gray'], color='#808080')\nplt.plot(df2['dateNumber'], df2['1200_green'], color='#008000')\nplt.plot(df2['dateNumber'], df2['1400_cyan'], color='#03a89e')\nplt.plot(df2['dateNumber'], df2['1600_blue'], color='#0000c0')\nplt.plot(df2['dateNumber'], df2['1900_violet'], color='#a000a0')\nplt.plot(df2['dateNumber'], df2['2100_orange'], color='#ff8c00')\nplt.plot(df2['dateNumber'], df2['2600_red'], color='#ff0000')\nplt.savefig('total.png')\n\nplt.clf()\nplt.plot(df2['dateNumber'], df2['0000_gray_rel'], color='#808080')\nplt.plot(df2['dateNumber'], df2['1200_green_rel'], color='#008000')\nplt.plot(df2['dateNumber'], df2['1400_cyan_rel'], color='#03a89e')\nplt.plot(df2['dateNumber'], df2['1600_blue_rel'], color='#0000c0')\nplt.plot(df2['dateNumber'], df2['1900_violet_rel'], color='#a000a0')\nplt.plot(df2['dateNumber'], df2['2100_orange_rel'], color='#ff8c00')\nplt.plot(df2['dateNumber'], df2['2600_red_rel'], color='#ff0000')\nplt.savefig('total_rel.png')\n\nplt.clf()\nplt.plot(df2['dateNumber'], df2['0000_gray_cumul'], color='#808080')\nplt.plot(df2['dateNumber'], df2['1200_green_cumul'], color='#008000')\nplt.plot(df2['dateNumber'], df2['1400_cyan_cumul'], color='#03a89e')\nplt.plot(df2['dateNumber'], df2['1600_blue_cumul'], color='#0000c0')\nplt.plot(df2['dateNumber'], df2['1900_violet_cumul'], color='#a000a0')\nplt.plot(df2['dateNumber'], df2['2100_orange_cumul'], color='#ff8c00')\nplt.plot(df2['dateNumber'], df2['2600_red_cumul'], color='#ff0000')\nplt.savefig('total_rel_cumul.png')\n\nprint(\"ok\")\n" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.savefig", "pandas.DataFrame", "matplotlib.pyplot.plot", "matplotlib.pyplot.clf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
nbren12/nn_atmos_param
[ "cb138f0b211fd5743e56ad659aec38c082d2b3ac" ]
[ "lib/torch/loss.py" ]
[ "import torch\nfrom toolz import curry\n\n\n@curry\ndef weighted_loss(weight, x, y):\n # return torch.mean(torch.pow(x - y, 2).mul(weight.float()))\n return torch.mean(torch.abs(x - y).mul(weight.float()))\n\n\n@curry\ndef dynamic_loss(truth, pred, weights=None):\n x = truth['prognostic']\n y = pred['prognostic']\n\n total_loss = 0\n # time series loss\n for key in y:\n w = weights.get(key, 1.0)\n total_loss += weighted_loss(w, x[key], y[key]) / len(y)\n\n return total_loss\n" ]
[ [ "torch.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Vrekrer/magdynlab
[ "f5149d3213a37c7c18f39876c3e2367fc7deb9e8", "f5149d3213a37c7c18f39876c3e2367fc7deb9e8" ]
[ "controllers/resistance_controller.py", "controllers/z_controller_puc.py" ]
[ "# coding=utf-8\n\n# Author: Diego González Chávez\n# email : [email protected] / [email protected]\n#\n# Resistance Controller\n#\n# TODO:\n# Make documentation\n\nimport time\nimport numpy\n\n__all__ = ['ResistanceController']\n\n\nclass ResistanceController(object):\n\n# Controllador de SourceMeter para medidas de resistencia\n\n def __init__(self, source_meter):\n self.SM = source_meter\n self.SM.sense_mode = '4-Wire'\n self.Mode('Current')\n \n def Mode(self, mode):\n if mode == 'Voltage':\n self.SM.source_function = 'Voltage'\n self.SM.sense_function = 'Current'\n self.SM.source_value = 1E-3\n elif mode == 'Current':\n self.SM.source_function = 'Current'\n self.SM.sense_function = 'Voltage'\n self.SM.source_value = 50E-6\n \n \n def getResistance(self, n = 5, iniDelay = 0.1, measDelay = 0.01):\n vsIn = numpy.zeros(n)\n out = self.SM.output\n self.SM.output = 'ON'\n\n time.sleep(iniDelay)\n sv = self.SM.source_value\n svs = numpy.linspace(-sv, sv, n)\n \n for i in range(n):\n time.sleep(measDelay)\n self.SM.source_value = svs[i]\n vsIn[i] = self.SM.sense_value\n self.SM.output = out\n X = numpy.polyfit(svs, vsIn, 1)[0]\n if 'VOLT' in self.SM.sense_function:\n return 1/X\n else:\n return X\n", "# coding=utf-8\n\n# Author: Diego González Chávez\n# email : [email protected] / [email protected]\n#\n# Magneto Impedance Controller for PUC\n#\n# TODO:\n# Make documentation\n\nimport time\nimport numpy\n\n__all__ = ['ZControler_PUC']\n\n\nclass ZControler_PUC(object):\n\n def __init__(self, LockIn_instrument):\n self.LockIn = LockIn_instrument\n \n def setFreq(self, freq):\n return self.LockIn.setOscilatorFreq(freq)\n \n def getFXY(self, n = 5, iniDelay = 0.1, measDelay = 0):\n vsIn = numpy.zeros((n,2))\n time.sleep(iniDelay)\n for i in range(n):\n time.sleep(measDelay)\n vsIn[i] = [self.LockIn.X, self.LockIn.Y]\n vIn = vsIn.mean(axis=0)\n freq = self.LockIn.Freq\n return numpy.array([freq, vIn[0], vIn[1]])\n" ]
[ [ "numpy.polyfit", "numpy.zeros", "numpy.linspace" ], [ "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
phc-health/covid-data-model
[ "13c5084d631cf2dd33a7fe558c212dbd32b686e6" ]
[ "tests/libs/datasets/timeseries_test.py" ]
[ "import dataclasses\nimport datetime\nimport io\nimport pathlib\nimport pickle\n\nimport pytest\nimport pandas as pd\nimport numpy as np\nimport structlog\n\nfrom datapublic.common_fields import CommonFields\nfrom datapublic.common_fields import DemographicBucket\nfrom datapublic.common_fields import FieldName\nfrom datapublic.common_fields import PdFields\n\nfrom datapublic.common_test_helpers import to_dict\n\nfrom libs import github_utils\nfrom libs.datasets import AggregationLevel\nfrom libs.datasets import dataset_pointer\nfrom libs.datasets import taglib\n\nfrom libs.datasets import timeseries\nfrom libs.datasets.taglib import TagType\nfrom libs.datasets.taglib import UrlStr\nfrom libs.pipeline import Region\nfrom tests import test_helpers\nfrom tests.dataset_utils_test import read_csv_and_index_fips\nfrom tests.dataset_utils_test import read_csv_and_index_fips_date\nfrom tests.test_helpers import TimeseriesLiteral\n\n\n# turns all warnings into errors for this module\npytestmark = pytest.mark.filterwarnings(\"error\", \"ignore::libs.pipeline.BadFipsWarning\")\n\n\ndef _make_dataset_pointer(tmpdir, filename: str = \"somefile.csv\") -> dataset_pointer.DatasetPointer:\n # The fixture passes in a py.path, which is not the type in DatasetPointer.\n path = pathlib.Path(tmpdir) / filename\n\n fake_git_summary = github_utils.GitSummary(sha=\"abcdef\", branch=\"main\", is_dirty=True)\n\n return dataset_pointer.DatasetPointer(\n dataset_type=dataset_pointer.DatasetType.MULTI_REGION,\n path=path,\n model_git_info=fake_git_summary,\n updated_at=datetime.datetime.utcnow(),\n )\n\n\[email protected](\"include_na_at_end\", [False, True])\ndef test_remove_padded_nans(include_na_at_end):\n rows = [\n {\"date\": \"2020-02-01\", \"cases\": pd.NA},\n {\"date\": \"2020-02-02\", \"cases\": pd.NA},\n {\"date\": \"2020-02-03\", \"cases\": 1},\n {\"date\": \"2020-02-04\", \"cases\": pd.NA},\n {\"date\": \"2020-02-05\", \"cases\": 2},\n {\"date\": \"2020-02-06\", \"cases\": 3},\n ]\n if include_na_at_end:\n rows += [{\"date\": \"2020-02-07\", \"cases\": pd.NA}]\n\n df = pd.DataFrame(rows)\n\n results = timeseries._remove_padded_nans(df, [\"cases\"])\n expected_series = pd.Series([1, pd.NA, 2, 3], name=\"cases\")\n\n pd.testing.assert_series_equal(results.cases, expected_series)\n\n\ndef test_multi_region_to_from_timeseries_and_latest_values(tmp_path: pathlib.Path):\n # TODO(tom): Replace csv with test_helpers builders and uncomment assert in add_fips_static_df\n ts_df = read_csv_and_index_fips_date(\n \"fips,county,aggregate_level,date,m1,m2\\n\"\n \"97111,Bar County,county,2020-04-02,2,\\n\"\n \"97222,Foo County,county,2020-04-01,,10\\n\"\n \"01,,state,2020-04-01,,20\\n\"\n ).reset_index()\n latest_values_df = read_csv_and_index_fips(\n \"fips,county,aggregate_level,c1,c2\\n\"\n \"97111,Bar County,county,3,\\n\"\n \"97222,Foo County,county,4,10.5\\n\"\n \"01,,state,,123.4\\n\"\n ).reset_index()\n multiregion = (\n timeseries.MultiRegionDataset.from_fips_timeseries_df(ts_df)\n .add_fips_static_df(latest_values_df)\n .add_provenance_csv(\n io.StringIO(\"location_id,variable,provenance\\n\" \"iso1:us#fips:97111,m1,ts197111prov\\n\")\n )\n )\n region_97111 = multiregion.get_one_region(Region.from_fips(\"97111\"))\n assert region_97111.date_indexed.at[\"2020-04-02\", \"m1\"] == 2\n assert region_97111.latest[\"c1\"] == 3\n assert multiregion.get_one_region(Region.from_fips(\"01\")).latest[\"c2\"] == 123.4\n\n csv_path = tmp_path / \"multiregion.csv\"\n multiregion.to_csv(csv_path)\n multiregion_loaded = 
timeseries.MultiRegionDataset.from_csv(csv_path)\n region_97111 = multiregion_loaded.get_one_region(Region.from_fips(\"97111\"))\n assert region_97111.date_indexed.at[\"2020-04-02\", \"m1\"] == 2\n assert region_97111.latest[\"c1\"] == 3\n assert region_97111.region.fips == \"97111\"\n assert multiregion_loaded.get_one_region(Region.from_fips(\"01\")).latest[\"c2\"] == 123.4\n test_helpers.assert_dataset_like(\n multiregion, multiregion_loaded, drop_na_latest=True, drop_na_timeseries=True\n )\n\n\ndef test_multi_region_get_one_region():\n ts = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,county,aggregate_level,date,m1,m2\\n\"\n \"iso1:us#fips:97111,Bar County,county,2020-04-02,2,\\n\"\n \"iso1:us#fips:97222,Foo County,county,2020-04-01,,10\\n\"\n \"iso1:us#fips:97111,Bar County,county,,3,\\n\"\n \"iso1:us#fips:97222,Foo County,county,,,11\\n\"\n )\n )\n region_97111_ts = ts.get_one_region(Region.from_fips(\"97111\"))\n assert to_dict([\"date\"], region_97111_ts.data[[\"date\", \"m1\", \"m2\"]]) == {\n pd.to_datetime(\"2020-04-02\"): {\"m1\": 2}\n }\n assert region_97111_ts.latest[\"m1\"] == 3\n assert region_97111_ts.region.fips == \"97111\"\n\n region_97222_ts = ts.get_one_region(Region.from_fips(\"97222\"))\n assert to_dict([\"date\"], region_97222_ts.data) == {\n pd.to_datetime(\"2020-04-01\"): {\"m2\": 10, \"location_id\": \"iso1:us#fips:97222\",}\n }\n assert region_97222_ts.latest[\"m2\"] == 11\n\n\ndef test_multi_region_get_counties_and_places():\n ds_in = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,county,aggregate_level,date,m1,m2\\n\"\n \"iso1:us#fips:97111,Bar County,county,2020-04-02,2,\\n\"\n \"iso1:us#fips:97111,Bar County,county,2020-04-03,3,\\n\"\n \"iso1:us#fips:97222,Foo County,county,2020-04-01,,10\\n\"\n \"iso1:us#fips:9711122,,place,2020-04-02,5,60\\n\"\n \"iso1:us#fips:97,Great State,state,2020-04-01,1,2\\n\"\n \"iso1:us#fips:97111,Bar County,county,,3,\\n\"\n \"iso1:us#fips:9711122,,place,,3,\\n\"\n \"iso1:us#fips:97222,Foo County,county,,,10\\n\"\n \"iso1:us#fips:97,Great State,state,,1,2\\n\"\n )\n )\n ds_out = ds_in.get_counties_and_places(\n after=pd.to_datetime(\"2020-04-01\")\n ).timeseries.reset_index()\n assert to_dict([\"location_id\", \"date\"], ds_out[[\"location_id\", \"date\", \"m1\"]]) == {\n (\"iso1:us#fips:97111\", pd.to_datetime(\"2020-04-02\")): {\"m1\": 2},\n (\"iso1:us#fips:97111\", pd.to_datetime(\"2020-04-03\")): {\"m1\": 3},\n (\"iso1:us#fips:9711122\", pd.to_datetime(\"2020-04-02\")): {\"m1\": 5},\n }\n\n\ndef test_multi_region_groupby():\n ts = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,county,aggregate_level,date,m1,m2\\n\"\n \"iso1:us#fips:97222,Foo County,county,2020-04-01,,10\\n\"\n \"iso1:us#fips:97222,Foo County,county,2020-04-02,,20\\n\"\n \"iso1:us#fips:97,Great State,state,2020-04-01,1,2\\n\"\n \"iso1:us#fips:97222,Foo County,county,,,20\\n\"\n \"iso1:us#fips:97,Great State,state,,1,2\\n\"\n )\n )\n\n assert ts.groupby_region()[\"m2\"].last().to_dict() == {\n \"iso1:us#fips:97\": 2,\n \"iso1:us#fips:97222\": 20,\n }\n\n\ndef test_one_region_dataset():\n bar_county_row = {\n \"location_id\": \"iso1:us#fips:97111\",\n \"county\": \"Bar County\",\n \"aggregate_level\": \"county\",\n \"date\": \"2020-04-02\",\n \"m1\": 2,\n \"m2\": pd.NA,\n }\n ts = timeseries.OneRegionTimeseriesDataset(\n Region.from_fips(\"97111\"), pd.DataFrame([bar_county_row]), {}, pd.DataFrame([])\n )\n assert ts.has_one_region() == True\n\n foo_county_row = {\n \"location_id\": 
\"iso1:us#fips:97222\",\n \"county\": \"Foo County\",\n \"aggregate_level\": \"county\",\n \"date\": \"2020-04-01\",\n \"m1\": pd.NA,\n \"m2\": 10,\n }\n with pytest.raises(ValueError):\n timeseries.OneRegionTimeseriesDataset(\n Region.from_fips(\"97222\"),\n pd.DataFrame([bar_county_row, foo_county_row]),\n {},\n pd.DataFrame([]),\n )\n\n with structlog.testing.capture_logs() as logs:\n ts = timeseries.OneRegionTimeseriesDataset(\n Region.from_fips(\"97111\"),\n pd.DataFrame([], columns=\"location_id county aggregate_level date m1 m2\".split()),\n {},\n pd.DataFrame([]),\n )\n assert [l[\"event\"] for l in logs] == [\"Creating OneRegionTimeseriesDataset with zero regions\"]\n assert ts.empty\n\n\ndef test_multiregion_provenance():\n m1 = FieldName(\"m1\")\n m2 = FieldName(\"m2\")\n\n region_97111 = Region.from_fips(\"97111\")\n region_97222 = Region.from_fips(\"97222\")\n region_03 = Region.from_fips(\"03\")\n ds = test_helpers.build_dataset(\n {\n region_97111: {m1: TimeseriesLiteral([1, 2, None], provenance=\"src11\")},\n region_97222: {\n m1: TimeseriesLiteral([None, None, 3], provenance=\"src21\"),\n m2: TimeseriesLiteral([10, None, 30], provenance=\"src22\"),\n },\n region_03: {\n m1: TimeseriesLiteral([None, None, 4], provenance=\"src31\"),\n m2: TimeseriesLiteral([None, None, 40], provenance=\"src32\"),\n },\n },\n )\n\n # Use loc[...].at[...] as work-around for https://github.com/pandas-dev/pandas/issues/26989\n assert ds.provenance.loc[region_97111.location_id].at[\"m1\"] == \"src11\"\n assert ds.get_one_region(region_97111).provenance[\"m1\"] == [\"src11\"]\n assert ds.provenance.loc[region_97222.location_id].at[\"m2\"] == \"src22\"\n assert ds.get_one_region(region_97222).provenance[\"m2\"] == [\"src22\"]\n assert ds.provenance.loc[region_03.location_id].at[\"m2\"] == \"src32\"\n assert ds.get_one_region(region_03).provenance[\"m2\"] == [\"src32\"]\n\n counties = ds.get_counties_and_places(after=pd.to_datetime(\"2020-04-01\"))\n assert region_03.location_id not in counties.provenance.index\n assert counties.provenance.loc[region_97222.location_id].at[\"m1\"] == \"src21\"\n assert counties.get_one_region(region_97222).provenance[\"m1\"] == [\"src21\"]\n\n\ndef test_one_region_multiple_provenance():\n tag1 = test_helpers.make_tag(date=\"2020-04-01\")\n tag2 = test_helpers.make_tag(date=\"2020-04-02\")\n one_region = test_helpers.build_one_region_dataset(\n {\n CommonFields.ICU_BEDS: TimeseriesLiteral(\n [0, 2, 4], annotation=[tag1, tag2], provenance=[\"prov1\", \"prov2\"],\n ),\n CommonFields.CASES: [100, 200, 300],\n }\n )\n\n assert set(one_region.annotations_all_bucket(CommonFields.ICU_BEDS)) == {tag1, tag2}\n assert sorted(one_region.provenance[CommonFields.ICU_BEDS]) == [\"prov1\", \"prov2\"]\n\n\ndef test_add_aggregate_level():\n ts_df = read_csv_and_index_fips_date(\"fips,date,m1,m2\\n\" \"36061,2020-04-02,2,\\n\").reset_index()\n multiregion = timeseries.MultiRegionDataset.from_fips_timeseries_df(ts_df)\n assert multiregion.geo_data.aggregate_level.to_list() == [\"county\"]\n\n\ndef test_fips_not_in_geo_data_csv_raises():\n df = test_helpers.read_csv_str(\n \" location_id, date, cases\\n\"\n \"iso1:us#fips:06010, 2020-04-01, 100\\n\",\n skip_spaces=True,\n )\n\n with pytest.raises(AssertionError):\n timeseries.MultiRegionDataset.from_timeseries_df(df)\n\n\ndef test_append_regions():\n ts_fips = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,date,county,aggregate_level,m1,m2\\n\"\n \"iso1:us#fips:97111,2020-04-02,Bar County,county,2,\\n\"\n 
\"iso1:us#fips:97111,2020-04-03,Bar County,county,3,\\n\"\n \"iso1:us#fips:97222,2020-04-04,Foo County,county,,11\\n\"\n \"iso1:us#fips:97111,,Bar County,county,3,\\n\"\n \"iso1:us#fips:97222,,Foo County,county,,11\\n\"\n )\n ).add_provenance_csv(\n io.StringIO(\"location_id,variable,provenance\\n\" \"iso1:us#fips:97111,m1,prov97111m1\\n\")\n )\n ts_cbsa = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,date,m2\\n\"\n \"iso1:us#cbsa:10100,2020-04-02,2\\n\"\n \"iso1:us#cbsa:10100,2020-04-03,3\\n\"\n \"iso1:us#cbsa:20300,2020-04-03,4\\n\"\n \"iso1:us#cbsa:10100,,3\\n\"\n \"iso1:us#cbsa:20300,,4\\n\"\n )\n ).add_provenance_csv(\n io.StringIO(\"location_id,variable,provenance\\n\" \"iso1:us#cbsa:20300,m1,prov20200m2\\n\")\n )\n # Check that merge is symmetric\n ts_merged_1 = ts_fips.append_regions(ts_cbsa)\n ts_merged_2 = ts_cbsa.append_regions(ts_fips)\n test_helpers.assert_dataset_like(ts_merged_1, ts_merged_2)\n\n ts_expected = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,date,county,aggregate_level,m1,m2\\n\"\n \"iso1:us#cbsa:10100,2020-04-02,,,,2\\n\"\n \"iso1:us#cbsa:10100,2020-04-03,,,,3\\n\"\n \"iso1:us#cbsa:20300,2020-04-03,,,,4\\n\"\n \"iso1:us#cbsa:10100,,,,,3\\n\"\n \"iso1:us#cbsa:20300,,,,,4\\n\"\n \"iso1:us#fips:97111,2020-04-02,Bar County,county,2,\\n\"\n \"iso1:us#fips:97111,2020-04-03,Bar County,county,3,\\n\"\n \"iso1:us#fips:97222,2020-04-04,Foo County,county,,11\\n\"\n \"iso1:us#fips:97111,,Bar County,county,3,\\n\"\n \"iso1:us#fips:97222,,Foo County,county,,11\\n\"\n )\n ).add_provenance_csv(\n io.StringIO(\n \"location_id,variable,provenance\\n\"\n \"iso1:us#fips:97111,m1,prov97111m1\\n\"\n \"iso1:us#cbsa:20300,m1,prov20200m2\\n\"\n )\n )\n test_helpers.assert_dataset_like(ts_merged_1, ts_expected)\n\n\ndef test_append_regions_with_buckets():\n region_cbsa = Region.from_cbsa_code(\"10100\")\n region_la = Region.from_fips(\"06037\")\n region_sf = Region.from_fips(\"06075\")\n m1 = FieldName(\"m1\")\n m2 = FieldName(\"m2\")\n age_40s = DemographicBucket(\"age:40-49\")\n data_county = {\n region_la: {\n m1: {\n age_40s: TimeseriesLiteral([1, 2], annotation=[test_helpers.make_tag()]),\n DemographicBucket.ALL: [2, 3],\n }\n },\n region_sf: {m1: [3, 4]},\n }\n data_cbsa = {region_cbsa: {m2: [5, 6]}}\n ds_county = test_helpers.build_dataset(data_county)\n ds_cbsa = test_helpers.build_dataset(data_cbsa)\n\n ds_out_1 = ds_county.append_regions(ds_cbsa)\n ds_out_2 = ds_cbsa.append_regions(ds_county)\n\n ds_expected = test_helpers.build_dataset({**data_cbsa, **data_county})\n\n test_helpers.assert_dataset_like(ds_out_1, ds_expected)\n test_helpers.assert_dataset_like(ds_out_2, ds_expected)\n\n\ndef test_append_regions_duplicate_region_raises():\n ts1 = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,date,county,aggregate_level,m1,m2\\n\"\n \"iso1:us#fips:97111,2020-04-02,Bar County,county,2,\\n\"\n )\n )\n ts2 = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,date,county,aggregate_level,m1,m2\\n\"\n \"iso1:us#fips:97111,2020-04-03,Bar County,county,2,\\n\"\n )\n )\n with pytest.raises(ValueError):\n ts1.append_regions(ts2)\n\n\ndef test_timeseries_long():\n \"\"\"Test timeseries_long where all data has bucket `all`\"\"\"\n region_cbsa = Region.from_cbsa_code(\"10100\")\n region_county = Region.from_fips(\"97111\")\n ds = test_helpers.build_dataset(\n {\n region_county: {FieldName(\"m1\"): [2, None, 4]},\n region_cbsa: {FieldName(\"m2\"): [2, 3, None]},\n },\n start_date=\"2020-04-02\",\n 
)\n\n expected = test_helpers.read_csv_str(\n \" location_id, date,variable,value\\n\"\n \"iso1:us#cbsa:10100,2020-04-02, m2, 2\\n\"\n \"iso1:us#cbsa:10100,2020-04-03, m2, 3\\n\"\n \"iso1:us#fips:97111,2020-04-02, m1, 2\\n\"\n \"iso1:us#fips:97111,2020-04-04, m1, 4\\n\",\n skip_spaces=True,\n dtype={\"value\": float},\n )\n long_series = ds.timeseries_bucketed_long\n assert long_series.index.names == [\n CommonFields.LOCATION_ID,\n PdFields.DEMOGRAPHIC_BUCKET,\n CommonFields.DATE,\n PdFields.VARIABLE,\n ]\n assert long_series.name == PdFields.VALUE\n long_df = long_series.xs(\"all\", level=PdFields.DEMOGRAPHIC_BUCKET).reset_index()\n pd.testing.assert_frame_equal(long_df, expected, check_like=True)\n\n\ndef test_timeseries_bucketed_long():\n region_cbsa = Region.from_cbsa_code(\"10100\")\n region_county = Region.from_fips(\"97111\")\n bucket_age_0 = DemographicBucket(\"age:0-9\")\n bucket_age_10 = DemographicBucket(\"age:10-19\")\n bucket_all = DemographicBucket(\"all\")\n ds = test_helpers.build_dataset(\n {\n region_county: {\n FieldName(\"m1\"): {\n bucket_age_0: [4, 5, 6],\n bucket_age_10: [None, None, 7],\n bucket_all: [2, None, 4],\n }\n },\n region_cbsa: {FieldName(\"m2\"): [2, 3, None]},\n },\n start_date=\"2020-04-02\",\n )\n\n expected = test_helpers.read_csv_str(\n \" location_id,demographic_bucket, date,variable,value\\n\"\n \"iso1:us#cbsa:10100, all,2020-04-02, m2, 2\\n\"\n \"iso1:us#cbsa:10100, all,2020-04-03, m2, 3\\n\"\n \"iso1:us#fips:97111, age:0-9,2020-04-02, m1, 4\\n\"\n \"iso1:us#fips:97111, age:0-9,2020-04-03, m1, 5\\n\"\n \"iso1:us#fips:97111, age:0-9,2020-04-04, m1, 6\\n\"\n \"iso1:us#fips:97111, age:10-19,2020-04-04, m1, 7\\n\"\n \"iso1:us#fips:97111, all,2020-04-02, m1, 2\\n\"\n \"iso1:us#fips:97111, all,2020-04-04, m1, 4\\n\",\n skip_spaces=True,\n dtype={\"value\": float},\n )\n long_series = ds.timeseries_bucketed_long\n assert long_series.index.names == [\n CommonFields.LOCATION_ID,\n PdFields.DEMOGRAPHIC_BUCKET,\n CommonFields.DATE,\n PdFields.VARIABLE,\n ]\n assert long_series.name == PdFields.VALUE\n pd.testing.assert_frame_equal(long_series.reset_index(), expected, check_like=True)\n\n\ndef test_timeseries_distribution_long():\n bucket_age_0 = DemographicBucket(\"age:0-9\")\n bucket_age_10 = DemographicBucket(\"age:10-19\")\n bucket_all = DemographicBucket(\"all\")\n bucket_blueman = DemographicBucket(\"color;gender:blue;man\")\n ds = test_helpers.build_default_region_dataset(\n {\n FieldName(\"m1\"): {\n bucket_age_0: [1, 2, 3],\n bucket_age_10: [None, None, 4],\n bucket_all: [5, None, 6],\n bucket_blueman: [7, None, None],\n }\n }\n )\n\n long_series = ds.timeseries_distribution_long\n assert long_series.name == PdFields.VALUE\n assert long_series.index.names == [\n CommonFields.LOCATION_ID,\n PdFields.DEMOGRAPHIC_BUCKET,\n CommonFields.DATE,\n PdFields.DISTRIBUTION,\n PdFields.VARIABLE,\n ]\n expected = test_helpers.read_csv_str(\n \" location_id, demographic_bucket, date,distribution,variable,value\\n\"\n \"iso1:us#fips:97222, age:0-9,2020-04-01, age, m1, 1\\n\"\n \"iso1:us#fips:97222, age:0-9,2020-04-02, age, m1, 2\\n\"\n \"iso1:us#fips:97222, age:0-9,2020-04-03, age, m1, 3\\n\"\n \"iso1:us#fips:97222, age:10-19,2020-04-03, age, m1, 4\\n\"\n \"iso1:us#fips:97222, all,2020-04-01, all, m1, 5\\n\"\n \"iso1:us#fips:97222, all,2020-04-03, all, m1, 6\\n\"\n \"iso1:us#fips:97222,color;gender:blue;man,2020-04-01,color;gender, m1, 7\\n\",\n skip_spaces=True,\n dtype={\"value\": float},\n )\n pd.testing.assert_frame_equal(long_series.reset_index(), expected, 
check_like=True)\n\n\ndef test_timeseries_wide_dates():\n region_cbsa = Region.from_cbsa_code(\"10100\")\n region_fips = Region.from_fips(\"97111\")\n m1 = FieldName(\"m1\")\n m2 = FieldName(\"m2\")\n ds = test_helpers.build_dataset(\n {region_cbsa: {m2: [2, 3]}, region_fips: {m1: [2, None, 4]}},\n static_by_region_then_field_name={region_fips: {CommonFields.COUNTY: \"Bar County\", m1: 4}},\n start_date=\"2020-04-02\",\n )\n\n # TODO(tom): Delete this test of _timeseries_not_bucketed_wide_dates which is no longer\n # accessed from outside timeseries when there are other tests for from_timeseries_wide_dates_df\n ds_wide = ds._timeseries_not_bucketed_wide_dates\n assert ds_wide.index.names == [CommonFields.LOCATION_ID, PdFields.VARIABLE]\n assert ds_wide.columns.names == [CommonFields.DATE]\n\n expected = (\n pd.read_csv(\n io.StringIO(\n \"location_id,variable,2020-04-02,2020-04-03,2020-04-04\\n\"\n \"iso1:us#cbsa:10100,m2,2,3,\\n\"\n \"iso1:us#fips:97111,m1,2,,4\\n\"\n ),\n )\n .set_index(ds_wide.index.names)\n .rename_axis(columns=\"date\")\n .astype(float)\n )\n expected.columns = pd.to_datetime(expected.columns)\n\n pd.testing.assert_frame_equal(ds_wide, expected)\n\n # Recreate the dataset using `from_timeseries_wide_dates_df`.\n ds_recreated = timeseries.MultiRegionDataset.from_timeseries_wide_dates_df(\n ds_wide\n ).add_static_values(ds.static.reset_index())\n test_helpers.assert_dataset_like(ds, ds_recreated)\n\n assert ds.get_timeseries_not_bucketed_wide_dates(m1).loc[region_fips.location_id, :].replace(\n {np.nan: None}\n ).to_list() == [2, None, 4]\n assert ds.get_timeseries_bucketed_wide_dates(m1).loc[\n (region_fips.location_id, DemographicBucket.ALL), :\n ].replace({np.nan: None}).to_list() == [2, None, 4]\n\n\ndef test_timeseries_wide_dates_empty():\n m1 = FieldName(\"m1\")\n ds = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,date,county,aggregate_level,m1,m2\\n\"\n \"iso1:us#cbsa:10100,,,,,3\\n\"\n \"iso1:us#fips:97111,,Bar County,county,4,\\n\"\n )\n )\n\n assert ds.get_timeseries_not_bucketed_wide_dates(m1).empty\n assert ds.get_timeseries_bucketed_wide_dates(m1).empty\n assert ds.get_timeseries_not_bucketed_wide_dates(CommonFields.CASES).empty\n assert ds.get_timeseries_bucketed_wide_dates(CommonFields.CASES).empty\n\n\ndef test_write_read_wide_dates_csv_compare_literal(tmpdir):\n pointer = _make_dataset_pointer(tmpdir)\n\n region_as = Region.from_state(\"AS\")\n region_sf = Region.from_fips(\"06075\")\n metrics_as = {\n CommonFields.ICU_BEDS: TimeseriesLiteral([0, 2, 4], provenance=\"pt_src1\"),\n CommonFields.CASES: [100, 200, 300],\n }\n metrics_sf = {\n CommonFields.DEATHS: TimeseriesLiteral([1, 2, None], provenance=\"pt_src2\"),\n CommonFields.CASES: [None, 210, 310],\n }\n dataset_in = test_helpers.build_dataset({region_as: metrics_as, region_sf: metrics_sf})\n\n dataset_in.write_to_dataset_pointer(pointer)\n\n # Compare written file with a string literal so a test fails if something changes in how the\n # file is written. 
The literal contains spaces to align the columns in the source.\n assert pointer.path_wide_dates().read_text() == (\n \" location_id,variable,demographic_bucket,provenance,2020-04-03,2020-04-02,2020-04-01\\n\"\n \"iso1:us#iso2:us-as, cases, all, , 300, 200, 100\\n\"\n \"iso1:us#iso2:us-as,icu_beds, all, pt_src1, 4, 2, 0\\n\"\n \"iso1:us#iso2:us-ca#fips:06075,cases, all, , 310, 210,\\n\"\n \"iso1:us#iso2:us-ca#fips:06075,deaths, all, pt_src2, , 2, 1\\n\"\n ).replace(\" \", \"\")\n\n dataset_read = timeseries.MultiRegionDataset.read_from_pointer(pointer)\n test_helpers.assert_dataset_like(dataset_read, dataset_in)\n\n # Check that a file without the demographic_bucket column (as written before\n # https://github.com/covid-projections/covid-data-model/pull/1021) can be read.\n pointer.path_wide_dates().write_text(\n \" location_id,variable,provenance,2020-04-03,2020-04-02,2020-04-01\\n\"\n \" iso1:us#iso2:us-as, cases, , 300, 200, 100\\n\"\n \" iso1:us#iso2:us-as,icu_beds, pt_src1, 4, 2, 0\\n\"\n \"iso1:us#iso2:us-ca#fips:06075, cases, , 310, 210,\\n\"\n \"iso1:us#iso2:us-ca#fips:06075, deaths, pt_src2, , 2, 1\\n\".replace(\n \" \", \"\"\n )\n )\n dataset_without_bucket_read = timeseries.MultiRegionDataset.read_from_pointer(pointer)\n test_helpers.assert_dataset_like(dataset_without_bucket_read, dataset_in)\n\n\ndef test_write_read_wide_dates_csv_with_annotation(tmpdir):\n pointer = _make_dataset_pointer(tmpdir)\n\n region = Region.from_state(\"AS\")\n metrics = {\n CommonFields.ICU_BEDS: TimeseriesLiteral(\n [0, 2, 4],\n annotation=[\n test_helpers.make_tag(date=\"2020-04-01\"),\n test_helpers.make_tag(TagType.ZSCORE_OUTLIER, date=\"2020-04-02\"),\n ],\n ),\n CommonFields.CASES: [100, 200, 300],\n }\n dataset_in = test_helpers.build_dataset({region: metrics})\n\n dataset_in.write_to_dataset_pointer(pointer)\n dataset_read = timeseries.MultiRegionDataset.read_from_pointer(pointer)\n\n test_helpers.assert_dataset_like(dataset_read, dataset_in)\n\n\ndef test_write_read_dataset_pointer_with_provenance_list(tmpdir):\n pointer = _make_dataset_pointer(tmpdir)\n\n dataset_in = test_helpers.build_default_region_dataset(\n {\n CommonFields.ICU_BEDS: TimeseriesLiteral(\n [0, 2, 4],\n annotation=[\n test_helpers.make_tag(date=\"2020-04-01\"),\n test_helpers.make_tag(date=\"2020-04-02\"),\n ],\n provenance=[\"prov1\", \"prov2\"],\n ),\n CommonFields.CASES: [100, 200, 300],\n }\n )\n\n dataset_in.write_to_dataset_pointer(pointer)\n dataset_read = timeseries.MultiRegionDataset.read_from_pointer(pointer)\n\n test_helpers.assert_dataset_like(dataset_read, dataset_in)\n\n\ndef test_write_read_wide_with_buckets(tmpdir):\n pointer = _make_dataset_pointer(tmpdir)\n\n all_bucket = DemographicBucket(\"all\")\n age_20s = DemographicBucket(\"age:20-29\")\n age_30s = DemographicBucket(\"age:30-39\")\n region_as = Region.from_state(\"AS\")\n region_sf = Region.from_fips(\"06075\")\n metrics_as = {\n CommonFields.ICU_BEDS: TimeseriesLiteral(\n [0, 2, 4],\n annotation=[\n test_helpers.make_tag(date=\"2020-04-01\"),\n test_helpers.make_tag(TagType.ZSCORE_OUTLIER, date=\"2020-04-02\"),\n ],\n ),\n CommonFields.CASES: [100, 200, 300],\n }\n metrics_sf = {\n CommonFields.CASES: {\n age_20s: TimeseriesLiteral([3, 4, 5], source=taglib.Source(type=\"MySource\")),\n age_30s: [4, 5, 6],\n all_bucket: [1, 2, 3],\n }\n }\n dataset_in = test_helpers.build_dataset({region_as: metrics_as, region_sf: metrics_sf})\n\n dataset_in.write_to_dataset_pointer(pointer)\n dataset_read = 
timeseries.MultiRegionDataset.read_from_pointer(pointer)\n\n test_helpers.assert_dataset_like(dataset_read, dataset_in)\n\n\ndef test_timeseries_drop_stale_timeseries_entire_region():\n ds_in = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,date,county,aggregate_level,m1,m2\\n\"\n \"iso1:us#cbsa:10100,2020-04-02,,,,2\\n\"\n \"iso1:us#cbsa:10100,2020-04-03,,,,3\\n\"\n \"iso1:us#cbsa:10100,,,,,3\\n\"\n \"iso1:us#fips:97111,2020-04-02,Bar County,county,2,\\n\"\n \"iso1:us#fips:97111,2020-04-04,Bar County,county,4,\\n\"\n \"iso1:us#fips:97111,,Bar County,county,4,\\n\"\n )\n )\n\n ds_out = ds_in.drop_stale_timeseries(pd.to_datetime(\"2020-04-04\"))\n\n ds_expected = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,date,county,aggregate_level,m1,m2\\n\"\n \"iso1:us#cbsa:10100,,,,,3\\n\"\n \"iso1:us#fips:97111,2020-04-02,Bar County,county,2,\\n\"\n \"iso1:us#fips:97111,2020-04-04,Bar County,county,4,\\n\"\n \"iso1:us#fips:97111,,Bar County,county,4,\\n\"\n )\n )\n test_helpers.assert_dataset_like(ds_out, ds_expected)\n\n\ndef test_timeseries_drop_stale_timeseries_one_metric():\n csv_in = (\n \"location_id,date,county,aggregate_level,m1,m2\\n\"\n \"iso1:us#cbsa:10100,2020-04-02,,,11,2\\n\"\n \"iso1:us#cbsa:10100,2020-04-03,,,,3\\n\"\n \"iso1:us#cbsa:10100,,,,,3\\n\"\n \"iso1:us#fips:97111,2020-04-02,Bar County,county,2,\\n\"\n \"iso1:us#fips:97111,2020-04-04,Bar County,county,4,\\n\"\n \"iso1:us#fips:97111,,Bar County,county,4,\\n\"\n )\n ds_in = timeseries.MultiRegionDataset.from_csv(io.StringIO(csv_in)).add_provenance_csv(\n io.StringIO(\n \"location_id,variable,provenance\\n\"\n \"iso1:us#cbsa:10100,m1,m1-10100prov\\n\"\n \"iso1:us#cbsa:10100,m2,m2-10100prov\\n\"\n \"iso1:us#fips:97111,m1,m1-97111prov\\n\"\n )\n )\n\n ds_out = ds_in.drop_stale_timeseries(pd.to_datetime(\"2020-04-03\"))\n\n # The only timeseries that is stale with cutoff of 4/3 is the CBSA m1. 
The expected\n # dataset is the same as the input with \"11\" removed from the timeseries and\n # corresponding provenance removed.\n ds_expected = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(csv_in.replace(\",11,\", \",,\"))\n ).add_provenance_csv(\n io.StringIO(\n \"location_id,variable,provenance\\n\"\n \"iso1:us#cbsa:10100,m2,m2-10100prov\\n\"\n \"iso1:us#fips:97111,m1,m1-97111prov\\n\"\n )\n )\n test_helpers.assert_dataset_like(ds_out, ds_expected)\n\n\ndef test_timeseries_drop_stale_timeseries_with_tag():\n region = Region.from_state(\"TX\")\n values_recent = [100, 200, 300, 400]\n values_stale = [100, 200, None, None]\n ts_recent = TimeseriesLiteral(values_recent, annotation=[test_helpers.make_tag()])\n ts_stale = TimeseriesLiteral(values_stale, annotation=[test_helpers.make_tag()])\n\n dataset_in = test_helpers.build_dataset(\n {region: {CommonFields.CASES: ts_recent, CommonFields.DEATHS: ts_stale}}\n )\n\n dataset_out = dataset_in.drop_stale_timeseries(pd.to_datetime(\"2020-04-03\"))\n\n assert len(dataset_out.tag) == 1\n # drop_stale_timeseries preserves the empty DEATHS column so add it to dataset_expected\n dataset_expected = test_helpers.build_dataset(\n {region: {CommonFields.CASES: ts_recent}}, timeseries_columns=[CommonFields.DEATHS]\n )\n test_helpers.assert_dataset_like(dataset_out, dataset_expected)\n\n\ndef test_append_region_and_get_regions_subset_with_tag():\n region_tx = Region.from_state(\"TX\")\n region_sf = Region.from_fips(\"06075\")\n values = [100, 200, 300, 400]\n ts_with_tag = TimeseriesLiteral(values, annotation=[test_helpers.make_tag()])\n\n dataset_tx = test_helpers.build_dataset({region_tx: {CommonFields.CASES: ts_with_tag}})\n dataset_sf = test_helpers.build_dataset({region_sf: {CommonFields.CASES: ts_with_tag}})\n\n dataset_appended = dataset_tx.append_regions(dataset_sf)\n\n assert len(dataset_appended.tag) == 2\n dataset_tx_and_sf = test_helpers.build_dataset(\n {region_tx: {CommonFields.CASES: ts_with_tag}, region_sf: {CommonFields.CASES: ts_with_tag}}\n )\n test_helpers.assert_dataset_like(dataset_appended, dataset_tx_and_sf)\n\n dataset_out = dataset_tx_and_sf.get_regions_subset([region_tx])\n assert len(dataset_out.tag) == 1\n test_helpers.assert_dataset_like(dataset_out, dataset_tx)\n\n\ndef test_one_region_annotations():\n region_tx = Region.from_state(\"TX\")\n region_sf = Region.from_fips(\"06075\")\n values = [100, 200, 300, 400]\n tag1 = test_helpers.make_tag(date=\"2020-04-01\")\n tag2a = test_helpers.make_tag(date=\"2020-04-02\")\n tag2b = test_helpers.make_tag(date=\"2020-04-03\")\n\n dataset_tx_and_sf = test_helpers.build_dataset(\n {\n region_tx: {CommonFields.CASES: (TimeseriesLiteral(values, annotation=[tag1]))},\n region_sf: {CommonFields.CASES: (TimeseriesLiteral(values, annotation=[tag2a, tag2b]))},\n }\n )\n\n # get_one_region and iter_one_regions use separate code to split up the tags. 
Test both of them.\n one_region_tx = dataset_tx_and_sf.get_one_region(region_tx)\n assert one_region_tx.annotations_all_bucket(CommonFields.CASES) == [tag1]\n one_region_sf = dataset_tx_and_sf.get_one_region(region_sf)\n assert one_region_sf.annotations_all_bucket(CommonFields.CASES) == [\n tag2a,\n tag2b,\n ]\n assert set(one_region_sf.sources_all_bucket(CommonFields.CASES)) == set()\n\n assert {\n region: one_region_dataset.annotations_all_bucket(CommonFields.CASES)\n for region, one_region_dataset in dataset_tx_and_sf.iter_one_regions()\n } == {region_sf: [tag2a, tag2b], region_tx: [tag1],}\n\n\ndef test_one_region_empty_annotations():\n one_region = test_helpers.build_one_region_dataset({CommonFields.CASES: [100, 200, 300]})\n\n assert one_region.annotations_all_bucket(CommonFields.CASES) == []\n assert one_region.source_url == {}\n assert one_region.provenance == {}\n assert set(one_region.sources_all_bucket(CommonFields.ICU_BEDS)) == set()\n assert set(one_region.sources_all_bucket(CommonFields.CASES)) == set()\n\n\ndef test_one_region_tag_objects_series():\n values = [100, 200]\n tag1 = test_helpers.make_tag(TagType.ZSCORE_OUTLIER, date=\"2020-04-01\")\n tag2a = test_helpers.make_tag(date=\"2020-04-02\")\n tag2b = test_helpers.make_tag(date=\"2020-04-03\")\n\n one_region = test_helpers.build_one_region_dataset(\n {\n CommonFields.CASES: TimeseriesLiteral(values, annotation=[tag1]),\n CommonFields.ICU_BEDS: TimeseriesLiteral(values, provenance=\"prov1\"),\n CommonFields.DEATHS: TimeseriesLiteral(values, annotation=[tag2a, tag2b]),\n }\n )\n\n assert isinstance(one_region.tag_objects_series, pd.Series)\n assert one_region.tag.index.equals(one_region.tag_objects_series.index)\n assert set(one_region.tag_objects_series.reset_index().itertuples(index=False)) == {\n (CommonFields.CASES, DemographicBucket.ALL, tag1.tag_type, tag1),\n (\n CommonFields.ICU_BEDS,\n DemographicBucket.ALL,\n \"provenance\",\n taglib.ProvenanceTag(source=\"prov1\"),\n ),\n (CommonFields.DEATHS, DemographicBucket.ALL, tag2a.tag_type, tag2a),\n (CommonFields.DEATHS, DemographicBucket.ALL, tag2b.tag_type, tag2b),\n }\n\n\ndef test_one_region_tag_objects_series_empty():\n one_region = test_helpers.build_one_region_dataset({CommonFields.CASES: [1, 2, 3]})\n assert one_region.tag.empty\n assert isinstance(one_region.tag_objects_series, pd.Series)\n assert one_region.tag_objects_series.empty\n\n\ndef test_timeseries_tag_objects_series():\n values = [100, 200]\n tag1 = test_helpers.make_tag(TagType.ZSCORE_OUTLIER, date=\"2020-04-01\")\n tag2a = test_helpers.make_tag(date=\"2020-04-02\")\n tag2b = test_helpers.make_tag(date=\"2020-04-03\")\n url_str = UrlStr(\"http://foo.com/1\")\n source_obj = taglib.Source(\"source_with_url\", url=url_str)\n\n ds = test_helpers.build_default_region_dataset(\n {\n CommonFields.CASES: TimeseriesLiteral(values, annotation=[tag1]),\n CommonFields.ICU_BEDS: TimeseriesLiteral(values, source=source_obj),\n CommonFields.DEATHS: TimeseriesLiteral(values, annotation=[tag2a, tag2b]),\n CommonFields.TOTAL_TESTS: values,\n }\n )\n\n assert isinstance(ds.tag_objects_series, pd.Series)\n assert ds.tag.index.equals(ds.tag_objects_series.index)\n location_id = test_helpers.DEFAULT_REGION.location_id\n assert set(ds.tag_objects_series.reset_index().itertuples(index=False)) == {\n (location_id, CommonFields.CASES, DemographicBucket.ALL, tag1.tag_type, tag1),\n (location_id, CommonFields.ICU_BEDS, DemographicBucket.ALL, TagType.SOURCE, source_obj),\n (location_id, CommonFields.DEATHS, 
DemographicBucket.ALL, tag2a.tag_type, tag2a),\n (location_id, CommonFields.DEATHS, DemographicBucket.ALL, tag2b.tag_type, tag2b),\n }\n\n\ndef test_timeseries_latest_values():\n dataset = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,date,county,aggregate_level,m1,m2\\n\"\n \"iso1:us#cbsa:10100,2020-04-02,,,,2\\n\"\n \"iso1:us#cbsa:10100,2020-04-03,,,10,3\\n\"\n \"iso1:us#cbsa:10100,2020-04-04,,,,1\\n\"\n \"iso1:us#cbsa:10100,,,,,4\\n\"\n \"iso1:us#fips:97111,2020-04-02,Bar County,county,2,\\n\"\n \"iso1:us#fips:97111,2020-04-04,Bar County,county,4,\\n\"\n \"iso1:us#fips:97111,,Bar County,county,5,\\n\"\n )\n )\n\n # Check bulk access via _timeseries_latest_values\n expected = pd.read_csv(\n io.StringIO(\"location_id,m1,m2\\n\" \"iso1:us#cbsa:10100,10,1\\n\" \"iso1:us#fips:97111,4,\\n\")\n )\n latest_from_timeseries = dataset._timeseries_latest_values().reset_index()\n pd.testing.assert_frame_equal(\n latest_from_timeseries, expected, check_like=True, check_dtype=False\n )\n\n # Check access to timeseries latests values via get_one_region\n region_10100 = dataset.get_one_region(Region.from_cbsa_code(\"10100\"))\n assert region_10100.latest == {\n \"aggregate_level\": \"cbsa\",\n \"county\": None,\n \"country\": \"USA\",\n \"fips\": \"10100\",\n \"state\": None,\n \"m1\": 10, # Derived from timeseries\n \"m2\": 4, # Explicitly in recent values\n }\n region_97111 = dataset.get_one_region(Region.from_fips(\"97111\"))\n assert region_97111.latest == {\n \"aggregate_level\": \"county\",\n \"county\": \"Bar County\",\n \"country\": \"USA\",\n \"fips\": \"97111\",\n \"state\": \"ZZ\",\n \"m1\": 5,\n \"m2\": None,\n }\n\n\ndef test_timeseries_latest_values_copied_to_static():\n dataset = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,date,county,aggregate_level,t1,s1\\n\"\n \"iso1:us#cbsa:10100,2020-04-02,,,,2\\n\"\n \"iso1:us#cbsa:10100,2020-04-03,,,10,3\\n\"\n \"iso1:us#cbsa:10100,2020-04-04,,,,1\\n\"\n \"iso1:us#cbsa:10100,,,,,4\\n\"\n \"iso1:us#fips:97111,2020-04-02,Bar County,county,2,\\n\"\n \"iso1:us#fips:97111,2020-04-04,Bar County,county,4,\\n\"\n \"iso1:us#fips:97111,,Bar County,county,,\\n\"\n )\n )\n\n # Check access to latest values as copied to static\n t1 = FieldName(\"t1\")\n s1 = FieldName(\"s1\")\n dataset_t1_latest_in_static = dataset.latest_in_static(t1)\n assert dataset_t1_latest_in_static.static.loc[\"iso1:us#cbsa:10100\", t1] == 10\n assert dataset_t1_latest_in_static.static.loc[\"iso1:us#fips:97111\", t1] == 4\n\n # Trying to copy the latest values of s1 fails because s1 already has a real value in static.\n # See also longer comment where the ValueError is raised.\n with pytest.raises(ValueError):\n dataset.latest_in_static(s1)\n\n\ndef test_join_columns():\n ts_1 = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,date,county,aggregate_level,m1\\n\"\n \"iso1:us#cbsa:10100,2020-04-02,,,\\n\"\n \"iso1:us#cbsa:10100,2020-04-03,,,\\n\"\n \"iso1:us#cbsa:10100,,,,\\n\"\n \"iso1:us#fips:97111,2020-04-02,Bar County,county,2\\n\"\n \"iso1:us#fips:97111,2020-04-04,Bar County,county,4\\n\"\n \"iso1:us#fips:97111,,Bar County,county,4\\n\"\n )\n ).add_provenance_csv(\n io.StringIO(\n \"location_id,variable,provenance\\n\"\n \"iso1:us#cbsa:10100,m1,ts110100prov\\n\"\n \"iso1:us#fips:97111,m1,ts197111prov\\n\"\n )\n )\n ts_2 = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,date,county,aggregate_level,m2\\n\"\n \"iso1:us#cbsa:10100,2020-04-02,,,2\\n\"\n 
\"iso1:us#cbsa:10100,2020-04-03,,,3\\n\"\n \"iso1:us#fips:97111,2020-04-02,Bar County,county,\\n\"\n \"iso1:us#fips:97111,2020-04-04,Bar County,county,\\n\"\n )\n ).add_provenance_csv(\n io.StringIO(\n \"location_id,variable,provenance\\n\"\n \"iso1:us#cbsa:10100,m2,ts110100prov\\n\"\n \"iso1:us#fips:97111,m2,ts197111prov\\n\"\n )\n )\n ts_expected = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,date,county,aggregate_level,m1,m2\\n\"\n \"iso1:us#cbsa:10100,2020-04-02,,,,2\\n\"\n \"iso1:us#cbsa:10100,2020-04-03,,,,3\\n\"\n \"iso1:us#cbsa:10100,,,,,\\n\"\n \"iso1:us#fips:97111,2020-04-02,Bar County,county,2,\\n\"\n \"iso1:us#fips:97111,2020-04-04,Bar County,county,4,\\n\"\n \"iso1:us#fips:97111,,Bar County,county,4,\\n\"\n )\n ).add_provenance_csv(\n io.StringIO(\n \"location_id,variable,provenance\\n\"\n \"iso1:us#cbsa:10100,m1,ts110100prov\\n\"\n \"iso1:us#cbsa:10100,m2,ts110100prov\\n\"\n \"iso1:us#fips:97111,m1,ts197111prov\\n\"\n \"iso1:us#fips:97111,m2,ts197111prov\\n\"\n )\n )\n ts_joined = ts_1.join_columns(ts_2)\n test_helpers.assert_dataset_like(ts_joined, ts_expected, drop_na_latest=True)\n\n ts_joined = ts_2.join_columns(ts_1)\n test_helpers.assert_dataset_like(ts_joined, ts_expected, drop_na_latest=True)\n\n with pytest.raises(ValueError):\n # Raises because the same column is in both datasets\n ts_2.join_columns(ts_2)\n\n # geo attributes, such as aggregation level and county name, generally appear in geo-data.csv\n # instead of MultiRegionDataset so they don't need special handling in join_columns.\n\n\ndef test_join_columns_missing_regions():\n ts_1 = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,date,county,aggregate_level,m1\\n\"\n \"iso1:us#cbsa:10100,2020-04-02,,,\\n\"\n \"iso1:us#cbsa:10100,2020-04-03,,,\\n\"\n \"iso1:us#cbsa:10100,,,,\\n\"\n \"iso1:us#fips:97111,2020-04-02,Bar County,county,2\\n\"\n \"iso1:us#fips:97111,2020-04-04,Bar County,county,4\\n\"\n \"iso1:us#fips:97111,,Bar County,county,4\\n\"\n )\n )\n ts_2 = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,date,county,aggregate_level,m2\\n\" \"iso1:us#cbsa:10100,2020-04-02,,,2\\n\"\n )\n )\n ts_expected = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,date,county,aggregate_level,m1,m2\\n\"\n \"iso1:us#cbsa:10100,2020-04-02,,,,2\\n\"\n \"iso1:us#cbsa:10100,2020-04-03,,,,\\n\"\n \"iso1:us#cbsa:10100,,,,,\\n\"\n \"iso1:us#fips:97111,2020-04-02,Bar County,county,2,\\n\"\n \"iso1:us#fips:97111,2020-04-04,Bar County,county,4,\\n\"\n \"iso1:us#fips:97111,,Bar County,county,4,\\n\"\n )\n )\n ts_joined = ts_1.join_columns(ts_2)\n test_helpers.assert_dataset_like(ts_joined, ts_expected, drop_na_latest=True)\n\n\ndef test_join_columns_with_buckets():\n m1 = FieldName(\"m1\")\n m2 = FieldName(\"m2\")\n age20s = DemographicBucket(\"age:20-29\")\n\n m1_data = {m1: {age20s: [1, 2, 3]}}\n ds_1 = test_helpers.build_default_region_dataset(m1_data)\n m2_data = {m2: {age20s: [4, 5, 6], DemographicBucket.ALL: [7, 8, 9]}}\n ds_2 = test_helpers.build_default_region_dataset(m2_data)\n\n with pytest.raises(ValueError):\n ds_1.join_columns(ds_1)\n\n ds_expected = test_helpers.build_default_region_dataset({**m1_data, **m2_data})\n\n ds_joined = ds_1.join_columns(ds_2)\n test_helpers.assert_dataset_like(ds_joined, ds_expected)\n\n\ndef test_join_columns_with_static():\n m1 = FieldName(\"m1\")\n m2 = FieldName(\"m2\")\n\n ds_1 = test_helpers.build_default_region_dataset({}, static={m1: 1})\n ds_2 = 
test_helpers.build_default_region_dataset({}, static={m2: 2})\n\n with pytest.raises(ValueError):\n ds_1.join_columns(ds_1)\n\n ds_expected = test_helpers.build_default_region_dataset({}, static={m1: 1, m2: 2})\n\n ds_joined = ds_1.join_columns(ds_2)\n test_helpers.assert_dataset_like(ds_joined, ds_expected)\n\n ds_joined = ds_2.join_columns(ds_1)\n test_helpers.assert_dataset_like(ds_joined, ds_expected)\n\n\ndef test_iter_one_region():\n ts = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,date,county,aggregate_level,m1\\n\"\n \"iso1:us#cbsa:10100,2020-04-02,,,\\n\"\n \"iso1:us#cbsa:10100,2020-04-03,,,\\n\"\n \"iso1:us#cbsa:10100,,,,\\n\"\n \"iso1:us#fips:97111,2020-04-02,Bar County,county,2\\n\"\n \"iso1:us#fips:97111,2020-04-04,Bar County,county,4\\n\"\n \"iso1:us#fips:97111,,Bar County,county,4\\n\"\n # 97222 does not have a row of latest data to make sure it still works\n \"iso1:us#fips:97222,2020-04-02,No Recent County,county,3\\n\"\n \"iso1:us#fips:97222,2020-04-04,No Recent County,county,5\\n\"\n )\n )\n assert {region.location_id for region, _ in ts.iter_one_regions()} == {\n \"iso1:us#cbsa:10100\",\n \"iso1:us#fips:97111\",\n \"iso1:us#fips:97222\",\n }\n for it_region, it_one_region in ts.iter_one_regions():\n one_region = ts.get_one_region(it_region)\n assert (one_region.data.fillna(\"\") == it_one_region.data.fillna(\"\")).all(axis=None)\n assert one_region.latest == it_one_region.latest\n assert one_region.provenance == it_one_region.provenance\n assert one_region.region == it_region\n assert one_region.region == it_one_region.region\n\n\ndef test_drop_regions_without_population():\n cbsa_with_pop = Region.from_cbsa_code(\"10100\")\n fips_with_pop = Region.from_fips(\"97111\")\n cbsa_without_pop = Region.from_cbsa_code(\"20300\")\n fips_without_pop = Region.from_fips(\"97222\")\n m1 = FieldName(\"m1\")\n regions_with_pop = [cbsa_with_pop, fips_with_pop]\n all_regions = regions_with_pop + [cbsa_without_pop, fips_without_pop]\n static_populations = {r: {CommonFields.POPULATION: 80_000} for r in regions_with_pop}\n ts_in = test_helpers.build_dataset(\n {r: {m1: [1]} for r in all_regions}, static_by_region_then_field_name=static_populations,\n )\n ts_expected = test_helpers.build_dataset(\n {r: {m1: [1]} for r in regions_with_pop},\n static_by_region_then_field_name=static_populations,\n )\n with structlog.testing.capture_logs() as logs:\n ts_out = timeseries.drop_regions_without_population(\n ts_in, [fips_without_pop.location_id], structlog.get_logger()\n )\n test_helpers.assert_dataset_like(ts_out, ts_expected)\n\n assert [l[\"event\"] for l in logs] == [\"Dropping unexpected regions without populaton\"]\n assert [l[\"location_ids\"] for l in logs] == [[cbsa_without_pop.location_id]]\n\n\ndef test_merge_provenance():\n ts = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,date,county,aggregate_level,m1\\n\"\n \"iso1:us#cbsa:10100,2020-04-02,,,\\n\"\n \"iso1:us#cbsa:10100,2020-04-03,,,\\n\"\n \"iso1:us#cbsa:10100,,,,\\n\"\n \"iso1:us#fips:97111,2020-04-02,Bar County,county,2\\n\"\n \"iso1:us#fips:97111,2020-04-04,Bar County,county,4\\n\"\n \"iso1:us#fips:97111,,Bar County,county,4\\n\"\n )\n ).add_provenance_csv(\n io.StringIO(\"location_id,variable,provenance\\n\" \"iso1:us#cbsa:10100,m1,ts110100prov\\n\")\n )\n\n with pytest.raises(NotImplementedError):\n ts.add_provenance_csv(\n io.StringIO(\"location_id,variable,provenance\\n\" \"iso1:us#fips:97111,m1,ts197111prov\\n\")\n )\n\n\ndef test_append_tags():\n region_sf = 
Region.from_fips(\"06075\")\n cases_values = [100, 200, 300, 400]\n metrics_sf = {\n CommonFields.POSITIVE_TESTS: TimeseriesLiteral([1, 2, 3, 4], provenance=\"pt_src2\"),\n CommonFields.CASES: cases_values,\n }\n dataset_in = test_helpers.build_dataset({region_sf: metrics_sf})\n tag_sf_cases = test_helpers.make_tag(TagType.CUMULATIVE_TAIL_TRUNCATED, date=\"2020-04-02\")\n tag_df = test_helpers.make_tag_df(\n region_sf, CommonFields.CASES, DemographicBucket.ALL, [tag_sf_cases]\n )\n dataset_out = dataset_in.append_tag_df(tag_df)\n metrics_sf[CommonFields.CASES] = TimeseriesLiteral(cases_values, annotation=[tag_sf_cases])\n dataset_expected = test_helpers.build_dataset({region_sf: metrics_sf})\n\n test_helpers.assert_dataset_like(dataset_out, dataset_expected)\n\n\ndef test_add_provenance_all_with_tags():\n \"\"\"Checks that add_provenance_all (and add_provenance_series that it calls) fails when tags\n already exist.\"\"\"\n region = Region.from_state(\"TX\")\n cases_values = [100, 200, 300, 400]\n timeseries = TimeseriesLiteral(cases_values, annotation=[(test_helpers.make_tag())])\n dataset_in = test_helpers.build_dataset({region: {CommonFields.CASES: timeseries}})\n\n with pytest.raises(NotImplementedError):\n dataset_in.add_provenance_all(\"prov_prov\")\n\n\ndef test_join_columns_with_tags():\n \"\"\"Checks that join_columns preserves tags.\"\"\"\n region = Region.from_state(\"TX\")\n cases_values = [100, 200, 300, 400]\n ts_lit = TimeseriesLiteral(cases_values, annotation=[test_helpers.make_tag()])\n dataset_cases = test_helpers.build_dataset({region: {CommonFields.CASES: ts_lit}})\n dataset_deaths = test_helpers.build_dataset({region: {CommonFields.DEATHS: ts_lit}})\n\n dataset_out = dataset_cases.join_columns(dataset_deaths)\n\n assert len(dataset_out.tag) == 2\n # The following checks that the tags in `ts_lit` have been preserved.\n dataset_expected = test_helpers.build_dataset(\n {region: {CommonFields.CASES: ts_lit, CommonFields.DEATHS: ts_lit}}\n )\n\n test_helpers.assert_dataset_like(dataset_out, dataset_expected)\n\n\ndef test_drop_column_with_tags():\n region = Region.from_state(\"TX\")\n cases_values = [100, 200, 300, 400]\n ts_lit = TimeseriesLiteral(cases_values, annotation=[test_helpers.make_tag()])\n\n dataset_in = test_helpers.build_dataset(\n {region: {CommonFields.CASES: ts_lit, CommonFields.DEATHS: ts_lit}}\n )\n\n dataset_out = dataset_in.drop_column_if_present(CommonFields.DEATHS)\n\n assert len(dataset_out.tag) == 1\n dataset_expected = test_helpers.build_dataset({region: {CommonFields.CASES: ts_lit}})\n test_helpers.assert_dataset_like(dataset_out, dataset_expected)\n\n\ndef test_drop_na_columns():\n tag = test_helpers.make_tag()\n timeseries_real = {\n CommonFields.CASES: TimeseriesLiteral([1, 2], annotation=[tag]),\n }\n static_real = {CommonFields.STAFFED_BEDS: 3}\n ds = test_helpers.build_default_region_dataset(\n # Adds CASES with real values, which won't be dropped, and a tag for DEATHS, that will be\n # dropped.\n {**timeseries_real, CommonFields.DEATHS: TimeseriesLiteral([], annotation=[tag])},\n static=static_real,\n )\n # The test_helper functions don't do a good job of creating fields that are all NA so the\n # following inserts time series DEATHS and static ICU_BEDS, then asserts that they were\n # inserted.\n timeseries_bucketed_with_na = ds.timeseries_bucketed.copy()\n timeseries_bucketed_with_na.loc[:, CommonFields.DEATHS] = np.nan\n static_with_na = ds.static.copy()\n static_with_na.loc[:, CommonFields.ICU_BEDS] = np.nan\n ds = dataclasses.replace(\n 
ds, timeseries_bucketed=timeseries_bucketed_with_na, static=static_with_na\n )\n assert CommonFields.DEATHS in ds.timeseries_bucketed.columns\n assert CommonFields.ICU_BEDS in ds.static.columns\n\n dataset_out = ds.drop_na_columns()\n\n dataset_expected = test_helpers.build_default_region_dataset(\n timeseries_real, static=static_real\n )\n test_helpers.assert_dataset_like(dataset_out, dataset_expected)\n\n\ndef test_drop_na_columns_no_tags():\n timeseries_real = {CommonFields.CASES: [1, 2]}\n tag = test_helpers.make_tag()\n ds = test_helpers.build_default_region_dataset(\n # Add a tag for DEATHS, that will be dropped.\n {**timeseries_real, CommonFields.DEATHS: TimeseriesLiteral([], annotation=[tag])}\n )\n # The test_helper functions don't do a good job of creating fields that are all NA so the\n # following inserts time series DEATHS and static ICU_BEDS, then asserts that they were\n # inserted.\n timeseries_bucketed_with_na = ds.timeseries_bucketed.copy()\n timeseries_bucketed_with_na.loc[:, CommonFields.DEATHS] = np.nan\n ds = dataclasses.replace(ds, timeseries_bucketed=timeseries_bucketed_with_na)\n assert CommonFields.DEATHS in ds.timeseries_bucketed.columns\n\n dataset_out = ds.drop_na_columns()\n\n dataset_expected = test_helpers.build_default_region_dataset(timeseries_real)\n test_helpers.assert_dataset_like(dataset_out, dataset_expected)\n\n\ndef test_drop_column_with_tags_and_bucket():\n age_40s = DemographicBucket(\"age:40-49\")\n ts_lit = TimeseriesLiteral([10, 20, 30], annotation=[test_helpers.make_tag()])\n data_cases = {CommonFields.CASES: {age_40s: ts_lit, DemographicBucket.ALL: ts_lit}}\n data_deaths = {CommonFields.DEATHS: {age_40s: ts_lit}}\n\n dataset_in = test_helpers.build_default_region_dataset({**data_cases, **data_deaths})\n assert len(dataset_in.tag) == 3\n\n dataset_out = dataset_in.drop_column_if_present(CommonFields.DEATHS)\n\n assert len(dataset_out.tag) == 2\n dataset_expected = test_helpers.build_default_region_dataset({**data_cases})\n test_helpers.assert_dataset_like(dataset_out, dataset_expected)\n\n\ndef test_timeseries_empty_timeseries_and_static():\n # Check that empty dataset creates a MultiRegionDataset\n # and that get_one_region raises expected exception.\n dataset = timeseries.MultiRegionDataset.new_without_timeseries()\n with pytest.raises(timeseries.RegionLatestNotFound):\n dataset.get_one_region(Region.from_fips(\"01001\"))\n\n\ndef test_timeseries_empty():\n # Check that empty geodata_timeseries_df creates a MultiRegionDataset\n # and that get_one_region raises expected exception.\n dataset = timeseries.MultiRegionDataset.from_timeseries_df(\n pd.DataFrame([], columns=[CommonFields.LOCATION_ID, CommonFields.DATE])\n )\n with pytest.raises(timeseries.RegionLatestNotFound):\n dataset.get_one_region(Region.from_fips(\"01001\"))\n\n\ndef test_timeseries_empty_static_not_empty():\n # Check that empty timeseries does not prevent static data working as expected.\n dataset = timeseries.MultiRegionDataset.from_timeseries_df(\n pd.DataFrame([], columns=[CommonFields.LOCATION_ID, CommonFields.DATE])\n ).add_static_values(pd.DataFrame([{\"location_id\": \"iso1:us#fips:97111\", \"m1\": 1234}]))\n assert dataset.get_one_region(Region.from_fips(\"97111\")).latest[\"m1\"] == 1234\n\n\ndef test_from_timeseries_df_fips_location_id_mismatch():\n df = test_helpers.read_csv_str(\n \" location_id, fips, date,m1\\n\"\n \"iso1:us#iso2:us-tx#fips:48197,48201,2020-04-02, 2\\n\"\n \"iso1:us#iso2:us-tx#fips:48201,48201,2020-04-02, 2\\n\",\n skip_spaces=True,\n )\n with 
pytest.warns(timeseries.ExtraColumnWarning, match=\"48201\"):\n timeseries.MultiRegionDataset.from_timeseries_df(df)\n\n\ndef test_from_timeseries_df_no_fips_no_warning():\n df = test_helpers.read_csv_str(\n \" location_id, fips, date,m1\\n\"\n \" iso1:us, ,2020-04-02, 2\\n\",\n skip_spaces=True,\n )\n timeseries.MultiRegionDataset.from_timeseries_df(df)\n\n\ndef test_from_timeseries_df_fips_state_mismatch():\n df = test_helpers.read_csv_str(\n \" location_id,state, date,m1\\n\"\n \"iso1:us#iso2:us-tx#fips:48197, TX,2020-04-02, 2\\n\"\n \"iso1:us#iso2:us-tx#fips:48201, IL,2020-04-02, 2\\n\",\n skip_spaces=True,\n )\n with pytest.warns(timeseries.ExtraColumnWarning, match=\"48201\"):\n timeseries.MultiRegionDataset.from_timeseries_df(df)\n\n\ndef test_from_timeseries_df_bad_level():\n df = test_helpers.read_csv_str(\n \" location_id, aggregate_level, date,m1\\n\"\n \"iso1:us#iso2:us-tx#fips:48201, county,2020-04-02, 2\\n\"\n \"iso1:us#iso2:us-tx#fips:48197, state,2020-04-02, 2\\n\"\n \" iso1:us#iso2:us-tx, state,2020-04-02, 2\\n\",\n skip_spaces=True,\n )\n with pytest.warns(timeseries.ExtraColumnWarning, match=\"48197\"):\n timeseries.MultiRegionDataset.from_timeseries_df(df)\n\n\ndef test_combined_timeseries():\n ds1 = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,date,county,aggregate_level,m1\\n\"\n \"iso1:us#cbsa:10100,2020-04-02,,,2.2\\n\"\n \"iso1:us#cbsa:10100,2020-04-03,,,3.3\\n\"\n \"iso1:us#fips:97111,2020-04-02,Bar County,county,2\\n\"\n \"iso1:us#fips:97111,2020-04-04,Bar County,county,4\\n\"\n )\n ).add_provenance_csv(\n io.StringIO(\"location_id,variable,provenance\\n\" \"iso1:us#cbsa:10100,m1,ds110100prov\\n\")\n )\n ds2 = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,date,county,aggregate_level,m1\\n\"\n \"iso1:us#cbsa:10100,2020-04-02,,,333\\n\"\n \"iso1:us#cbsa:10100,2020-04-03,,,333\\n\"\n \"iso1:us#fips:97222,2020-04-03,Foo County,county,30\\n\"\n \"iso1:us#fips:97222,2020-04-04,Foo County,county,40\\n\"\n )\n ).add_provenance_csv(\n io.StringIO(\"location_id,variable,provenance\\n\" \"iso1:us#cbsa:10100,m1,ds110100prov\\n\")\n )\n combined = timeseries.combined_datasets({FieldName(\"m1\"): [ds1, ds2]}, {})\n expected = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,date,m1\\n\"\n \"iso1:us#cbsa:10100,2020-04-02,2.2\\n\"\n \"iso1:us#cbsa:10100,2020-04-03,3.3\\n\"\n \"iso1:us#fips:97111,2020-04-02,2\\n\"\n \"iso1:us#fips:97111,2020-04-04,4\\n\"\n \"iso1:us#fips:97222,2020-04-03,30\\n\"\n \"iso1:us#fips:97222,2020-04-04,40\\n\"\n )\n ).add_provenance_csv(\n io.StringIO(\"location_id,variable,provenance\\n\" \"iso1:us#cbsa:10100,m1,ds110100prov\\n\")\n )\n\n test_helpers.assert_dataset_like(expected, combined)\n\n\ndef test_combined_annotation():\n ts1a = TimeseriesLiteral(\n [0, 2, 4],\n annotation=[\n test_helpers.make_tag(date=\"2020-04-01\"),\n test_helpers.make_tag(date=\"2020-04-02\"),\n ],\n )\n ts1b = [100, 200, 300]\n ds1 = test_helpers.build_default_region_dataset(\n {CommonFields.ICU_BEDS: ts1a, CommonFields.CASES: ts1b}\n )\n ts2a = TimeseriesLiteral([1, 3, 5], annotation=[test_helpers.make_tag(date=\"2020-04-01\")],)\n ts2b = [150, 250, 350]\n ds2 = test_helpers.build_default_region_dataset(\n {CommonFields.ICU_BEDS: ts2a, CommonFields.CASES: ts2b}\n )\n combined = timeseries.combined_datasets(\n {CommonFields.ICU_BEDS: [ds1, ds2], CommonFields.CASES: [ds2, ds1]}, {}\n )\n\n expected = test_helpers.build_default_region_dataset(\n {CommonFields.ICU_BEDS: ts1a, CommonFields.CASES: ts2b}\n 
)\n\n test_helpers.assert_dataset_like(combined, expected)\n\n\ndef test_combined_missing_field():\n ts1 = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,date,county,aggregate_level,m1\\n\"\n \"iso1:us#fips:97111,2020-04-02,Bar County,county,2\\n\"\n \"iso1:us#fips:97111,2020-04-04,Bar County,county,4\\n\"\n )\n )\n ts2 = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,date,county,aggregate_level,m2\\n\"\n \"iso1:us#fips:97111,2020-04-02,Bar County,county,111\\n\"\n \"iso1:us#fips:97111,2020-04-04,Bar County,county,111\\n\"\n )\n )\n # m1 is output, m2 is dropped.\n field_source_map = {FieldName(\"m1\"): [ts1, ts2]}\n\n # Check that combining finishes and produces the expected result.\n combined_1 = timeseries.combined_datasets(field_source_map, {})\n expected = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,date,m1\\n\"\n \"iso1:us#fips:97111,2020-04-02,2\\n\"\n \"iso1:us#fips:97111,2020-04-04,4\\n\"\n )\n )\n test_helpers.assert_dataset_like(expected, combined_1)\n\n # Because there is only one source for the output timeseries reversing the source list\n # produces the same output.\n combined_2 = timeseries.combined_datasets(\n {name: list(reversed(source_list)) for name, source_list in field_source_map.items()}, {}\n )\n test_helpers.assert_dataset_like(expected, combined_2)\n\n\ndef test_combined_static():\n ds1 = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,date,county,aggregate_level,s1\\n\"\n \"iso1:us#cbsa:10100,,,,\\n\"\n \"iso1:us#fips:97222,,Foo County,county,22\\n\"\n )\n )\n ds2 = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,date,county,aggregate_level,s1\\n\"\n \"iso1:us#cbsa:10100,,,,111\\n\"\n \"iso1:us#fips:97222,,Foo County,county,222\\n\"\n )\n )\n combined = timeseries.combined_datasets({}, {FieldName(\"s1\"): [ds1, ds2]})\n expected = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\"location_id,date,s1\\n\" \"iso1:us#cbsa:10100,,111\\n\" \"iso1:us#fips:97222,,22\\n\")\n )\n\n test_helpers.assert_dataset_like(expected, combined, drop_na_timeseries=True)\n\n\ndef test_timeseries_rows():\n ts = timeseries.MultiRegionDataset.from_csv(\n io.StringIO(\n \"location_id,county,aggregate_level,date,m1,m2,population\\n\"\n \"iso1:us#iso2:us-tx,Texas,state,2020-04-01,4,2,\\n\"\n \"iso1:us#iso2:us-tx,Texas,state,2020-04-02,4,4,\\n\"\n \"iso1:us#iso2:us-tx,Texas,state,,,,2500\\n\"\n \"iso1:us#iso2:us-az,Arizona,state,2020-04-01,8,20,\\n\"\n \"iso1:us#iso2:us-az,Arizona,state,2020-04-02,12,40,\\n\"\n \"iso1:us#iso2:us-az,Arizona,state,,,,7500\\n\"\n )\n )\n\n rows = ts.timeseries_rows()\n expected = test_helpers.read_csv_str(\n \" location_id,variable,demographic_bucket,2020-04-02,2020-04-01\\n\"\n \"iso1:us#iso2:us-az, m1, all, 12, 8\\n\"\n \"iso1:us#iso2:us-az, m2, all, 40, 20\\n\"\n \"iso1:us#iso2:us-tx, m1, all, 4, 4\\n\"\n \"iso1:us#iso2:us-tx, m2, all, 4, 2\\n\",\n skip_spaces=True,\n ).set_index([CommonFields.LOCATION_ID, PdFields.VARIABLE, PdFields.DEMOGRAPHIC_BUCKET])\n pd.testing.assert_frame_equal(rows, expected, check_dtype=False, check_exact=False)\n\n\ndef test_multi_region_dataset_get_subset():\n region_us = Region.from_iso1(\"us\")\n region_tx = Region.from_state(\"TX\")\n region_county = Region.from_fips(\"97222\")\n region_cbsa = Region.from_cbsa_code(\"10100\")\n m1 = FieldName(\"m1\")\n m2 = FieldName(\"m2\")\n ds = test_helpers.build_dataset(\n {\n region_us: {m1: [100], m2: [200]},\n region_tx: {m1: [4], m2: [2]},\n region_county: 
{m1: [1], m2: [2]},\n region_cbsa: {m1: [1], m2: [2], CommonFields.POPULATION: [20_000]},\n },\n static_by_region_then_field_name={\n region_us: {CommonFields.POPULATION: 10_000},\n region_tx: {CommonFields.POPULATION: 5_000},\n region_county: {CommonFields.POPULATION: 1_000},\n },\n )\n\n subset = ds.get_subset(aggregation_level=AggregationLevel.COUNTRY)\n assert subset.static.at[\"iso1:us\", CommonFields.POPULATION] == 10000\n\n subset = ds.get_subset(fips=\"97222\")\n assert subset.timeseries.at[(\"iso1:us#fips:97222\", \"2020-04-01\"), \"m2\"] == 2\n\n subset = ds.get_subset(state=\"TX\")\n assert subset.static.at[\"iso1:us#iso2:us-tx\", CommonFields.POPULATION] == 5000\n\n subset = ds.get_subset(states=[\"TX\"])\n assert subset.static.at[\"iso1:us#iso2:us-tx\", CommonFields.POPULATION] == 5000\n\n subset = ds.get_subset(location_id_matches=r\"\\A(iso1\\:us|iso1\\:us\\#cbsa.+)\\Z\")\n assert {r.location_id for r, _ in subset.iter_one_regions()} == {\n \"iso1:us\",\n \"iso1:us#cbsa:10100\",\n }\n\n\ndef test_multi_region_dataset_get_subset_with_buckets():\n # Make some regions at different levels\n region_us = Region.from_iso1(\"us\")\n region_tx = Region.from_state(\"TX\")\n region_la = Region.from_fips(\"06037\")\n age_40s = DemographicBucket(\"age:40-49\")\n data_us = {region_us: {CommonFields.CASES: [100, 200]}}\n data_tx = {region_tx: {CommonFields.CASES: [10, 20]}}\n data_la = {region_la: {CommonFields.CASES: {DemographicBucket.ALL: [5, 10], age_40s: [1, 2]}}}\n ds = test_helpers.build_dataset({**data_us, **data_tx, **data_la})\n\n ds_expected = test_helpers.build_dataset({**data_us, **data_la})\n test_helpers.assert_dataset_like(ds.get_regions_subset([region_us, region_la]), ds_expected)\n test_helpers.assert_dataset_like(ds.partition_by_region(exclude=[region_tx])[0], ds_expected)\n\n\ndef test_write_read_dataset_pointer_with_source_url(tmpdir):\n pointer = _make_dataset_pointer(tmpdir)\n url_str1 = UrlStr(\"http://foo.com/1\")\n url_str2 = UrlStr(\"http://foo.com/2\")\n url_str3 = UrlStr(\"http://foo.com/3\")\n\n ts1a = TimeseriesLiteral(\n [0, 2, 4],\n annotation=[\n test_helpers.make_tag(date=\"2020-04-01\"),\n test_helpers.make_tag(date=\"2020-04-02\"),\n ],\n source_url=url_str1,\n )\n ts1b = TimeseriesLiteral([100, 200, 300], source_url=[url_str2, url_str3])\n dataset_in = test_helpers.build_default_region_dataset(\n {CommonFields.ICU_BEDS: ts1a, CommonFields.CASES: ts1b}\n )\n\n dataset_in.write_to_dataset_pointer(pointer)\n\n dataset_read = timeseries.MultiRegionDataset.read_from_pointer(pointer)\n\n test_helpers.assert_dataset_like(dataset_read, dataset_in)\n source_url_read = dataset_read.get_one_region(test_helpers.DEFAULT_REGION).source_url\n assert source_url_read[CommonFields.ICU_BEDS] == [url_str1]\n # Copy to a set because the order of the URLs in the source_url may change.\n assert set(source_url_read[CommonFields.CASES]) == {url_str2, url_str3}\n\n\ndef test_pickle():\n ts = TimeseriesLiteral(\n [0, 2, 4],\n annotation=[\n test_helpers.make_tag(date=\"2020-04-01\"),\n test_helpers.make_tag(date=\"2020-04-02\"),\n ],\n source_url=UrlStr(\"http://public.com\"),\n )\n ds_in = test_helpers.build_default_region_dataset({CommonFields.CASES: ts})\n\n ds_out = pickle.loads(pickle.dumps(ds_in))\n\n test_helpers.assert_dataset_like(ds_in, ds_out)\n\n\ndef test_make_source_tags():\n url_str = UrlStr(\"http://foo.com/1\")\n\n ts_prov_only = TimeseriesLiteral(\n [0, 2, 4], annotation=[test_helpers.make_tag(date=\"2020-04-01\"),], provenance=\"prov_only\",\n )\n ts_with_url 
= TimeseriesLiteral([3, 5, 7], provenance=\"prov_with_url\", source_url=url_str)\n dataset_in = test_helpers.build_default_region_dataset(\n {CommonFields.ICU_BEDS: ts_prov_only, CommonFields.CASES: ts_with_url}\n )\n\n dataset_out = timeseries.make_source_tags(dataset_in)\n\n source_tag_prov_only = taglib.Source(\"prov_only\")\n ts_prov_only_expected = TimeseriesLiteral(\n [0, 2, 4],\n annotation=[test_helpers.make_tag(date=\"2020-04-01\"),],\n source=source_tag_prov_only,\n )\n source_tag_prov_with_url = taglib.Source(\"prov_with_url\", url=url_str)\n ts_with_url_expected = TimeseriesLiteral([3, 5, 7], source=source_tag_prov_with_url,)\n dataset_expected = test_helpers.build_default_region_dataset(\n {CommonFields.ICU_BEDS: ts_prov_only_expected, CommonFields.CASES: ts_with_url_expected}\n )\n test_helpers.assert_dataset_like(dataset_out, dataset_expected)\n\n one_region = dataset_out.get_one_region(test_helpers.DEFAULT_REGION)\n assert one_region.sources_all_bucket(CommonFields.ICU_BEDS) == [source_tag_prov_only]\n assert one_region.sources_all_bucket(CommonFields.CASES) == [source_tag_prov_with_url]\n\n\ndef test_make_source_tags_no_urls():\n # There was a bug where `./run.py data update` failed at the very end when no timeseries had\n # a source_url. This tests for it.\n ts_prov_only = TimeseriesLiteral(\n [0, 2, 4], annotation=[test_helpers.make_tag(date=\"2020-04-01\"),], provenance=\"prov_only\",\n )\n dataset_in = test_helpers.build_default_region_dataset({CommonFields.ICU_BEDS: ts_prov_only})\n\n dataset_out = timeseries.make_source_tags(dataset_in)\n\n source_tag_prov_only = taglib.Source(\"prov_only\")\n ts_prov_only_expected = TimeseriesLiteral(\n [0, 2, 4],\n annotation=[test_helpers.make_tag(date=\"2020-04-01\"),],\n source=source_tag_prov_only,\n )\n dataset_expected = test_helpers.build_default_region_dataset(\n {CommonFields.ICU_BEDS: ts_prov_only_expected}\n )\n test_helpers.assert_dataset_like(dataset_out, dataset_expected)\n\n one_region = dataset_out.get_one_region(test_helpers.DEFAULT_REGION)\n assert one_region.sources_all_bucket(CommonFields.ICU_BEDS) == [source_tag_prov_only]\n\n\ndef test_make_source_url_tags():\n url_str = UrlStr(\"http://foo.com/1\")\n\n source_tag_prov_only = taglib.Source(\"prov_only\")\n ts_prov_only = TimeseriesLiteral(\n [0, 2, 4],\n annotation=[test_helpers.make_tag(date=\"2020-04-01\"),],\n source=source_tag_prov_only,\n )\n source_tag_prov_with_url = taglib.Source(\"prov_with_url\", url=url_str)\n ts_with_url = TimeseriesLiteral([3, 5, 7], source=source_tag_prov_with_url,)\n dataset_in = test_helpers.build_default_region_dataset(\n {CommonFields.ICU_BEDS: ts_prov_only, CommonFields.CASES: ts_with_url}\n )\n\n dataset_out = timeseries.make_source_url_tags(dataset_in)\n\n ts_with_url_expected = TimeseriesLiteral(\n [3, 5, 7], source=source_tag_prov_with_url, source_url=url_str\n )\n dataset_expected = test_helpers.build_default_region_dataset(\n {CommonFields.ICU_BEDS: ts_prov_only, CommonFields.CASES: ts_with_url_expected}\n )\n test_helpers.assert_dataset_like(dataset_out, dataset_expected)\n\n\ndef test_make_source_url_tags_no_source_tags():\n dataset_in = test_helpers.build_default_region_dataset({CommonFields.CASES: [1, 2, 3]})\n dataset_out = timeseries.make_source_url_tags(dataset_in)\n assert dataset_in == dataset_out\n\n\ndef test_make_source_url_tags_has_source_url():\n url_str = UrlStr(\"http://foo.com/1\")\n dataset_in = test_helpers.build_default_region_dataset(\n {CommonFields.CASES: TimeseriesLiteral([1, 2, 3], 
source_url=url_str)}\n )\n with pytest.raises(AssertionError):\n timeseries.make_source_url_tags(dataset_in)\n\n\ndef test_check_timeseries_structure_empty():\n timeseries._check_timeseries_wide_vars_structure(\n timeseries.EMPTY_TIMESERIES_WIDE_VARIABLES_DF, bucketed=False\n )\n timeseries._check_timeseries_wide_vars_structure(\n timeseries.EMPTY_TIMESERIES_BUCKETED_WIDE_VARIABLES_DF, bucketed=True\n )\n\n\ndef test_make_and_pickle_demographic_data():\n location_id = test_helpers.DEFAULT_REGION.location_id\n date_0 = test_helpers.DEFAULT_START_DATE\n date_1 = pd.to_datetime(test_helpers.DEFAULT_START_DATE) + pd.to_timedelta(1, unit=\"day\")\n m1 = FieldName(\"m1\")\n age20s = DemographicBucket(\"age:20-29\")\n age30s = DemographicBucket(\"age:30-39\")\n all = DemographicBucket(\"all\")\n\n ds = test_helpers.build_default_region_dataset(\n {m1: {age20s: [1, 2, 3], age30s: [5, 6, 7], all: [8, 9, None]}}\n )\n\n assert ds.timeseries_bucketed.at[(location_id, age30s, date_0), m1] == 5\n assert ds.timeseries_bucketed.at[(location_id, all, date_1), m1] == 9\n\n ds_unpickled = pickle.loads(pickle.dumps(ds))\n\n test_helpers.assert_dataset_like(ds, ds_unpickled)\n\n\ndef test_combine_demographic_data_basic():\n m1 = FieldName(\"m1\")\n age20s = DemographicBucket(\"age:20-29\")\n age30s = DemographicBucket(\"age:30-39\")\n age40s = DemographicBucket(\"age:40-49\")\n ds1 = test_helpers.build_default_region_dataset(\n {m1: {age20s: [21, 22, 23], age30s: [31, 32, 33],}}\n )\n ds2 = test_helpers.build_default_region_dataset(\n {m1: {age30s: [32, 33, 34], age40s: [42, 43, 44],}}\n )\n\n combined = timeseries.combined_datasets({m1: [ds1, ds2]}, {})\n test_helpers.assert_dataset_like(combined, ds1)\n\n combined = timeseries.combined_datasets({m1: [ds2, ds1]}, {})\n test_helpers.assert_dataset_like(combined, ds2)\n\n\ndef test_combine_demographic_data_multiple_distributions():\n \"\"\"All time-series within a variable are treated as a unit when combining\"\"\"\n m1 = FieldName(\"m1\")\n m2 = FieldName(\"m2\")\n all = DemographicBucket(\"all\")\n age_20s = DemographicBucket(\"age:20-29\")\n age_30s = DemographicBucket(\"age:30-39\")\n region_ak = Region.from_state(\"AK\")\n region_ca = Region.from_state(\"CA\")\n\n ds1 = test_helpers.build_dataset(\n {\n region_ak: {m1: {all: TimeseriesLiteral([1.1, 2.1], provenance=\"ds1_ak_m1_all\")}},\n region_ca: {m1: {age_20s: TimeseriesLiteral([2.1, 3.1], provenance=\"ds1_ca_m1_20s\")}},\n }\n )\n\n ds2 = test_helpers.build_dataset(\n {\n region_ak: {m1: {all: TimeseriesLiteral([1, 2], provenance=\"ds2_ak_m1_all\")}},\n region_ca: {\n m1: {\n age_30s: TimeseriesLiteral([3, 4], provenance=\"ds2_ca_m1_30s\"),\n all: TimeseriesLiteral([5, 6], provenance=\"ds2_ca_m1_all\"),\n },\n m2: {age_30s: TimeseriesLiteral([6, 7], provenance=\"ds2_ca_m2_30s\")},\n },\n }\n )\n\n combined = timeseries.combined_datasets({m1: [ds1, ds2], m2: [ds1, ds2]}, {})\n\n ds_expected = test_helpers.build_dataset(\n {\n region_ak: {m1: {all: TimeseriesLiteral([1.1, 2.1], provenance=\"ds1_ak_m1_all\")}},\n region_ca: {\n m1: {\n age_20s: TimeseriesLiteral([2.1, 3.1], provenance=\"ds1_ca_m1_20s\"),\n all: TimeseriesLiteral([5, 6], provenance=\"ds2_ca_m1_all\"),\n },\n m2: {age_30s: TimeseriesLiteral([6, 7], provenance=\"ds2_ca_m2_30s\")},\n },\n }\n )\n test_helpers.assert_dataset_like(combined, ds_expected)\n\n\ndef test_bucketed_latest_missing_location_id(nyc_region: Region):\n dataset = test_helpers.build_default_region_dataset({CommonFields.CASES: [1, 2, 3]})\n # nyc_region = 
Region.from_fips(\"97222\")\n output = dataset._bucketed_latest_for_location_id(nyc_region.location_id)\n expected = pd.DataFrame(\n [],\n index=pd.MultiIndex.from_tuples([], names=[PdFields.DEMOGRAPHIC_BUCKET]),\n columns=pd.Index([CommonFields.CASES], name=\"variable\"),\n dtype=\"float\",\n )\n pd.testing.assert_frame_equal(expected, output)\n\n\ndef test_bucketed_latest(nyc_region: Region):\n m1 = FieldName(\"m1\")\n age20s = DemographicBucket(\"age:20-29\")\n age30s = DemographicBucket(\"age:30-39\")\n\n dataset = test_helpers.build_default_region_dataset(\n {m1: {age20s: [21, 22, 23], age30s: [31, 32, 33],}}\n )\n bucketed_latest = dataset._bucketed_latest_for_location_id(\n test_helpers.DEFAULT_REGION.location_id\n )\n expected = pd.DataFrame(\n [{\"m1\": 23}, {\"m1\": 33}],\n index=pd.Index([age20s, age30s], name=PdFields.DEMOGRAPHIC_BUCKET),\n columns=pd.Index([m1], name=\"variable\"),\n )\n pd.testing.assert_frame_equal(bucketed_latest, expected)\n\n\ndef test_one_region_demographic_distributions():\n m1 = FieldName(\"m1\")\n age20s = DemographicBucket(\"age:20-29\")\n age30s = DemographicBucket(\"age:30-39\")\n dataset = test_helpers.build_default_region_dataset(\n {m1: {age20s: [21, 22, 23], age30s: [31, 32, 33], DemographicBucket.ALL: [20, 21, 22]}}\n )\n one_region = dataset.get_one_region(test_helpers.DEFAULT_REGION)\n\n expected = {m1: {\"age\": {\"20-29\": 23, \"30-39\": 33}}}\n assert one_region.demographic_distributions_by_field == expected\n\n\ndef test_one_region_demographic_distributions_overlapping_buckets():\n m1 = FieldName(\"m1\")\n m2 = FieldName(\"m2\")\n age20s = DemographicBucket(\"age:20-29\")\n age30s = DemographicBucket(\"age:30-39\")\n # Presumably 25 to 29 is from a different age distribution as it overlaps with age bucket above.\n # Make sure that different age bucketing doesn't polute other variables.\n age25to29 = DemographicBucket(\"age:25-29\")\n\n dataset = test_helpers.build_default_region_dataset(\n {\n m1: {age20s: [21, 22, 23], age30s: [31, 32, 33], DemographicBucket.ALL: [20, 21, 22]},\n m2: {DemographicBucket.ALL: [20, 21, 22], age25to29: [20, 21, 22]},\n },\n )\n one_region = dataset.get_one_region(test_helpers.DEFAULT_REGION)\n expected = {m1: {\"age\": {\"20-29\": 23, \"30-39\": 33}}, m2: {\"age\": {\"25-29\": 22}}}\n\n assert one_region.demographic_distributions_by_field == expected\n\n\ndef test_print_stats():\n all_bucket = DemographicBucket(\"all\")\n age_20s = DemographicBucket(\"age:20-29\")\n age_30s = DemographicBucket(\"age:30-39\")\n\n test_helpers.build_default_region_dataset(\n {\n CommonFields.ICU_BEDS: TimeseriesLiteral(\n [0, 2, 4], annotation=[test_helpers.make_tag(date=\"2020-04-01\"),],\n ),\n CommonFields.CASES: [100, 200, 300],\n }\n ).print_stats(\"DS1\")\n\n test_helpers.build_default_region_dataset(\n {\n CommonFields.CASES: {\n age_20s: TimeseriesLiteral([3, 4, 5], source=taglib.Source(type=\"MySource\")),\n age_30s: [4, 5, 6],\n all_bucket: [1, 2, 3],\n }\n }\n ).print_stats(\"DS2\")\n\n\ndef test_static_and_geo_data():\n region_chi = Region.from_fips(\"17031\")\n ds = test_helpers.build_default_region_dataset(\n {CommonFields.CASES: [0]}, static={CommonFields.POPULATION: 5}, region=region_chi\n )\n assert ds.static_and_geo_data.loc[region_chi.location_id, CommonFields.COUNTY] == \"Cook County\"\n assert ds.static_and_geo_data.loc[region_chi.location_id, CommonFields.POPULATION] == 5\n\n\ndef test_add_tag_all_bucket():\n region_tx = Region.from_state(\"TX\")\n region_la = Region.from_fips(\"06037\")\n age_40s = 
DemographicBucket(\"age:40-49\")\n data_tx = {region_tx: {CommonFields.CASES: [10, 20]}}\n data_la = {region_la: {CommonFields.CASES: {DemographicBucket.ALL: [5, 10], age_40s: [1, 2]}}}\n\n tag = test_helpers.make_tag(date=\"2020-04-01\")\n ds = test_helpers.build_dataset({**data_tx, **data_la}).add_tag_all_bucket(tag)\n\n expected_tx = {region_tx: {CommonFields.CASES: TimeseriesLiteral([10, 20], annotation=[tag])}}\n expected_la = {\n region_la: {\n CommonFields.CASES: {\n DemographicBucket.ALL: TimeseriesLiteral([5, 10], annotation=[tag]),\n age_40s: [1, 2],\n }\n }\n }\n ds_expected = test_helpers.build_dataset({**expected_tx, **expected_la})\n test_helpers.assert_dataset_like(ds, ds_expected)\n\n\ndef test_add_tag_without_timeseries(tmpdir):\n \"\"\"Create a dataset with a tag for a timeseries that doesn't exist.\"\"\"\n pointer = _make_dataset_pointer(tmpdir)\n\n region_tx = Region.from_state(\"TX\")\n region_la = Region.from_fips(\"06037\")\n data_tx = {region_tx: {CommonFields.CASES: [10, 20]}}\n\n tag_collection = taglib.TagCollection()\n tag = test_helpers.make_tag(date=\"2020-04-01\")\n tag_collection.add(\n tag,\n location_id=region_la.location_id,\n variable=CommonFields.CASES,\n bucket=DemographicBucket.ALL,\n )\n\n dataset = test_helpers.build_dataset({**data_tx}).append_tag_df(tag_collection.as_dataframe())\n\n # Check that the tag was created for region_la, which doesn't have any timeseries data.\n assert set(\n dataset.tag_objects_series.xs(region_la.location_id, level=CommonFields.LOCATION_ID)\n ) == {tag}\n\n # Check that tag location_id are included in location_ids property.\n assert set(dataset.location_ids) == {region_la.location_id, region_tx.location_id}\n\n # Check that the tag still exists after writing and reading from disk.\n dataset.write_to_dataset_pointer(pointer)\n dataset_read = timeseries.MultiRegionDataset.read_from_pointer(pointer)\n test_helpers.assert_dataset_like(dataset, dataset_read)\n\n\ndef test_variables():\n # Make a dataset with CASES, DEATHS and ICU_BEDS each appearing in only one of timeseries,\n # static and tag data. 
This makes sure variable names are merged from all three places.\n region_97111 = Region.from_fips(\"97111\")\n tag_collection = taglib.TagCollection()\n tag_collection.add(\n test_helpers.make_tag(),\n location_id=region_97111.location_id,\n variable=CommonFields.DEATHS,\n bucket=DemographicBucket.ALL,\n )\n ds = test_helpers.build_dataset(\n {region_97111: {CommonFields.CASES: [1, 2, None]}},\n static_by_region_then_field_name={region_97111: {CommonFields.ICU_BEDS: 10}},\n ).append_tag_df(tag_collection.as_dataframe())\n assert set(ds.variables.to_list()) == {\n CommonFields.CASES,\n CommonFields.ICU_BEDS,\n CommonFields.DEATHS,\n }\n\n\ndef test_variables_empty():\n assert timeseries.MultiRegionDataset.new_without_timeseries().variables.to_list() == []\n\n\ndef test_static_long():\n region_cbsa = Region.from_cbsa_code(\"10100\")\n region_fips = Region.from_fips(\"97111\")\n m1 = FieldName(\"m1\")\n ds = test_helpers.build_dataset(\n {},\n static_by_region_then_field_name={\n region_fips: {CommonFields.CAN_LOCATION_PAGE_URL: \"http://can.do\", m1: 4},\n region_cbsa: {CommonFields.CASES: 3},\n },\n )\n # Use loc[level0].at[level1] as work-around for\n # https://github.com/pandas-dev/pandas/issues/26989\n # TODO(tom): Change to `at[level0, level1]` after upgrading to Pandas >=1.1\n assert (\n ds.static_long.loc[region_fips.location_id].at[CommonFields.CAN_LOCATION_PAGE_URL]\n == \"http://can.do\"\n )\n assert ds.static_long.loc[region_fips.location_id].at[m1] == 4\n assert ds.static_long.loc[region_cbsa.location_id].at[CommonFields.CASES] == 3\n\n ds_empty_static = timeseries.MultiRegionDataset.new_without_timeseries()\n assert ds_empty_static.static_long.empty\n assert ds_empty_static.static_long.name == ds.static_long.name\n assert ds_empty_static.static_long.index.names == ds.static_long.index.names\n\n\ndef test_delta_timeseries_removed():\n # This tests time series being removed only, not tags or static values.\n region_tx = Region.from_state(\"TX\")\n region_la = Region.from_fips(\"06037\")\n age_40s = DemographicBucket(\"age:40-49\")\n data_tx = {region_tx: {CommonFields.CASES: [10, 20]}}\n data_la_a = {region_la: {CommonFields.CASES: {DemographicBucket.ALL: [5, 10], age_40s: [1, 2]}}}\n\n ds_a = test_helpers.build_dataset({**data_tx, **data_la_a})\n\n data_la_b = {region_la: {CommonFields.CASES: {DemographicBucket.ALL: [5, 10]}}}\n ds_b = test_helpers.build_dataset({**data_tx, **data_la_b})\n\n delta = timeseries.MultiRegionDatasetDiff(old=ds_a, new=ds_b)\n ds_out = delta.timeseries_removed\n\n ds_expected = test_helpers.build_dataset({region_la: {CommonFields.CASES: {age_40s: [1, 2]}}})\n\n test_helpers.assert_dataset_like(ds_out, ds_expected)\n\n\ndef test_drop_observations_after():\n age_40s = DemographicBucket(\"age:40-49\")\n ds_in = test_helpers.build_default_region_dataset(\n {\n CommonFields.CASES: {DemographicBucket.ALL: [5, 10], age_40s: [1, 2, 3]},\n # Check that observation is dropped even when not a True value (i.e. 0).\n CommonFields.DEATHS: [0, 0, 0],\n # Check what happens when there are no real valued observations after dropping,\n # though the behaviour probably doesn't matter.\n CommonFields.ICU_BEDS: [None, None, 10],\n }\n )\n\n ds_out = timeseries.drop_observations(ds_in, after=datetime.date(2020, 4, 2))\n\n tag = test_helpers.make_tag(taglib.TagType.DROP_FUTURE_OBSERVATION, after=\"2020-04-02\")\n ds_expected = test_helpers.build_default_region_dataset(\n {\n CommonFields.CASES: {\n DemographicBucket.ALL: [5, 10],\n age_40s: TimeseriesLiteral([1, 2], 
annotation=[tag]),\n },\n CommonFields.DEATHS: TimeseriesLiteral([0, 0], annotation=[tag]),\n CommonFields.ICU_BEDS: TimeseriesLiteral([], annotation=[tag]),\n }\n )\n\n test_helpers.assert_dataset_like(ds_out, ds_expected)\n\n\ndef test_pickle_test_dataset_size(tmp_path: pathlib.Path):\n pkl_path = tmp_path / \"testfile.pkl.gz\"\n test_dataset = test_helpers.load_test_dataset()\n test_dataset.get_timeseries_not_bucketed_wide_dates(CommonFields.CASES)\n test_dataset.to_compressed_pickle(pkl_path)\n assert pkl_path.stat().st_size < 800_000\n\n loaded_dataset = timeseries.MultiRegionDataset.from_compressed_pickle(pkl_path)\n\n test_helpers.assert_dataset_like(test_dataset, loaded_dataset)\n" ]
[ [ "pandas.to_datetime", "pandas.testing.assert_series_equal", "pandas.Series", "pandas.MultiIndex.from_tuples", "pandas.DataFrame", "pandas.Index", "pandas.testing.assert_frame_equal", "pandas.to_timedelta" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
amanvell/faro
[ "2c4e5b86406937e1dd3fa9f339cfbca2325d98d6" ]
[ "src/faro/FaceWorker.py" ]
[ "'''\nMIT License\n\nCopyright 2019 Oak Ridge National Laboratory\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\nCreated on Feb 5, 2019\n\n@author: bolme\n'''\n\nimport faro.proto.face_service_pb2 as fsd\nimport faro.proto.geometry_pb2 as geo\nimport faro.proto.proto_types as pt\nimport numpy as np\nimport scipy as sp\nimport scipy.spatial as spat \n\n# Standard scores can be computed by the client which may offer \n# performance benefits. In some cases scores can only be computed \n# on the server.\nSCORE_L1 = \"SCORE_L1\" # L1 Distance / Cityblock\nSCORE_L2 = \"SCORE_L2\" # Euclidean\nSCORE_DOT = \"SCORE_DOT\" # Simple dot product.\nSCORE_SERVER = \"SCORE_SERVER\" # A non-standard, custom, or proprietary score / Requires computation on the server.\n\nSTATUS_READY = \"STATUS_READY\"\n\n\nclass FaceWorker(object):\n '''\n Workers handle requests for one process in a multiprocessing system.\n \n In general the methods will be called in the order: detect, locate, align, \n extract, and score. Not all of these methods need to do something. Some\n deep learning algorithms do not require alignment for example. 
Also, in \n some cases detection, location, and alignment might all occur together.\n In that case it should be implemented in detect and the other methods will\n do nothing but may still be called to maintain a consistent interface.\n \n Scores are assumed to be distances where smaller indicates a better match.\n '''\n\n\n def __init__(self, options):\n '''\n Constructor\n '''\n \n def detect(self):\n '''Run a face detector and return rectangles.'''\n raise NotImplementedError(\"Abstract Method Called.\")\n \n def locate(self):\n '''Locate facial features.'''\n raise NotImplementedError(\"Abstract Method Called.\")\n \n def align(self):\n '''Align the images to a standard size and orientation to allow \n recognition.'''\n raise NotImplementedError(\"Abstract Method Called.\")\n \n def extract(self):\n '''Extract a template that allows the face to be matched.'''\n raise NotImplementedError(\"Abstract Method Called.\")\n \n def score(self,score_request):\n '''Compare templates to produce scores.'''\n score_type = self.scoreType()\n result = geo.Matrix()\n \n # Check that this is a known score type\n if score_type not in [fsd.L1,fsd.L2,fsd.NEG_DOT]:\n raise NotImplementedError(\"Score type <%s> not implemented.\"%(score_type,))\n \n # Check to make sure the probe and gallery records are correct\n if min(len(score_request.face_probes.face_records),len(score_request.template_probes.templates)) != 0:\n raise ValueError(\"probes argument cannot have both face_probes and template_probes defined.\")\n if max(len(score_request.face_probes.face_records),len(score_request.template_probes.templates)) == 0:\n raise ValueError(\"no probe templates were found in the arguments.\")\n if min(len(score_request.face_gallery.face_records),len(score_request.template_gallery.templates)) != 0:\n raise ValueError(\"gallery argument cannot have both face_gallery and template_gallery defined.\")\n if max(len(score_request.face_gallery.face_records),len(score_request.template_gallery.templates)) == 0:\n raise ValueError(\"no gallery templates were found in the arguments.\")\n \n # Generate probe and gallery matrices\n if len(score_request.face_probes.face_records) > len(score_request.template_probes.templates):\n probe_mat = [pt.vector_proto2np(face_rec.template.data) for face_rec in score_request.face_probes.face_records]\n else:\n probe_mat = [pt.vector_proto2np(template.data) for template in score_request.template_probes.templates]\n probe_mat = np.array(probe_mat,dtype=np.float32)\n \n if len(score_request.face_gallery.face_records) > len(score_request.template_gallery.templates):\n gal_mat = [pt.vector_proto2np(face_rec.template.data) for face_rec in score_request.face_gallery.face_records]\n else:\n gal_mat = [pt.vector_proto2np(template.data) for template in score_request.template_gallery.templates]\n gal_mat = np.array(gal_mat,dtype=np.float32)\n \n # Compute the distance\n if score_type == fsd.L1:\n dist_mat = spat.distance_matrix(probe_mat,gal_mat,1)\n elif score_type == fsd.L2:\n dist_mat = spat.distance_matrix(probe_mat,gal_mat,2)\n elif score_type == fsd.NEG_DOT:\n dist_mat = -np.dot(probe_mat,gal_mat.T)\n else:\n raise NotImplementedError(\"ScoreType %s is not implemented.\"%(score_type,))\n \n # Return the result\n return pt.matrix_np2proto(dist_mat)\n \n def version(self):\n '''Returns a three-item tuple of algorithm name, version number, \n configuration notes. 
'''\n raise NotImplementedError(\"Abstract Method Called.\")\n \n def scoreType(self):\n '''Return the method used to create a score from the template.\n \n By default server computation is required.\n \n SCORE_L1, SCORE_L2, SCORE_DOT, SCORE_SERVER\n '''\n return fsd.L2\n \n \n def status(self):\n '''Return a simple status message.'''\n print(\"Handling status request.\")\n status_message = fsd.FaceServiceInfo()\n status_message.status = fsd.READY\n \n return status_message\n \n def recommendedThreshold(self,far=-1.0):\n '''Return a recommended score threshold for matching.\n \n By default server computation is required.\n \n If a positive false accept rate (far) is provided, the returned\n threshold should correspond to that false accept rate.\n '''\n \n raise NotImplementedError(\"Abstract Method Called.\")\n \n \n def cleanexit(self):\n pass \n" ]
[ [ "numpy.dot", "numpy.array", "scipy.spatial.distance_matrix" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
ishank-arora/venmo-emoji
[ "209f53b4f90b9d8737c609e6cd5f16d0f5cf25d4" ]
[ "topicModellingWhite.py" ]
[ "import csv\nfrom collections import Counter\nimport emoji\nfrom emoji import unicode_codes\nimport pickle\nimport re\nimport pandas\nimport string\nfrom num2words import num2words\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nfrom nltk.stem import WordNetLemmatizer\nimport gensim\nfrom gensim.utils import simple_preprocess\nfrom gensim.parsing.preprocessing import STOPWORDS\nfrom nltk.stem.porter import *\nimport numpy as np\nnp.random.seed(2018)\nimport nltk\nnltk.download('wordnet')\nimport time\n\n#pd = pandas.read_csv(\"/data/06333/aroraish/rest.csv\", encoding='utf-8')\npd = pandas.read_csv(\"/data/06333/aroraish/flat/flat_light_proc_2.csv\", encoding='utf-8', error_bad_lines=False)\n#pd3 = pandas.read_csv(\"/data/06333/aroraish/modifiableN.csv\", encoding='utf-8', error_bad_lines=False)\n\n\nemojicols = [u\"\\U0001f3fb\", u\"\\U0001f3fc\", u\"\\U0001f3fd\", u\"\\U0001f3fe\", u\"\\U0001f3ff\"]\npattern = u'(' + u'|'.join(re.escape(u) for u in emojicols) + u')'\n\nallCols = re.compile(pattern)\n\nemojiss = unicode_codes.EMOJI_ALIAS_UNICODE\ncoloured = set()\n\nfor key in emojiss:\n if(allCols.findall(emojiss[key])):\n coloured.add(emojiss[key])\n coloured.add(allCols.sub('',emojiss[key]))\n\ncoloured.remove(u\"\")\nemojis = sorted(coloured, key=len,\n reverse=True)\npattern2 = u'(' + u'|'.join(re.escape(u) for u in emojis) + u')'\n\ncolouredRE = re.compile(pattern2)\n\n\nemojis = sorted(emojiss.values(), key=len,\n reverse=True)\npattern3 = u'(' + u'|'.join(re.escape(u) for u in emojis) + u')'\n\nree = re.compile(pattern3)\n\n \ndef pipe(message):\n text = preprocess(message)\n n_all(text)\n \ndef num(token):\n try:\n return num2words(token)\n except:\n return token\n\ndef n_all(message):\n #message = message.decode('utf-8')\n tokens = list()\n sp = message.split()\n for i in sp:\n l = ree.findall(i)\n if(l):\n tokens.extend(l)\n else:\n tokens.append(i)\n return sp\n\n\n\n\nprocessed_docs = pd[u'message'].map(n_all)\ndictionary = gensim.corpora.Dictionary(processed_docs)\n\ndictionary.filter_extremes(no_below=15, no_above=0.5, keep_n=100000)\n\nbow_corpus = [dictionary.doc2bow(doc) for doc in processed_docs]\n\nfrom gensim import corpora, models\ntfidf = models.TfidfModel(bow_corpus)\ncorpus_tfidf = tfidf[bow_corpus]\n\nlda_model = gensim.models.LdaMulticore(bow_corpus, num_topics=50, id2word=dictionary, passes=2, workers=1)\n\npickle.dump(lda_model, open(\"/data/06333/aroraish/models/ldaM_light_3.pkl\", \"w\"))\n\nlda_model_tfidf = gensim.models.LdaMulticore(corpus_tfidf, num_topics=50, id2word=dictionary, passes=2, workers=1)\n\npickle.dump(lda_model, open(\"/data/06333/aroraish/models/ldaMtfidf_light_3.pkl\", \"w\"))\n\nwith open(\"/data/06333/aroraish/outputs/lda_bag_of_words_light_3.txt\", 'w') as bw:\n\n for idx, topic in lda_model.print_topics(-1):\n bw.write('Topic: {} \\nWords: {}\\n\\n'.format(idx, topic.encode('utf-8')))\n\n\nwith open(\"/data/06333/aroraish/outputs/lda_tfidf_light_3.txt\", 'w') as tf:\n\n for idx, topic in lda_model_tfidf.print_topics(-1):\n tf.write('Topic: {} \\nWord: {}\\n\\n'.format(idx, topic.encode('utf-8')))\n\n\n\n" ]
[ [ "pandas.read_csv", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
bjlkeng/sandbox
[ "c95653618b7be5022b0a8e217a4e5667badb2449", "ba1fea113065256d4981a71f7b4bece7299effd1" ]
[ "notebooks/label_refinery/imagenet_utils.py", "bitsback/ans.py" ]
[ "\"\"\"Utilities for ImageNet data preprocessing & prediction decoding.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport warnings\nimport numpy as np\n\n# from . import get_keras_submodule\n\n# backend = get_keras_submodule('backend')\n# keras_utils = get_keras_submodule('utils')\nimport keras.backend as backend\nimport keras.utils as keras_utils\n\nCLASS_INDEX = None\nCLASS_INDEX_PATH = ('https://s3.amazonaws.com/deep-learning-models/'\n 'image-models/imagenet_class_index.json')\n\n# Global tensor of imagenet mean for preprocessing symbolic inputs\n_IMAGENET_MEAN = None\n\n\ndef _preprocess_numpy_input(x, data_format, mode):\n \"\"\"Preprocesses a Numpy array encoding a batch of images.\n\n # Arguments\n x: Input array, 3D or 4D.\n data_format: Data format of the image array.\n mode: One of \"caffe\", \"tf\" or \"torch\".\n - caffe: will convert the images from RGB to BGR,\n then will zero-center each color channel with\n respect to the ImageNet dataset,\n without scaling.\n - tf: will scale pixels between -1 and 1,\n sample-wise.\n - torch: will scale pixels between 0 and 1 and then\n will normalize each channel with respect to the\n ImageNet dataset.\n\n # Returns\n Preprocessed Numpy array.\n \"\"\"\n if not issubclass(x.dtype.type, np.floating):\n x = x.astype(backend.floatx(), copy=False)\n\n if mode == 'tf':\n x /= 127.5\n x -= 1.\n return x\n\n if mode == 'torch':\n x /= 255.\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n else:\n if data_format == 'channels_first':\n # 'RGB'->'BGR'\n if x.ndim == 3:\n x = x[::-1, ...]\n else:\n x = x[:, ::-1, ...]\n else:\n # 'RGB'->'BGR'\n x = x[..., ::-1]\n mean = [103.939, 116.779, 123.68]\n std = None\n\n # Zero-center by mean pixel\n if data_format == 'channels_first':\n if x.ndim == 3:\n x[0, :, :] -= mean[0]\n x[1, :, :] -= mean[1]\n x[2, :, :] -= mean[2]\n if std is not None:\n x[0, :, :] /= std[0]\n x[1, :, :] /= std[1]\n x[2, :, :] /= std[2]\n else:\n x[:, 0, :, :] -= mean[0]\n x[:, 1, :, :] -= mean[1]\n x[:, 2, :, :] -= mean[2]\n if std is not None:\n x[:, 0, :, :] /= std[0]\n x[:, 1, :, :] /= std[1]\n x[:, 2, :, :] /= std[2]\n else:\n x[..., 0] -= mean[0]\n x[..., 1] -= mean[1]\n x[..., 2] -= mean[2]\n if std is not None:\n x[..., 0] /= std[0]\n x[..., 1] /= std[1]\n x[..., 2] /= std[2]\n return x\n\n\ndef _preprocess_symbolic_input(x, data_format, mode):\n \"\"\"Preprocesses a tensor encoding a batch of images.\n\n # Arguments\n x: Input tensor, 3D or 4D.\n data_format: Data format of the image tensor.\n mode: One of \"caffe\", \"tf\" or \"torch\".\n - caffe: will convert the images from RGB to BGR,\n then will zero-center each color channel with\n respect to the ImageNet dataset,\n without scaling.\n - tf: will scale pixels between -1 and 1,\n sample-wise.\n - torch: will scale pixels between 0 and 1 and then\n will normalize each channel with respect to the\n ImageNet dataset.\n\n # Returns\n Preprocessed tensor.\n \"\"\"\n global _IMAGENET_MEAN\n\n if mode == 'tf':\n x /= 127.5\n x -= 1.\n return x\n\n if mode == 'torch':\n x /= 255.\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n else:\n if data_format == 'channels_first':\n # 'RGB'->'BGR'\n if backend.ndim(x) == 3:\n x = x[::-1, ...]\n else:\n x = x[:, ::-1, ...]\n else:\n # 'RGB'->'BGR'\n x = x[..., ::-1]\n mean = [103.939, 116.779, 123.68]\n std = None\n\n if _IMAGENET_MEAN is None:\n _IMAGENET_MEAN = backend.constant(-np.array(mean))\n\n # Zero-center 
by mean pixel\n if backend.dtype(x) != backend.dtype(_IMAGENET_MEAN):\n x = backend.bias_add(\n x, backend.cast(_IMAGENET_MEAN, backend.dtype(x)),\n data_format=data_format)\n else:\n x = backend.bias_add(x, _IMAGENET_MEAN, data_format)\n if std is not None:\n x /= std\n return x\n\n\ndef preprocess_input(x, data_format=None, mode='caffe'):\n \"\"\"Preprocesses a tensor or Numpy array encoding a batch of images.\n\n # Arguments\n x: Input Numpy or symbolic tensor, 3D or 4D.\n The preprocessed data is written over the input data\n if the data types are compatible. To avoid this\n behaviour, `numpy.copy(x)` can be used.\n data_format: Data format of the image tensor/array.\n mode: One of \"caffe\", \"tf\" or \"torch\".\n - caffe: will convert the images from RGB to BGR,\n then will zero-center each color channel with\n respect to the ImageNet dataset,\n without scaling.\n - tf: will scale pixels between -1 and 1,\n sample-wise.\n - torch: will scale pixels between 0 and 1 and then\n will normalize each channel with respect to the\n ImageNet dataset.\n\n # Returns\n Preprocessed tensor or Numpy array.\n\n # Raises\n ValueError: In case of unknown `data_format` argument.\n \"\"\"\n if data_format is None:\n data_format = backend.image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format ' + str(data_format))\n\n if isinstance(x, np.ndarray):\n return _preprocess_numpy_input(x, data_format=data_format, mode=mode)\n else:\n return _preprocess_symbolic_input(x, data_format=data_format,\n mode=mode)\n\n\ndef decode_predictions(preds, top=5):\n \"\"\"Decodes the prediction of an ImageNet model.\n\n # Arguments\n preds: Numpy tensor encoding a batch of predictions.\n top: Integer, how many top-guesses to return.\n\n # Returns\n A list of lists of top class prediction tuples\n `(class_name, class_description, score)`.\n One list of tuples per sample in batch input.\n\n # Raises\n ValueError: In case of invalid shape of the `pred` array\n (must be 2D).\n \"\"\"\n global CLASS_INDEX\n if len(preds.shape) != 2 or preds.shape[1] != 1000:\n raise ValueError('`decode_predictions` expects '\n 'a batch of predictions '\n '(i.e. a 2D array of shape (samples, 1000)). 
'\n 'Found array with shape: ' + str(preds.shape))\n if CLASS_INDEX is None:\n fpath = keras_utils.get_file(\n 'imagenet_class_index.json',\n CLASS_INDEX_PATH,\n cache_subdir='models',\n file_hash='c2c37ea517e94d9795004a39431a14cb')\n with open(fpath) as f:\n CLASS_INDEX = json.load(f)\n results = []\n for pred in preds:\n top_indices = pred.argsort()[-top:][::-1]\n result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]\n result.sort(key=lambda x: x[2], reverse=True)\n results.append(result)\n return results\n\n\ndef _obtain_input_shape(input_shape,\n default_size,\n min_size,\n data_format,\n require_flatten,\n weights=None):\n \"\"\"Internal utility to compute/validate a model's input shape.\n\n # Arguments\n input_shape: Either None (will return the default network input shape),\n or a user-provided shape to be validated.\n default_size: Default input width/height for the model.\n min_size: Minimum input width/height accepted by the model.\n data_format: Image data format to use.\n require_flatten: Whether the model is expected to\n be linked to a classifier via a Flatten layer.\n weights: One of `None` (random initialization)\n or 'imagenet' (pre-training on ImageNet).\n If weights='imagenet' input channels must be equal to 3.\n\n # Returns\n An integer shape tuple (may include None entries).\n\n # Raises\n ValueError: In case of invalid argument values.\n \"\"\"\n if weights != 'imagenet' and input_shape and len(input_shape) == 3:\n if data_format == 'channels_first':\n if input_shape[0] not in {1, 3}:\n warnings.warn(\n 'This model usually expects 1 or 3 input channels. '\n 'However, it was passed an input_shape with ' +\n str(input_shape[0]) + ' input channels.')\n default_shape = (input_shape[0], default_size, default_size)\n else:\n if input_shape[-1] not in {1, 3}:\n warnings.warn(\n 'This model usually expects 1 or 3 input channels. 
'\n 'However, it was passed an input_shape with ' +\n str(input_shape[-1]) + ' input channels.')\n default_shape = (default_size, default_size, input_shape[-1])\n else:\n if data_format == 'channels_first':\n default_shape = (3, default_size, default_size)\n else:\n default_shape = (default_size, default_size, 3)\n if weights == 'imagenet' and require_flatten:\n if input_shape is not None:\n if input_shape != default_shape:\n raise ValueError('When setting`include_top=True` '\n 'and loading `imagenet` weights, '\n '`input_shape` should be ' +\n str(default_shape) + '.')\n return default_shape\n if input_shape:\n if data_format == 'channels_first':\n if input_shape is not None:\n if len(input_shape) != 3:\n raise ValueError(\n '`input_shape` must be a tuple of three integers.')\n if input_shape[0] != 3 and weights == 'imagenet':\n raise ValueError('The input must have 3 channels; got '\n '`input_shape=' + str(input_shape) + '`')\n if ((input_shape[1] is not None and input_shape[1] < min_size) or\n (input_shape[2] is not None and input_shape[2] < min_size)):\n raise ValueError('Input size must be at least ' +\n str(min_size) + 'x' + str(min_size) +\n '; got `input_shape=' +\n str(input_shape) + '`')\n else:\n if input_shape is not None:\n if len(input_shape) != 3:\n raise ValueError(\n '`input_shape` must be a tuple of three integers.')\n if input_shape[-1] != 3 and weights == 'imagenet':\n raise ValueError('The input must have 3 channels; got '\n '`input_shape=' + str(input_shape) + '`')\n if ((input_shape[0] is not None and input_shape[0] < min_size) or\n (input_shape[1] is not None and input_shape[1] < min_size)):\n raise ValueError('Input size must be at least ' +\n str(min_size) + 'x' + str(min_size) +\n '; got `input_shape=' +\n str(input_shape) + '`')\n else:\n if require_flatten:\n input_shape = default_shape\n else:\n if data_format == 'channels_first':\n input_shape = (3, None, None)\n else:\n input_shape = (None, None, 3)\n if require_flatten:\n if None in input_shape:\n raise ValueError('If `include_top` is True, '\n 'you should specify a static `input_shape`. '\n 'Got `input_shape=' + str(input_shape) + '`')\n return input_shape\n", "import numpy as np\nfrom math import floor\n\n\nDEBUG = False\n\n\ndef code_rans(symbol, stack, alphabet, freqs, cdf=None, quant_bits=16, renorm_bits=32):\n '''\n Returns stack as a list where each element is renorm_bits long except\n for the last one which is 2*renorm_bits long\n\n symbol - string corresponding to a symbol alphabet\n stack - stack of codes from previous encoded symbols\n alphabet - list of strings representing symbols in alphabet\n freqs - List/nd.array of integers f[s] s.t. 
p_s ~= f[s] / 2^quant_bits\n cdf - np.array of cumulative sum of frequencies, if None will be calculated from freqs\n quant_bits - exponent of 2^N (quantizing factor)\n renorm_bits - n-bit renormalization\n '''\n if DEBUG:\n assert len(freqs) == len(alphabet)\n assert all([f > 0 for f in freqs])\n assert sum(freqs) == 1 << quant_bits\n assert type(stack) == list\n assert quant_bits <= renorm_bits\n\n if cdf is None:\n cdf = np.cumsum(freqs)\n cdf = np.insert(cdf, 0, 0).astype(np.uint64)\n assert len(cdf) == len(freqs) + 1\n\n if not stack:\n codes = []\n code = 1\n else:\n codes = stack\n code = int(codes.pop())\n\n if DEBUG:\n pcode = code\n index = alphabet.index(symbol)\n assert int(freqs[index]) != 0, 'Symbol has zero probability - index = %d' % index\n\n # Renormalization - if we would push past 2**renorm_bits, then renorm\n new_code = ((floor(code // int(freqs[index])) << quant_bits)\n + (code % int(freqs[index]))\n + int(cdf[index]))\n if new_code > ((1 << (2 * renorm_bits)) - 1):\n if DEBUG:\n print('renorm')\n codes.append(code & ((1 << renorm_bits) - 1))\n assert codes[-1] <= (1 << renorm_bits) - 1\n code = code >> renorm_bits\n\n # rANS\n code = ((floor(code // int(freqs[index])) << quant_bits)\n + (code % int(freqs[index]))\n + int(cdf[index]))\n\n if DEBUG:\n print(pcode, '(', type(pcode), ')', ' -> ', code, ' ', type(code))\n\n assert type(code) == int\n\n codes.append(code)\n return codes\n\n\ndef decode_rans(stack, alphabet, freqs, cdf=None, quant_bits=16, renorm_bits=32):\n '''\n stack - stack of coded message (see return of above function)\n alphabet - list of strings representing symbols in alphabet\n freqs - List/nd.array of integers f[s] s.t. p_s ~= f[s] / 2^quant_bits\n cdf - np.array of cumulative sum of frequencies, if None will be calculated from freqs\n quant_bits - exponent of 2^N (quantizing factor)\n renorm_bits - n-bit renormalization\n '''\n if DEBUG:\n assert len(freqs) == len(alphabet)\n assert sum(freqs) == (1 << quant_bits)\n assert len(stack) >= 1\n assert all(c < (1 << renorm_bits) for c in stack[:-1])\n assert stack[-1] < (1 << (2*renorm_bits))\n\n codes = stack\n\n if cdf is None:\n cdf = np.cumsum(freqs)\n cdf = np.insert(cdf, 0, 0).astype(int)\n assert len(cdf) == len(freqs) + 1\n\n mask = (1 << quant_bits) - 1\n pcode, plen = codes[-1], len(codes)\n code = int(codes.pop())\n if code >= 1:\n if DEBUG:\n pcode = code\n s = code & mask\n index = np.argmax(cdf > s) - 1\n code = (int(freqs[index]) * (code >> quant_bits)\n + (code & mask)\n - int(cdf[index]))\n\n symbol = alphabet[index]\n if (code < (1 << renorm_bits)) and codes:\n if DEBUG:\n print('renorm')\n assert codes[-1] < (1 << renorm_bits)\n code = (code << renorm_bits) + codes.pop()\n codes.append(code)\n\n if DEBUG:\n print(pcode, ' -> ', code)\n\n assert (0 < codes[-1] < pcode or len(codes) < plen\n or (pcode == codes[-1] and freqs[index] == (1 << quant_bits))), \\\n (pcode, codes[-1], plen, len(codes), index, freqs[index], cdf[index])\n return codes, symbol\n else:\n return [], None\n" ]
[ [ "numpy.array" ], [ "numpy.insert", "numpy.argmax", "numpy.cumsum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
reagames/ipcamstreamer
[ "a0b6782e228659d526c91e12255c9fd62c694471" ]
[ "ipcamstreamer.py" ]
[ "#ip camera openCV streamer for DLink DCS-932L\n\n\n\n\nimport cv2\nimport urllib \nimport numpy as np\nimport sys\n\n#stream=urllib.urlopen('http://admin:[email protected]:8088/mjpeg.cgi?user=admin&password=CmasQp123&channel=0&.mjpg')\n\nstream=urllib.urlopen('http://admin:[email protected]/mjpeg.cgi?user=admin&password=CmasQp123&channel=0&.mjpg')\n\nprint(sys.argv)\n\ncascPath = sys.argv[1]\nfaceCascade = cv2.CascadeClassifier(cascPath)\n\nbytes=''\nwhile True:\n bytes+=stream.read(1024)\n a = bytes.find('\\xff\\xd8')\n b = bytes.find('\\xff\\xd9')\n if a!=-1 and b!=-1:\n jpg = bytes[a:b+2]\n bytes= bytes[b+2:]\n frame = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),cv2.CV_LOAD_IMAGE_COLOR)\n \n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n \n faces = faceCascade.detectMultiScale(\n frame,\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(30, 30),\n flags=cv2.cv.CV_HAAR_SCALE_IMAGE\n )\n \n # Draw a rectangle around the faces\n for (x, y, w, h) in faces:\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\n \n cv2.imshow('stream',frame)\n \n if cv2.waitKey(1) ==27:\n exit(0) \n\n\ncv2.destroyAllWindows()\n" ]
[ [ "numpy.fromstring" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Thinklab-SJTU/DCL_RetinaNet_Tensorflow
[ "1d14c9800c3eb1975e8832978f7a263783d171ec", "1d14c9800c3eb1975e8832978f7a263783d171ec", "1d14c9800c3eb1975e8832978f7a263783d171ec", "1d14c9800c3eb1975e8832978f7a263783d171ec", "1d14c9800c3eb1975e8832978f7a263783d171ec" ]
[ "libs/detection_oprations/proposal_opr_csl_tsne.py", "libs/configs/DOTA1.0/dcl/cfgs_res101_dota_dcl_v1.py", "libs/configs/DOTA1.0/dota_train/cfgs_res50_dotatrain_dcl_v8.py", "tools/test_mlt_dcl_ms.py", "libs/losses/losses_dcl.py" ]
[ "# encoding: utf-8\nfrom libs.configs import cfgs\nfrom libs.box_utils import bbox_transform\nfrom libs.box_utils import nms_rotate\nimport tensorflow as tf\nimport numpy as np\n\nfrom libs.box_utils.coordinate_convert import coordinate_present_convert, coords_regular\n\n\ndef postprocess_detctions(rpn_bbox_pred, rpn_cls_prob, rpn_angle_prob, rpn_angle_logits, anchors, is_training, gpu_id):\n\n return_boxes_pred = []\n return_boxes_pred_angle = []\n return_angle_logits = []\n return_scores = []\n return_labels = []\n for j in range(0, cfgs.CLASS_NUM):\n scores = rpn_cls_prob[:, j]\n if is_training:\n indices = tf.reshape(tf.where(tf.greater(scores, cfgs.VIS_SCORE)), [-1, ])\n else:\n indices = tf.reshape(tf.where(tf.greater(scores, cfgs.FILTERED_SCORE)), [-1, ])\n\n anchors_ = tf.gather(anchors, indices)\n rpn_bbox_pred_ = tf.gather(rpn_bbox_pred, indices)\n scores = tf.gather(scores, indices)\n rpn_angle_prob_ = tf.gather(rpn_angle_prob, indices)\n rpn_angle_logits_ = tf.gather(rpn_angle_logits, indices)\n\n angle_cls = tf.cast(tf.argmax(rpn_angle_prob_, axis=1), tf.float32)\n\n if cfgs.METHOD == 'H':\n x_c = (anchors_[:, 2] + anchors_[:, 0]) / 2\n y_c = (anchors_[:, 3] + anchors_[:, 1]) / 2\n h = anchors_[:, 2] - anchors_[:, 0] + 1\n w = anchors_[:, 3] - anchors_[:, 1] + 1\n theta = -90 * tf.ones_like(x_c)\n anchors_ = tf.transpose(tf.stack([x_c, y_c, w, h, theta]))\n\n if cfgs.ANGLE_RANGE == 180:\n anchors_ = tf.py_func(coordinate_present_convert,\n inp=[anchors_, -1],\n Tout=[tf.float32])\n anchors_ = tf.reshape(anchors_, [-1, 5])\n\n boxes_pred = bbox_transform.rbbox_transform_inv(boxes=anchors_, deltas=rpn_bbox_pred_)\n\n boxes_pred = tf.reshape(boxes_pred, [-1, 5])\n angle_cls = (tf.reshape(angle_cls, [-1, ]) * -1 - 0.5) * cfgs.OMEGA\n\n x, y, w, h, theta = tf.unstack(boxes_pred, axis=1)\n boxes_pred_angle = tf.transpose(tf.stack([x, y, w, h, angle_cls]))\n\n if cfgs.ANGLE_RANGE == 180:\n\n # _, _, _, _, theta = tf.unstack(boxes_pred, axis=1)\n # indx = tf.reshape(tf.where(tf.logical_and(tf.less(theta, 0), tf.greater_equal(theta, -180))), [-1, ])\n # boxes_pred = tf.gather(boxes_pred, indx)\n # scores = tf.gather(scores, indx)\n\n boxes_pred = tf.py_func(coordinate_present_convert,\n inp=[boxes_pred, 1],\n Tout=[tf.float32])\n boxes_pred = tf.reshape(boxes_pred, [-1, 5])\n\n boxes_pred_angle = tf.py_func(coordinate_present_convert,\n inp=[boxes_pred_angle, 1],\n Tout=[tf.float32])\n boxes_pred_angle = tf.reshape(boxes_pred_angle, [-1, 5])\n\n max_output_size = 4000 if 'DOTA' in cfgs.NET_NAME else 200\n nms_indices = nms_rotate.nms_rotate(decode_boxes=boxes_pred_angle,\n scores=scores,\n iou_threshold=cfgs.NMS_IOU_THRESHOLD,\n max_output_size=100 if is_training else max_output_size,\n use_angle_condition=False,\n angle_threshold=15,\n use_gpu=True,\n gpu_id=gpu_id)\n\n tmp_boxes_pred = tf.reshape(tf.gather(boxes_pred, nms_indices), [-1, 5])\n tmp_boxes_pred_angle = tf.reshape(tf.gather(boxes_pred_angle, nms_indices), [-1, 5])\n tmp_scores = tf.reshape(tf.gather(scores, nms_indices), [-1, ])\n tmp_rpn_angle_logits = tf.gather(rpn_angle_logits_, nms_indices)\n\n return_boxes_pred.append(tmp_boxes_pred)\n return_boxes_pred_angle.append(tmp_boxes_pred_angle)\n return_scores.append(tmp_scores)\n return_labels.append(tf.ones_like(tmp_scores)*(j+1))\n return_angle_logits.append(tmp_rpn_angle_logits)\n\n return_boxes_pred = tf.concat(return_boxes_pred, axis=0)\n return_boxes_pred_angle = tf.concat(return_boxes_pred_angle, axis=0)\n return_scores = tf.concat(return_scores, axis=0)\n 
return_labels = tf.concat(return_labels, axis=0)\n return_angle_logits = tf.concat(return_angle_logits, axis=0)\n\n return return_boxes_pred, return_scores, return_labels, return_boxes_pred_angle, return_angle_logits\n", "# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function, absolute_import\nimport os\nimport tensorflow as tf\nimport math\n\n\"\"\"\nBCL + OMEGA = 180 / 32. + data aug + ms\nFLOPs: 1321979063; Trainable params: 52136440\n\nThis is your result for task 1:\n\n mAP: 0.7197849386403127\n ap of each class:\n plane:0.888767706661227,\n baseball-diamond:0.828813250009659,\n bridge:0.4680099872101205,\n ground-track-field:0.6901660404277621,\n small-vehicle:0.7395262726408154,\n large-vehicle:0.5667009043812319,\n ship:0.7347841545028257,\n tennis-court:0.9071577005837769,\n basketball-court:0.8229722946512178,\n storage-tank:0.8448119293093586,\n soccer-ball-field:0.6186025857112213,\n roundabout:0.6441106031308795,\n harbor:0.628617739475033,\n swimming-pool:0.7122314212831211,\n helicopter:0.7015014896264388\n\nThe submitted information is :\n\nDescription: RetinaNet_DOTA_DCL_B_3x_20200923_145.8w\n\n\nThis is your result for task 1:\n\n mAP: 0.7301021435398962\n ap of each class:\n plane:0.8770187842801944,\n baseball-diamond:0.8267941412496465,\n bridge:0.5127762497195772,\n ground-track-field:0.7429087472658292,\n small-vehicle:0.7430367724213736,\n large-vehicle:0.5717593529976157,\n ship:0.7535736625119606,\n tennis-court:0.9066598303041958,\n basketball-court:0.8420467411496289,\n storage-tank:0.85672175425764,\n soccer-ball-field:0.6380583684613818,\n roundabout:0.653533415863242,\n harbor:0.6392204165860068,\n swimming-pool:0.7104658633500178,\n helicopter:0.676958052680133\n\nThe submitted information is :\n\nDescription: RetinaNet_DOTA_DCL_B_3x_20200923_ms_145.8w\n\n\"\"\"\n\n# ------------------------------------------------\nVERSION = 'RetinaNet_DOTA_DCL_B_3x_20200923'\nNET_NAME = 'resnet101_v1d' # 'MobilenetV2'\nADD_BOX_IN_TENSORBOARD = True\n\n# ---------------------------------------- System_config\nROOT_PATH = os.path.abspath('../')\nprint(20*\"++--\")\nprint(ROOT_PATH)\nGPU_GROUP = \"0,1,2\"\nNUM_GPU = len(GPU_GROUP.strip().split(','))\nSHOW_TRAIN_INFO_INTE = 20\nSMRY_ITER = 2000\nSAVE_WEIGHTS_INTE = 27000 * 3\n\nSUMMARY_PATH = ROOT_PATH + '/output/summary'\nTEST_SAVE_PATH = ROOT_PATH + '/tools/test_result'\n\nif NET_NAME.startswith(\"resnet\"):\n weights_name = NET_NAME\nelif NET_NAME.startswith(\"MobilenetV2\"):\n weights_name = \"mobilenet/mobilenet_v2_1.0_224\"\nelse:\n raise Exception('net name must in [resnet_v1_101, resnet_v1_50, MobilenetV2]')\n\nPRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt'\nTRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')\nEVALUATE_DIR = ROOT_PATH + '/output/evaluate_result_pickle/'\n\n# ------------------------------------------ Train config\nRESTORE_FROM_RPN = False\nFIXED_BLOCKS = 1 # allow 0~3\nFREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone\nUSE_07_METRIC = True\n\nMUTILPY_BIAS_GRADIENT = 2.0 # if None, will not multipy\nGRADIENT_CLIPPING_BY_NORM = 10.0 # if None, will not clip\n\nCLS_WEIGHT = 1.0\nREG_WEIGHT = 1.0\nANGLE_WEIGHT = 0.5\nREG_LOSS_MODE = None\nALPHA = 1.0\nBETA = 1.0\n\nBATCH_SIZE = 1\nEPSILON = 1e-5\nMOMENTUM = 0.9\nLR = 5e-4\nDECAY_STEP = [SAVE_WEIGHTS_INTE*12, SAVE_WEIGHTS_INTE*16, SAVE_WEIGHTS_INTE*20]\nMAX_ITERATION = SAVE_WEIGHTS_INTE*20\nWARM_SETP = int(1.0 / 4.0 * SAVE_WEIGHTS_INTE)\n\n# 
-------------------------------------------- Data_preprocess_config\nDATASET_NAME = 'DOTA' # 'pascal', 'coco'\nPIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR\nPIXEL_MEAN_ = [0.485, 0.456, 0.406]\nPIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR\nIMG_SHORT_SIDE_LEN = [800, 400, 600, 1000, 1200]\nIMG_MAX_LENGTH = 1200\nCLASS_NUM = 15\nOMEGA = 180 / 32.\nANGLE_MODE = 0\n\nIMG_ROTATE = True\nRGB2GRAY = True\nVERTICAL_FLIP = True\nHORIZONTAL_FLIP = True\nIMAGE_PYRAMID = True\n\n# --------------------------------------------- Network_config\nSUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)\nSUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)\nPROBABILITY = 0.01\nFINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))\nWEIGHT_DECAY = 1e-4\nUSE_GN = False\nFPN_CHANNEL = 256\n\n# ---------------------------------------------Anchor config\nLEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']\nBASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]\nANCHOR_STRIDE = [8, 16, 32, 64, 128]\nANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]\nANCHOR_RATIOS = [1, 1 / 2, 2., 1 / 3., 3., 5., 1 / 5.]\nANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]\nANCHOR_SCALE_FACTORS = None\nUSE_CENTER_OFFSET = True\nMETHOD = 'H'\nUSE_ANGLE_COND = False\nANGLE_RANGE = 180 # 90 or 180\n\n# --------------------------------------------RPN config\nSHARE_NET = True\nUSE_P5 = True\nIOU_POSITIVE_THRESHOLD = 0.5\nIOU_NEGATIVE_THRESHOLD = 0.4\n\nNMS = True\nNMS_IOU_THRESHOLD = 0.1\nMAXIMUM_DETECTIONS = 100\nFILTERED_SCORE = 0.05\nVIS_SCORE = 0.4\n\n\n", "# -*- coding: utf-8 -*-\nfrom __future__ import division, print_function, absolute_import\nimport os\nimport tensorflow as tf\nimport math\n\n\"\"\"\nBCL + OMEGA = 180 / 32. 
+ period loss\n\n\n\n\"\"\"\n\n# ------------------------------------------------\nVERSION = 'RetinaNet_DOTA_DCL_B_2x_20200921'\nNET_NAME = 'resnet50_v1d' # 'MobilenetV2'\nADD_BOX_IN_TENSORBOARD = True\n\n# ---------------------------------------- System_config\nROOT_PATH = os.path.abspath('../')\nprint(20*\"++--\")\nprint(ROOT_PATH)\nGPU_GROUP = \"0,1,2\"\nNUM_GPU = len(GPU_GROUP.strip().split(','))\nSHOW_TRAIN_INFO_INTE = 20\nSMRY_ITER = 2000\nSAVE_WEIGHTS_INTE = 20673 * 2\n\nSUMMARY_PATH = ROOT_PATH + '/output/summary'\nTEST_SAVE_PATH = ROOT_PATH + '/tools/test_result'\n\nif NET_NAME.startswith(\"resnet\"):\n weights_name = NET_NAME\nelif NET_NAME.startswith(\"MobilenetV2\"):\n weights_name = \"mobilenet/mobilenet_v2_1.0_224\"\nelse:\n raise Exception('net name must in [resnet_v1_101, resnet_v1_50, MobilenetV2]')\n\nPRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt'\nTRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')\nEVALUATE_DIR = ROOT_PATH + '/output/evaluate_result_pickle/'\n\n# ------------------------------------------ Train config\nRESTORE_FROM_RPN = False\nFIXED_BLOCKS = 1 # allow 0~3\nFREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone\nUSE_07_METRIC = True\n\nMUTILPY_BIAS_GRADIENT = 2.0 # if None, will not multipy\nGRADIENT_CLIPPING_BY_NORM = 10.0 # if None, will not clip\n\nCLS_WEIGHT = 1.0\nREG_WEIGHT = 1.0\nANGLE_WEIGHT = 0.5\nREG_LOSS_MODE = None\nALPHA = 1.0\nBETA = 1.0\n\nBATCH_SIZE = 1\nEPSILON = 1e-5\nMOMENTUM = 0.9\nLR = 5e-4\nDECAY_STEP = [SAVE_WEIGHTS_INTE*12, SAVE_WEIGHTS_INTE*16, SAVE_WEIGHTS_INTE*20]\nMAX_ITERATION = SAVE_WEIGHTS_INTE*20\nWARM_SETP = int(1.0 / 4.0 * SAVE_WEIGHTS_INTE)\n\n# -------------------------------------------- Data_preprocess_config\nDATASET_NAME = 'DOTATrain' # 'pascal', 'coco'\nPIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR\nPIXEL_MEAN_ = [0.485, 0.456, 0.406]\nPIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. 
In openCV, channel is BGR\nIMG_SHORT_SIDE_LEN = 800\nIMG_MAX_LENGTH = 800\nCLASS_NUM = 15\nOMEGA = 180 / 32.\nANGLE_MODE = 0\n\nIMG_ROTATE = False\nRGB2GRAY = False\nVERTICAL_FLIP = False\nHORIZONTAL_FLIP = True\nIMAGE_PYRAMID = False\n\n# --------------------------------------------- Network_config\nSUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)\nSUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)\nPROBABILITY = 0.01\nFINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))\nWEIGHT_DECAY = 1e-4\nUSE_GN = False\nFPN_CHANNEL = 256\n\n# ---------------------------------------------Anchor config\nLEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']\nBASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]\nANCHOR_STRIDE = [8, 16, 32, 64, 128]\nANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]\nANCHOR_RATIOS = [1, 1 / 2, 2., 1 / 3., 3., 5., 1 / 5.]\nANCHOR_ANGLES = [-90, -75, -60, -45, -30, -15]\nANCHOR_SCALE_FACTORS = None\nUSE_CENTER_OFFSET = True\nMETHOD = 'H'\nUSE_ANGLE_COND = False\nANGLE_RANGE = 180 # 90 or 180\n\n# --------------------------------------------RPN config\nSHARE_NET = True\nUSE_P5 = True\nIOU_POSITIVE_THRESHOLD = 0.5\nIOU_NEGATIVE_THRESHOLD = 0.4\n\nNMS = True\nNMS_IOU_THRESHOLD = 0.1\nMAXIMUM_DETECTIONS = 100\nFILTERED_SCORE = 0.05\nVIS_SCORE = 0.4\n\n\n", "# -*- coding:utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport os\nimport sys\nimport tensorflow as tf\nimport cv2\nimport numpy as np\nimport math\nfrom tqdm import tqdm\nimport argparse\nfrom multiprocessing import Queue, Process\nsys.path.append(\"../\")\n\nfrom libs.networks import build_whole_network_dcl\nfrom help_utils import tools\nfrom libs.label_name_dict.label_dict import *\nfrom libs.box_utils import draw_box_in_img\nfrom libs.box_utils.coordinate_convert import forward_convert, backward_convert\nfrom libs.box_utils import nms_rotate\nfrom libs.box_utils.rotate_polygon_nms import rotate_gpu_nms\n\n\ndef worker(gpu_id, images, det_net, result_queue):\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu_id)\n # 1. preprocess img\n img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3]) # is RGB. not BGR\n img_batch = tf.cast(img_plac, tf.float32)\n\n if cfgs.NET_NAME in ['resnet152_v1d', 'resnet101_v1d', 'resnet50_v1d']:\n img_batch = (img_batch / 255 - tf.constant(cfgs.PIXEL_MEAN_)) / tf.constant(cfgs.PIXEL_STD)\n else:\n img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)\n\n img_batch = tf.expand_dims(img_batch, axis=0)\n\n detection_scores, detection_category, detection_boxes_angle = det_net.build_whole_detection_network(\n input_img_batch=img_batch,\n gtboxes_batch_h=None,\n gtboxes_batch_r=None,\n gt_encode_label=None,\n gpu_id=0)\n\n init_op = tf.group(\n tf.global_variables_initializer(),\n tf.local_variables_initializer()\n )\n\n restorer, restore_ckpt = det_net.get_restorer()\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n\n with tf.Session(config=config) as sess:\n sess.run(init_op)\n if not restorer is None:\n restorer.restore(sess, restore_ckpt)\n print('restore model %d ...' 
% gpu_id)\n for a_img in images:\n raw_img = cv2.imread(a_img)\n raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]\n\n det_boxes_r_all, det_scores_r_all, det_category_r_all = [], [], []\n\n img_short_side_len_list = cfgs.IMG_SHORT_SIDE_LEN if isinstance(cfgs.IMG_SHORT_SIDE_LEN, list) else [\n cfgs.IMG_SHORT_SIDE_LEN]\n img_short_side_len_list = [img_short_side_len_list[0]] if not args.multi_scale else img_short_side_len_list\n\n for short_size in img_short_side_len_list:\n max_len = cfgs.IMG_MAX_LENGTH\n if raw_h < raw_w:\n new_h, new_w = short_size, min(int(short_size * float(raw_w) / raw_h), max_len)\n else:\n new_h, new_w = min(int(short_size * float(raw_h) / raw_w), max_len), short_size\n img_resize = cv2.resize(raw_img, (new_w, new_h))\n\n resized_img, detected_boxes, detected_scores, detected_categories = \\\n sess.run(\n [img_batch, detection_boxes_angle, detection_scores, detection_category],\n feed_dict={img_plac: img_resize[:, :, ::-1]}\n )\n\n detected_indices = detected_scores >= cfgs.VIS_SCORE\n detected_scores = detected_scores[detected_indices]\n detected_boxes = detected_boxes[detected_indices]\n detected_categories = detected_categories[detected_indices]\n\n if detected_boxes.shape[0] == 0:\n continue\n resized_h, resized_w = resized_img.shape[1], resized_img.shape[2]\n detected_boxes = forward_convert(detected_boxes, False)\n detected_boxes[:, 0::2] *= (raw_w / resized_w)\n detected_boxes[:, 1::2] *= (raw_h / resized_h)\n # detected_boxes = backward_convert(detected_boxes, False)\n\n det_boxes_r_all.extend(detected_boxes)\n det_scores_r_all.extend(detected_scores)\n det_category_r_all.extend(detected_categories)\n det_boxes_r_all = np.array(det_boxes_r_all)\n det_scores_r_all = np.array(det_scores_r_all)\n det_category_r_all = np.array(det_category_r_all)\n\n box_res_rotate_ = []\n label_res_rotate_ = []\n score_res_rotate_ = []\n\n if det_scores_r_all.shape[0] != 0:\n for sub_class in range(1, cfgs.CLASS_NUM + 1):\n index = np.where(det_category_r_all == sub_class)[0]\n if len(index) == 0:\n continue\n tmp_boxes_r = det_boxes_r_all[index]\n tmp_label_r = det_category_r_all[index]\n tmp_score_r = det_scores_r_all[index]\n\n tmp_boxes_r_ = backward_convert(tmp_boxes_r, False)\n\n try:\n inx = nms_rotate.nms_rotate_cpu(boxes=np.array(tmp_boxes_r_),\n scores=np.array(tmp_score_r),\n iou_threshold=cfgs.NMS_IOU_THRESHOLD,\n max_output_size=5000)\n except:\n tmp_boxes_r_ = np.array(tmp_boxes_r_)\n tmp = np.zeros([tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])\n tmp[:, 0:-1] = tmp_boxes_r_\n tmp[:, -1] = np.array(tmp_score_r)\n # Note: the IoU of two same rectangles is 0, which is calculated by rotate_gpu_nms\n jitter = np.zeros([tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])\n jitter[:, 0] += np.random.rand(tmp_boxes_r_.shape[0], ) / 1000\n inx = rotate_gpu_nms(np.array(tmp, np.float32) + np.array(jitter, np.float32),\n float(cfgs.NMS_IOU_THRESHOLD), 0)\n\n box_res_rotate_.extend(np.array(tmp_boxes_r)[inx])\n score_res_rotate_.extend(np.array(tmp_score_r)[inx])\n label_res_rotate_.extend(np.array(tmp_label_r)[inx])\n\n box_res_rotate_ = np.array(box_res_rotate_)\n score_res_rotate_ = np.array(score_res_rotate_)\n label_res_rotate_ = np.array(label_res_rotate_)\n\n result_dict = {'scales': [1, 1], 'boxes': box_res_rotate_,\n 'scores': score_res_rotate_, 'labels': label_res_rotate_,\n 'image_id': a_img}\n result_queue.put_nowait(result_dict)\n\n\ndef test_mlt(det_net, real_test_img_list, gpu_ids, show_box, txt_name):\n\n save_path = os.path.join('./test_mlt', 
cfgs.VERSION)\n tools.mkdir(save_path)\n\n nr_records = len(real_test_img_list)\n pbar = tqdm(total=nr_records)\n gpu_num = len(gpu_ids.strip().split(','))\n\n nr_image = math.ceil(nr_records / gpu_num)\n result_queue = Queue(500)\n procs = []\n\n for i, gpu_id in enumerate(gpu_ids.strip().split(',')):\n start = i * nr_image\n end = min(start + nr_image, nr_records)\n split_records = real_test_img_list[start:end]\n proc = Process(target=worker, args=(int(gpu_id), split_records, det_net, result_queue))\n print('process:%d, start:%d, end:%d' % (i, start, end))\n proc.start()\n procs.append(proc)\n\n for i in range(nr_records):\n res = result_queue.get()\n if res['boxes'].shape[0] == 0:\n fw_txt_dt = open(os.path.join(save_path, 'res_{}.txt'.format(\n res['image_id'].split('/')[-1].split('.')[0].split('ts_')[1])), 'w')\n fw_txt_dt.close()\n pbar.update(1)\n\n fw = open(txt_name, 'a+')\n fw.write('{}\\n'.format(res['image_id'].split('/')[-1]))\n fw.close()\n\n continue\n x1, y1, x2, y2, x3, y3, x4, y4 = res['boxes'][:, 0], res['boxes'][:, 1], res['boxes'][:, 2], res['boxes'][:, 3],\\\n res['boxes'][:, 4], res['boxes'][:, 5], res['boxes'][:, 6], res['boxes'][:, 7]\n\n x1, y1 = x1 * res['scales'][0], y1 * res['scales'][1]\n x2, y2 = x2 * res['scales'][0], y2 * res['scales'][1]\n x3, y3 = x3 * res['scales'][0], y3 * res['scales'][1]\n x4, y4 = x4 * res['scales'][0], y4 * res['scales'][1]\n\n boxes = np.transpose(np.stack([x1, y1, x2, y2, x3, y3, x4, y4]))\n\n if show_box:\n boxes = backward_convert(boxes, False)\n nake_name = res['image_id'].split('/')[-1]\n draw_path = os.path.join(save_path, nake_name)\n draw_img = np.array(cv2.imread(res['image_id']), np.float32)\n\n final_detections = draw_box_in_img.draw_boxes_with_label_and_scores(draw_img,\n boxes=boxes,\n labels=res['labels'],\n scores=res['scores'],\n method=1,\n in_graph=False)\n cv2.imwrite(draw_path, final_detections)\n\n else:\n fw_txt_dt = open(os.path.join(save_path, 'res_{}.txt'.format(\n res['image_id'].split('/')[-1].split('.')[0].split('ts_')[1])), 'w')\n\n for ii, box in enumerate(boxes):\n line = '%d,%d,%d,%d,%d,%d,%d,%d,%.3f\\n' % (box[0], box[1], box[2], box[3],\n box[4], box[5], box[6], box[7], res['scores'][ii])\n fw_txt_dt.write(line)\n fw_txt_dt.close()\n\n fw = open(txt_name, 'a+')\n fw.write('{}\\n'.format(res['image_id'].split('/')[-1]))\n fw.close()\n\n pbar.set_description(\"Test image %s\" % res['image_id'].split('/')[-1])\n\n pbar.update(1)\n\n for p in procs:\n p.join()\n\n\ndef eval(num_imgs, test_dir, gpu_ids, show_box):\n\n txt_name = '{}.txt'.format(cfgs.VERSION)\n if not args.show_box:\n if not os.path.exists(txt_name):\n fw = open(txt_name, 'w')\n fw.close()\n\n fr = open(txt_name, 'r')\n img_filter = fr.readlines()\n print('****************************' * 3)\n print('Already tested imgs:', img_filter)\n print('****************************' * 3)\n fr.close()\n\n test_imgname_list = [os.path.join(test_dir, img_name) for img_name in os.listdir(args.test_dir)\n if img_name.endswith(('.jpg', '.JPG', '.png', '.jpeg', '.tif', '.tiff')) and\n (img_name + '\\n' not in img_filter)]\n else:\n test_imgname_list = [os.path.join(test_dir, img_name) for img_name in os.listdir(args.test_dir)\n if img_name.endswith(('.jpg', '.JPG', '.png', '.jpeg', '.tif', '.tiff'))]\n\n assert len(test_imgname_list) != 0, 'test_dir has no imgs there.' 
\\\n ' Note that, we only support img format of (.jpg, .png, and .tiff) '\n\n if num_imgs == np.inf:\n real_test_img_list = test_imgname_list\n else:\n real_test_img_list = test_imgname_list[: num_imgs]\n\n dcl = build_whole_network_dcl.DetectionNetwork(base_network_name=cfgs.NET_NAME,\n is_training=False)\n\n test_mlt(det_net=dcl, real_test_img_list=real_test_img_list, gpu_ids=gpu_ids, show_box=show_box, txt_name=txt_name)\n\n if not show_box:\n os.remove(txt_name)\n\n\ndef parse_args():\n\n parser = argparse.ArgumentParser('evaluate the result with Pascal2007 strand')\n\n parser.add_argument('--test_dir', dest='test_dir',\n help='evaluate imgs dir ',\n default='/data/yangxue/dataset/MLT/test/ch8_test_images', type=str)\n parser.add_argument('--gpus', dest='gpus',\n help='gpu id',\n default='0,1,2,3,4,5,6,7', type=str)\n parser.add_argument('--eval_num', dest='eval_num',\n help='the num of eval imgs',\n default=np.inf, type=int)\n parser.add_argument('--show_box', '-s', default=False,\n action='store_true')\n parser.add_argument('--multi_scale', '-ms', default=False,\n action='store_true')\n args = parser.parse_args()\n return args\n\n\nif __name__ == '__main__':\n\n args = parse_args()\n print(20*\"--\")\n print(args)\n print(20*\"--\")\n eval(args.eval_num,\n test_dir=args.test_dir,\n gpu_ids=args.gpus,\n show_box=args.show_box)\n\n\n", "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom libs.configs import cfgs\nfrom help_utils.densely_coded_label import angle_label_decode, get_code_len\n\n\ndef angle_cls_focal_loss(labels, pred, anchor_state, alpha=None, gamma=2.0, decimal_weight=False):\n\n indices = tf.reshape(tf.where(tf.equal(anchor_state, 1)), [-1, ])\n labels = tf.gather(labels, indices)\n pred = tf.gather(pred, indices)\n anchor_state = tf.gather(anchor_state, indices)\n\n # compute the focal loss\n per_entry_cross_ent = - labels * tf.log(tf.sigmoid(pred) + cfgs.EPSILON) \\\n - (1 - labels) * tf.log(1 - tf.sigmoid(pred) + cfgs.EPSILON)\n\n prediction_probabilities = tf.sigmoid(pred)\n p_t = ((labels * prediction_probabilities) +\n ((1 - labels) * (1 - prediction_probabilities)))\n modulating_factor = 1.0\n if gamma:\n modulating_factor = tf.pow(1.0 - p_t, gamma)\n alpha_weight_factor = 1.0\n if alpha is not None:\n alpha_weight_factor = (labels * alpha +\n (1 - labels) * (1 - alpha))\n\n if decimal_weight:\n angle_decode_labels = tf.py_func(func=angle_label_decode,\n inp=[labels, cfgs.ANGLE_RANGE, cfgs.OMEGA, cfgs.ANGLE_MODE],\n Tout=[tf.float32])\n angle_decode_labels = tf.reshape(angle_decode_labels, [-1, ]) * -1\n\n angle_decode_pred = tf.py_func(func=angle_label_decode,\n inp=[tf.sigmoid(pred), cfgs.ANGLE_RANGE, cfgs.OMEGA, cfgs.ANGLE_MODE],\n Tout=[tf.float32])\n\n angle_decode_pred = tf.reshape(angle_decode_pred, [-1, ]) * -1\n\n diff_weight = tf.reshape(tf.log(abs(angle_decode_labels - angle_decode_pred) + 1), [-1, 1])\n else:\n diff_weight = tf.ones_like(tf.reshape(anchor_state, [-1, 1]))\n\n focal_cross_entropy_loss = (diff_weight * modulating_factor * alpha_weight_factor *\n per_entry_cross_ent)\n\n # compute the normalizer: the number of positive anchors\n # normalizer = tf.stop_gradient(tf.where(tf.greater(anchor_state, -2)))\n normalizer = tf.stop_gradient(tf.where(tf.equal(anchor_state, 1)))\n normalizer = tf.cast(tf.shape(normalizer)[0], tf.float32)\n normalizer = tf.maximum(1.0, normalizer)\n\n # normalizer = 
tf.stop_gradient(tf.cast(tf.equal(anchor_state, 1), tf.float32))\n # normalizer = tf.maximum(tf.reduce_sum(normalizer), 1)\n\n return tf.reduce_sum(focal_cross_entropy_loss) / normalizer\n\n\ndef angle_cls_period_focal_loss(labels, pred, anchor_state, target_boxes, alpha=None, gamma=2.0,\n decimal_weight=False, aspect_ratio_threshold=1.5):\n\n indices = tf.reshape(tf.where(tf.equal(anchor_state, 1)), [-1, ])\n labels = tf.gather(labels, indices)\n pred = tf.gather(pred, indices)\n target_boxes = tf.gather(target_boxes, indices)\n anchor_state = tf.gather(anchor_state, indices)\n\n # compute the focal loss\n per_entry_cross_ent = - labels * tf.log(tf.sigmoid(pred) + cfgs.EPSILON) \\\n - (1 - labels) * tf.log(1 - tf.sigmoid(pred) + cfgs.EPSILON)\n\n prediction_probabilities = tf.sigmoid(pred)\n p_t = ((labels * prediction_probabilities) +\n ((1 - labels) * (1 - prediction_probabilities)))\n modulating_factor = 1.0\n if gamma:\n modulating_factor = tf.pow(1.0 - p_t, gamma)\n alpha_weight_factor = 1.0\n if alpha is not None:\n alpha_weight_factor = (labels * alpha +\n (1 - labels) * (1 - alpha))\n\n if decimal_weight:\n angle_decode_labels = tf.py_func(func=angle_label_decode,\n inp=[labels, cfgs.ANGLE_RANGE, cfgs.OMEGA, cfgs.ANGLE_MODE],\n Tout=[tf.float32])\n angle_decode_labels = tf.reshape(angle_decode_labels, [-1, ]) * -1\n\n angle_decode_pred = tf.py_func(func=angle_label_decode,\n inp=[tf.sigmoid(pred), cfgs.ANGLE_RANGE, cfgs.OMEGA, cfgs.ANGLE_MODE],\n Tout=[tf.float32])\n\n angle_decode_pred = tf.reshape(angle_decode_pred, [-1, ]) * -1\n\n target_boxes = tf.reshape(target_boxes[:, :-1], [-1, 5])\n x, y, h, w, theta = tf.unstack(target_boxes, axis=-1)\n aspect_ratio = h / w\n period_weight_90 = tf.cast(tf.less_equal(aspect_ratio, aspect_ratio_threshold), tf.int32) * 2 * 180 / cfgs.ANGLE_RANGE\n period_weight_180 = tf.cast(tf.greater(aspect_ratio, aspect_ratio_threshold), tf.int32) * 1 * 180 / cfgs.ANGLE_RANGE\n\n period_weight = tf.cast(period_weight_90 + period_weight_180, tf.float32)\n diff_weight = tf.reshape(tf.abs(tf.sin(period_weight * (angle_decode_labels - angle_decode_pred))), [-1, 1])\n\n else:\n diff_weight = tf.ones_like(tf.reshape(anchor_state, [-1, 1]))\n\n focal_cross_entropy_loss = (diff_weight * modulating_factor * alpha_weight_factor *\n per_entry_cross_ent)\n\n # compute the normalizer: the number of positive anchors\n # normalizer = tf.stop_gradient(tf.where(tf.greater(anchor_state, -2)))\n normalizer = tf.stop_gradient(tf.where(tf.equal(anchor_state, 1)))\n normalizer = tf.cast(tf.shape(normalizer)[0], tf.float32)\n normalizer = tf.maximum(1.0, normalizer)\n\n # normalizer = tf.stop_gradient(tf.cast(tf.equal(anchor_state, 1), tf.float32))\n # normalizer = tf.maximum(tf.reduce_sum(normalizer), 1)\n\n return tf.reduce_sum(focal_cross_entropy_loss) / normalizer\n\n" ]
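A quick note on the periodic weighting in angle_cls_period_focal_loss above: boxes with aspect ratio at or below the threshold are treated as 90-degree periodic (a square-ish box looks the same after a quarter turn), elongated boxes as 180-degree periodic, and the loss is scaled by |sin(k * (theta_gt - theta_pred))|. Below is a minimal NumPy sketch of that weight, assuming decoded angles in degrees; the explicit degree-to-radian conversion is an assumption of the sketch (the TF code above applies tf.sin to the decoded difference directly).

import numpy as np

def period_angle_weight(theta_gt, theta_pred, aspect_ratio,
                        angle_range=180, ar_threshold=1.5):
    # Period multiplier k: k=2 gives |sin| a 90-degree period (square-ish
    # boxes), k=1 gives a 180-degree period (elongated boxes).
    k = np.where(aspect_ratio <= ar_threshold, 2.0, 1.0) * 180.0 / angle_range
    diff = np.deg2rad(theta_gt - theta_pred)  # assumption: inputs in degrees
    return np.abs(np.sin(k * diff))

# A 90-degree error is free for a square-ish box but maximal for a long one:
print(period_angle_weight(-90.0, 0.0, aspect_ratio=1.0))  # ~0.0
print(period_angle_weight(-90.0, 0.0, aspect_ratio=4.0))  # ~1.0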
[ [ "tensorflow.concat", "tensorflow.unstack", "tensorflow.greater", "tensorflow.stack", "tensorflow.reshape", "tensorflow.ones_like", "tensorflow.gather", "tensorflow.argmax", "tensorflow.py_func" ], [ "tensorflow.constant_initializer", "tensorflow.random_normal_initializer" ], [ "tensorflow.constant_initializer", "tensorflow.random_normal_initializer" ], [ "tensorflow.local_variables_initializer", "tensorflow.constant", "tensorflow.cast", "tensorflow.expand_dims", "tensorflow.placeholder", "numpy.stack", "tensorflow.ConfigProto", "tensorflow.global_variables_initializer", "numpy.random.rand", "tensorflow.Session", "numpy.array", "numpy.where", "numpy.zeros" ], [ "tensorflow.sin", "tensorflow.unstack", "tensorflow.pow", "tensorflow.shape", "tensorflow.maximum", "tensorflow.reduce_sum", "tensorflow.reshape", "tensorflow.sigmoid", "tensorflow.cast", "tensorflow.equal", "tensorflow.less_equal", "tensorflow.greater", "tensorflow.gather", "tensorflow.py_func" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
mikofski/sktime
[ "87bdf36dbc0990f29942eb6f7fa56a8e6c5fa7b7" ]
[ "sktime/forecasting/base/adapters/_pmdarima.py" ]
[ "# -*- coding: utf-8 -*-\n# !/usr/bin/env python3 -u\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Implements adapter for pmdarima forecasters to be used in sktime framework.\"\"\"\n\n__author__ = [\"mloning\", \"hyang1996\", \"kejsitake\", \"fkiraly\"]\n__all__ = [\"_PmdArimaAdapter\"]\n\nimport pandas as pd\n\nfrom sktime.forecasting.base import BaseForecaster\nfrom sktime.forecasting.base._base import DEFAULT_ALPHA\n\n\nclass _PmdArimaAdapter(BaseForecaster):\n \"\"\"Base class for interfacing pmdarima.\"\"\"\n\n _tags = {\n \"ignores-exogeneous-X\": False,\n \"capability:pred_int\": True,\n \"requires-fh-in-fit\": False,\n \"handles-missing-data\": False,\n }\n\n def __init__(self):\n self._forecaster = None\n super(_PmdArimaAdapter, self).__init__()\n\n def _instantiate_model(self):\n raise NotImplementedError(\"abstract method\")\n\n def _fit(self, y, X=None, fh=None, **fit_params):\n \"\"\"Fit to training data.\n\n Parameters\n ----------\n y : pd.Series\n Target time series to which to fit the forecaster.\n fh : int, list, np.array or ForecastingHorizon, optional (default=None)\n The forecasters horizon with the steps ahead to to predict.\n X : pd.DataFrame, optional (default=None)\n Exogenous variables are ignored\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n self._forecaster = self._instantiate_model()\n self._forecaster.fit(y, X=X, **fit_params)\n return self\n\n def _predict(self, fh, X=None):\n \"\"\"Make forecasts.\n\n Parameters\n ----------\n fh : array-like\n The forecasters horizon with the steps ahead to to predict.\n Default is\n one-step ahead forecast, i.e. np.array([1]).\n\n Returns\n -------\n y_pred : pandas.Series\n Returns series of predicted values.\n \"\"\"\n # distinguish between in-sample and out-of-sample prediction\n fh_oos = fh.to_out_of_sample(self.cutoff)\n fh_ins = fh.to_in_sample(self.cutoff)\n\n # all values are out-of-sample\n if fh.is_all_out_of_sample(self.cutoff):\n return self._predict_fixed_cutoff(fh_oos, X=X)\n\n # all values are in-sample\n elif fh.is_all_in_sample(self.cutoff):\n return self._predict_in_sample(fh_ins, X=X)\n\n # both in-sample and out-of-sample values\n else:\n y_ins = self._predict_in_sample(fh_ins, X=X)\n y_oos = self._predict_fixed_cutoff(fh_oos, X=X)\n return y_ins.append(y_oos)\n\n def _predict_in_sample(\n self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA\n ):\n \"\"\"Generate in sample predictions.\n\n Parameters\n ----------\n fh : array-like\n The forecasters horizon with the steps ahead to to predict.\n Default is\n one-step ahead forecast, i.e. 
np.array([1]).\n\n Returns\n -------\n y_pred : pandas.Series\n Returns series of predicted values.\n \"\"\"\n if hasattr(self, \"order\"):\n diff_order = self.order[1]\n else:\n diff_order = self._forecaster.model_.order[1]\n\n # Initialize return objects\n fh_abs = fh.to_absolute(self.cutoff).to_numpy()\n fh_idx = fh.to_indexer(self.cutoff, from_cutoff=False)\n y_pred = pd.Series(index=fh_abs)\n\n # for in-sample predictions, pmdarima requires zero-based integer indicies\n start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]\n if start < 0:\n # Can't forecasts earlier to train starting point\n raise ValueError(\"Can't make predictions earlier to train starting point\")\n elif start < diff_order:\n # Can't forecasts earlier to arima's differencing order\n # But we return NaN for these supposedly forecastable points\n start = diff_order\n if end < start:\n # since we might have forced `start` to surpass `end`\n end = diff_order\n # get rid of unforcastable points\n fh_abs = fh_abs[fh_idx >= diff_order]\n # reindex accordingly\n fh_idx = fh_idx[fh_idx >= diff_order] - diff_order\n\n result = self._forecaster.predict_in_sample(\n start=start,\n end=end,\n X=X,\n return_conf_int=False,\n alpha=DEFAULT_ALPHA,\n )\n\n if return_pred_int:\n pred_ints = []\n for a in alpha:\n pred_int = pd.DataFrame(index=fh_abs, columns=[\"lower\", \"upper\"])\n result = self._forecaster.predict_in_sample(\n start=start,\n end=end,\n X=X,\n return_conf_int=return_pred_int,\n alpha=a,\n )\n pred_int.loc[fh_abs] = result[1][fh_idx, :]\n pred_ints.append(pred_int)\n # unpack results\n y_pred.loc[fh_abs] = result[0][fh_idx]\n return y_pred, pred_ints\n else:\n y_pred.loc[fh_abs] = result[fh_idx]\n return y_pred\n\n def _predict_fixed_cutoff(\n self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA\n ):\n \"\"\"Make predictions out of sample.\n\n Parameters\n ----------\n fh : array-like\n The forecasters horizon with the steps ahead to to predict.\n Default is\n one-step ahead forecast, i.e. 
np.array([1]).\n\n Returns\n -------\n y_pred : pandas.Series\n Returns series of predicted values.\n \"\"\"\n n_periods = int(fh.to_relative(self.cutoff)[-1])\n result = self._forecaster.predict(\n n_periods=n_periods,\n X=X,\n return_conf_int=False,\n alpha=DEFAULT_ALPHA,\n )\n\n fh_abs = fh.to_absolute(self.cutoff)\n fh_idx = fh.to_indexer(self.cutoff)\n if return_pred_int:\n pred_ints = []\n for a in alpha:\n result = self._forecaster.predict(\n n_periods=n_periods,\n X=X,\n return_conf_int=True,\n alpha=a,\n )\n pred_int = result[1]\n pred_int = pd.DataFrame(\n pred_int[fh_idx, :], index=fh_abs, columns=[\"lower\", \"upper\"]\n )\n pred_ints.append(pred_int)\n return result[0], pred_ints\n else:\n return pd.Series(result[fh_idx], index=fh_abs)\n\n def _predict_interval(self, fh, X=None, coverage=0.90):\n \"\"\"Compute/return prediction quantiles for a forecast.\n\n private _predict_interval containing the core logic,\n called from predict_interval and possibly predict_quantiles\n\n State required:\n Requires state to be \"fitted\".\n\n Accesses in self:\n Fitted model attributes ending in \"_\"\n self.cutoff\n\n Parameters\n ----------\n fh : int, list, np.array or ForecastingHorizon\n Forecasting horizon, default = y.index (in-sample forecast)\n X : pd.DataFrame, optional (default=None)\n Exogenous time series\n coverage : list of float (guaranteed not None and floats in [0,1] interval)\n nominal coverage(s) of predictive interval(s)\n\n Returns\n -------\n pred_int : pd.DataFrame\n Column has multi-index: first level is variable name from y in fit,\n second level coverage fractions for which intervals were computed.\n in the same order as in input `coverage`.\n Third level is string \"lower\" or \"upper\", for lower/upper interval end.\n Row index is fh. 
Entries are forecasts of lower/upper interval end,\n for var in col index, at nominal coverage in second col index,\n lower/upper depending on third col index, for the row index.\n Upper/lower interval end forecasts are equivalent to\n quantile forecasts at alpha = 0.5 - c/2, 0.5 + c/2 for c in coverage.\n \"\"\"\n # initializaing cutoff and fh related info\n cutoff = self.cutoff\n fh_oos = fh.to_out_of_sample(cutoff)\n fh_ins = fh.to_in_sample(cutoff)\n fh_is_in_sample = fh.is_all_in_sample(cutoff)\n fh_is_oosample = fh.is_all_out_of_sample(cutoff)\n\n # prepare the return DataFrame - empty with correct cols\n var_names = [\"Coverage\"]\n int_idx = pd.MultiIndex.from_product([var_names, coverage, [\"lower\", \"upper\"]])\n pred_int = pd.DataFrame(columns=int_idx)\n\n kwargs = {\"X\": X, \"return_pred_int\": True, \"alpha\": coverage}\n # all values are out-of-sample\n if fh_is_oosample:\n _, y_pred_int = self._predict_fixed_cutoff(fh_oos, **kwargs)\n\n # all values are in-sample\n elif fh_is_in_sample:\n _, y_pred_int = self._predict_in_sample(fh_ins, **kwargs)\n\n # if all in-sample/out-of-sample, we put y_pred_int in the required format\n if fh_is_in_sample or fh_is_oosample:\n # needs to be replaced, also seems duplicative, identical to part A\n for intervals, a in zip(y_pred_int, coverage):\n pred_int[(\"Coverage\", a, \"lower\")] = intervals[\"lower\"]\n pred_int[(\"Coverage\", a, \"upper\")] = intervals[\"upper\"]\n return pred_int\n\n # both in-sample and out-of-sample values (we reach this line only then)\n # in this case, we additionally need to concat in and out-of-sample returns\n _, y_ins_pred_int = self._predict_in_sample(fh_ins, **kwargs)\n _, y_oos_pred_int = self._predict_fixed_cutoff(fh_oos, **kwargs)\n for ins_int, oos_int, a in zip(y_ins_pred_int, y_oos_pred_int, coverage):\n pred_int[(\"Coverage\", a, \"lower\")] = ins_int.append(oos_int)[\"lower\"]\n pred_int[(\"Coverage\", a, \"upper\")] = ins_int.append(oos_int)[\"upper\"]\n\n return pred_int\n\n def get_fitted_params(self):\n \"\"\"Get fitted parameters.\n\n Returns\n -------\n fitted_params : dict\n \"\"\"\n self.check_is_fitted()\n names = self._get_fitted_param_names()\n params = self._get_fitted_params()\n fitted_params = {name: param for name, param in zip(names, params)}\n\n if hasattr(self._forecaster, \"model_\"): # AutoARIMA\n fitted_params[\"order\"] = self._forecaster.model_.order\n fitted_params[\"seasonal_order\"] = self._forecaster.model_.seasonal_order\n res = self._forecaster.model_.arima_res_\n elif hasattr(self._forecaster, \"arima_res_\"): # ARIMA\n res = self._forecaster.arima_res_\n else:\n res = None\n\n for name in [\"aic\", \"aicc\", \"bic\", \"hqic\"]:\n fitted_params[name] = getattr(res, name, None)\n\n return fitted_params\n\n def _get_fitted_params(self):\n # Return parameter values under `arima_res_`\n if hasattr(self._forecaster, \"model_\"): # AutoARIMA\n return self._forecaster.model_.arima_res_._results.params\n elif hasattr(self._forecaster, \"arima_res_\"): # ARIMA\n return self._forecaster.arima_res_._results.params\n else:\n raise NotImplementedError()\n\n def _get_fitted_param_names(self):\n # Return parameter names under `arima_res_`\n if hasattr(self._forecaster, \"model_\"): # AutoARIMA\n return self._forecaster.model_.arima_res_._results.param_names\n elif hasattr(self._forecaster, \"arima_res_\"): # ARIMA\n return self._forecaster.arima_res_._results.param_names\n else:\n raise NotImplementedError()\n\n def summary(self):\n \"\"\"Summary of the fitted model.\"\"\"\n return 
self._forecaster.summary()\n" ]
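The core of _predict in the adapter above is splitting a forecasting horizon into in-sample and out-of-sample parts relative to the cutoff, then invoking pmdarima differently for each. A minimal sketch of that dispatch using sktime's public ForecastingHorizon; the cutoff, frequency, and horizon values here are illustrative only.

import pandas as pd
from sktime.forecasting.base import ForecastingHorizon

cutoff = pd.Period("2000-12", freq="M")  # last time point seen in fit
fh = ForecastingHorizon([-2, -1, 1, 2, 3], is_relative=True)  # mixed horizon

print(fh.is_all_in_sample(cutoff))       # False
print(fh.is_all_out_of_sample(cutoff))   # False

# The mixed case is handled by splitting, exactly as in _predict:
fh_ins = fh.to_in_sample(cutoff)         # steps -2, -1 (already observed)
fh_oos = fh.to_out_of_sample(cutoff)     # steps 1, 2, 3 (true forecasts)
print(fh_ins.to_pandas(), fh_oos.to_pandas())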
[ [ "pandas.MultiIndex.from_product", "pandas.Series", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
nalderto/otter-grader
[ "a4714bf48df07b7eb8b3c41530ce7a778fd42c98" ]
[ "test/test_grade.py" ]
[ "#################################\n##### Tests for otter grade #####\n#################################\n\nimport os\nimport unittest\nimport subprocess\nimport json\nimport re\nimport pandas as pd\n\nfrom unittest import mock\nfrom subprocess import PIPE\nfrom glob import glob\n\nfrom otter.argparser import get_parser\nfrom otter.grade import main as grade\nfrom otter.grade.metadata import GradescopeParser, CanvasParser, JSONParser, YAMLParser\n\nfrom . import TestCase\n\nparser = get_parser()\n\nTEST_FILES_PATH = \"test/test-grade/\"\n\nclass TestGrade(TestCase):\n \n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n \n create_image_cmd = [\"make\", \"docker-test\"]\n subprocess.run(create_image_cmd, check=True)\n # create_image = subprocess.run(create_image_cmd, check=True)\n # assert not create_image.stderr, create_image.stderr.decode(\"utf-8\")\n \n def setUp(self):\n \"\"\"\n Load in point values\n \"\"\"\n self.test_points = {}\n for test_file in glob(TEST_FILES_PATH + \"tests/*.py\"):\n env = {}\n with open(test_file) as f:\n exec(f.read(), env)\n self.test_points[env['test']['name']] = env['test']['points']\n return super().setUp()\n\n\n def test_docker(self):\n \"\"\"\n Check that we have the right container installed and that docker is running\n \"\"\"\n # use docker image inspect to see that the image is installed and tagged as otter-grader\n inspect = subprocess.run([\"docker\", \"image\", \"inspect\", \"otter-test\"], stdout=PIPE, stderr=PIPE)\n\n # assert that it didn't fail, it will fail if it is not installed\n self.assertEqual(len(inspect.stderr), 0, inspect.stderr.decode(\"utf-8\"))\n\n\n def test_metadata_parsers(self):\n \"\"\"\n Check that metadata parsers work correctly\n \"\"\"\n correct_metadata = [\n {\n \"identifier\": \"12345\",\n \"filename\": \"12345_empty_file.ipynb\"\n }, {\n \"identifier\": \"23456\",\n \"filename\": \"23456_empty_file.ipynb\"\n }, {\n \"identifier\": \"34567\",\n \"filename\": \"34567_empty_file.ipynb\"\n }, {\n \"identifier\": \"45678\",\n \"filename\": \"45678_empty_file.ipynb\"\n }, {\n \"identifier\": \"56789\",\n \"filename\": \"56789_empty_file.ipynb\"\n }\n ]\n\n correct_file_to_id = {\n \"12345_empty_file.ipynb\": \"12345\",\n \"23456_empty_file.ipynb\": \"23456\",\n \"34567_empty_file.ipynb\": \"34567\",\n \"45678_empty_file.ipynb\": \"45678\",\n \"56789_empty_file.ipynb\": \"56789\",\n }\n\n correct_id_to_file = {\n \"12345\": \"12345_empty_file.ipynb\",\n \"23456\": \"23456_empty_file.ipynb\",\n \"34567\": \"34567_empty_file.ipynb\",\n \"45678\": \"45678_empty_file.ipynb\",\n \"56789\": \"56789_empty_file.ipynb\",\n }\n\n correct_filenames = [\n \"12345_empty_file.ipynb\",\n \"23456_empty_file.ipynb\",\n \"34567_empty_file.ipynb\",\n \"45678_empty_file.ipynb\",\n \"56789_empty_file.ipynb\",\n ]\n\n correct_identifiers = [\n \"12345\",\n \"23456\",\n \"34567\",\n \"45678\",\n \"56789\",\n ]\n\n try:\n # gradescope parser\n gs_parser = GradescopeParser(TEST_FILES_PATH + \"gradescope-export\")\n self.assertCountEqual(gs_parser.get_metadata(), correct_metadata)\n self.assertCountEqual(gs_parser.get_filenames(), correct_filenames)\n self.assertCountEqual(gs_parser.get_identifiers(), correct_identifiers)\n for file, identifier in zip(correct_file_to_id, correct_id_to_file):\n self.assertEqual(correct_file_to_id[file], gs_parser.file_to_id(file))\n self.assertEqual(correct_id_to_file[identifier], gs_parser.id_to_file(identifier))\n\n # canvas parser\n canvas_parser = CanvasParser(TEST_FILES_PATH + \"canvas-export\")\n 
self.assertCountEqual(canvas_parser.get_metadata(), correct_metadata)\n self.assertCountEqual(canvas_parser.get_filenames(), correct_filenames)\n self.assertCountEqual(canvas_parser.get_identifiers(), correct_identifiers)\n for file, identifier in zip(correct_file_to_id, correct_id_to_file):\n self.assertEqual(correct_file_to_id[file], canvas_parser.file_to_id(file))\n self.assertEqual(correct_id_to_file[identifier], canvas_parser.id_to_file(identifier))\n\n # JSON parser\n json_parser = JSONParser(TEST_FILES_PATH + \"meta.json\")\n self.assertCountEqual(json_parser.get_metadata(), correct_metadata)\n self.assertCountEqual(json_parser.get_filenames(), correct_filenames)\n self.assertCountEqual(json_parser.get_identifiers(), correct_identifiers)\n for file, identifier in zip(correct_file_to_id, correct_id_to_file):\n self.assertEqual(correct_file_to_id[file], json_parser.file_to_id(file))\n self.assertEqual(correct_id_to_file[identifier], json_parser.id_to_file(identifier))\n\n # YAML parser\n yaml_parser = YAMLParser(TEST_FILES_PATH + \"meta.yml\")\n self.assertCountEqual(yaml_parser.get_metadata(), correct_metadata)\n self.assertCountEqual(yaml_parser.get_filenames(), correct_filenames)\n self.assertCountEqual(yaml_parser.get_identifiers(), correct_identifiers)\n for file, identifier in zip(correct_file_to_id, correct_id_to_file):\n self.assertEqual(correct_file_to_id[file], yaml_parser.file_to_id(file))\n self.assertEqual(correct_id_to_file[identifier], yaml_parser.id_to_file(identifier))\n\n # cleanup\n gs_rm = subprocess.run([\"rm\", \"-rf\"] + glob(TEST_FILES_PATH + \"gradescope-export/*.ipynb\"), stdout=PIPE, stderr=PIPE)\n self.assertEqual(len(gs_rm.stderr), 0, gs_rm.stderr.decode(\"utf-8\"))\n\n except:\n # cleanup\n gs_rm = subprocess.run([\"rm\", \"-rf\"] + glob(TEST_FILES_PATH + \"gradescope-export/*.ipynb\"), stdout=PIPE, stderr=PIPE)\n self.assertEqual(len(gs_rm.stderr), 0, gs_rm.stderr.decode(\"utf-8\"))\n raise\n\n\n def test_notebooks(self):\n \"\"\"\n Check that the example of 100 notebooks runs correctely locally.\n \"\"\"\n # grade the 100 notebooks\n grade_command = [\"grade\",\n # NO METADATA PASSED, test case when no metadata provided\n # \"-y\", TEST_FILES_PATH + \"notebooks/meta.yml\", \n \"-p\", TEST_FILES_PATH + \"notebooks/\", \n \"-t\", TEST_FILES_PATH + \"tests/\", \n \"-r\", TEST_FILES_PATH + \"requirements.txt\",\n \"-o\", \"test/\",\n \"--image\", \"otter-test\",\n \"-v\"\n ]\n args = parser.parse_args(grade_command)\n args.func = grade\n args.func(args)\n\n # read the output and expected output\n df_test = pd.read_csv(\"test/final_grades.csv\")\n self.assertTrue(\"identifier\" not in df_test.columns, \"did not drop identifier column when no metadata passed\")\n\n # sort by filename\n df_test = df_test.sort_values(\"file\").reset_index(drop=True)\n df_test[\"failures\"] = df_test[\"file\"].apply(lambda x: [int(n) for n in re.split(r\"\\D+\", x) if len(n) > 0])\n\n # check point values\n for _, row in df_test.iterrows():\n for test in self.test_points:\n if int(re.sub(r\"\\D\", \"\", test)) in row[\"failures\"]:\n self.assertEqual(row[test], 0, \"{} supposed to fail {} but passed\".format(row[\"file\"], test))\n else:\n self.assertEqual(row[test], self.test_points[test], \"{} supposed to pass {} but failed\".format(row[\"file\"], test))\n\n # df_correct = pd.read_csv(TEST_FILES_PATH + \"final_grades_correct_notebooks.csv\").sort_values(\"identifier\").reset_index(drop=True)\n\n # # assert the dataframes are as expected\n # 
self.assertTrue(df_test.equals(df_correct), \"Dataframes not equal\")\n\n # remove the extra output\n cleanup_command = [\"rm\", \"test/final_grades.csv\"]\n cleanup = subprocess.run(cleanup_command, stdout=PIPE, stderr=PIPE)\n\n # assert cleanup worked\n self.assertEqual(len(cleanup.stderr), 0, \"Error in cleanup\")\n\n\n def test_notebooks_with_pdfs(self):\n \"\"\"\n Check that the example of 100 notebooks runs correctely locally.\n \"\"\"\n # grade the 100 notebooks\n grade_command = [\"grade\",\n \"-y\", TEST_FILES_PATH + \"notebooks/meta.yml\", \n \"-p\", TEST_FILES_PATH + \"notebooks/\", \n \"-t\", TEST_FILES_PATH + \"tests/\", \n \"-r\", TEST_FILES_PATH + \"requirements.txt\",\n \"-o\", \"test/\",\n \"--pdfs\",\n \"--containers\", \"5\",\n \"--image\", \"otter-test\"\n ]\n args = parser.parse_args(grade_command)\n args.func = grade\n args.func(args)\n\n # check that we have PDFs\n self.assertTrue(os.path.isdir(\"test/submission_pdfs\"))\n for file in glob(TEST_FILES_PATH + \"notebooks/*.ipynb\"):\n pdf = \"test/submission_pdfs/\" + os.path.split(file)[1][:-5] + \"pdf\"\n self.assertTrue(os.path.isfile(pdf))\n\n # remove the extra output\n cleanup_command = [\"rm\", \"-rf\", \"test/final_grades.csv\", \"test/submission_pdfs\"]\n cleanup = subprocess.run(cleanup_command, stdout=PIPE, stderr=PIPE)\n self.assertEqual(len(cleanup.stderr), 0, cleanup.stderr.decode(\"utf-8\"))\n\n\n def test_scripts(self):\n \"\"\"\n Check that the example of 100 scripts runs correctely locally.\n \"\"\"\n grade_command = [\"grade\",\n \"-sy\", TEST_FILES_PATH + \"scripts/meta.yml\", \n \"-p\", TEST_FILES_PATH + \"scripts/\", \n \"-t\", TEST_FILES_PATH + \"tests/\", \n \"-r\", TEST_FILES_PATH + \"requirements.txt\",\n \"-o\", \"test/\",\n \"--image\", \"otter-test\"\n ]\n args = parser.parse_args(grade_command)\n args.func = grade\n args.func(args)\n\n # read the output and expected output\n df_test = pd.read_csv(\"test/final_grades.csv\").sort_values(\"identifier\").reset_index(drop=True)\n df_test[\"failures\"] = df_test[\"identifier\"].apply(lambda x: [int(n) for n in re.split(r\"\\D+\", x) if len(n) > 0])\n\n # check point values\n for _, row in df_test.iterrows():\n for test in self.test_points:\n if int(re.sub(r\"\\D\", \"\", test)) in row[\"failures\"]:\n self.assertEqual(row[test], 0, \"{} supposed to fail {} but passed\".format(row[\"identifier\"], test))\n else:\n self.assertEqual(row[test], self.test_points[test], \"{} supposed to pass {} but failed\".format(row[\"identifier\"], test))\n \n # grade the 100 scripts\n \n # args = parser.parse_args(grade_command)\n # args.func = grade\n # args.func(args)\n\n # # read the output and expected output\n # df_test = pd.read_csv(\"test/final_grades.csv\").sort_values(\"identifier\").reset_index(drop=True)\n # df_correct = pd.read_csv(TEST_FILES_PATH + \"final_grades_correct_script.csv\").sort_values(\"identifier\").reset_index(drop=True)\n\n # # assert the dataframes are as expected\n # self.assertTrue(df_test.equals(df_correct), \"Dataframes not equal\")\n\n # remove the extra output\n cleanup_command = [\"rm\", \"test/final_grades.csv\"]\n cleanup = subprocess.run(cleanup_command, stdout=PIPE, stderr=PIPE)\n\n # assert cleanup worked\n self.assertEqual(len(cleanup.stderr), 0, \"Error in cleanup\")\n" ]
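The grading tests above rely on a naming convention rather than a fixture file: the digit runs embedded in a submission's filename (or identifier) list the test numbers it is expected to fail, recovered with re.split. A self-contained sketch of that parsing; the filenames below are illustrative.

import re

def expected_failures(name):
    # Mirror the parsing in test_notebooks/test_scripts above: every run
    # of digits in the name is an expected-to-fail test number.
    return [int(n) for n in re.split(r"\D+", name) if len(n) > 0]

print(expected_failures("fails2and5.ipynb"))  # [2, 5]
print(expected_failures("passesAll.ipynb"))   # []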
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
cjhsieh/pecos
[ "6d5a657945f0a70f13dcf3afec224713cd2deb4d" ]
[ "pecos/core/base.py" ]
[ "# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You may not use this file except in compliance\n# with the License. A copy of the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES\n# OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions\n# and limitations under the License.\nimport copy\nimport ctypes\nimport logging\nimport os\nfrom ctypes import (\n CDLL,\n CFUNCTYPE,\n POINTER,\n byref,\n c_bool,\n c_char_p,\n c_double,\n c_float,\n c_int,\n c_int32,\n c_uint32,\n c_uint64,\n c_void_p,\n cast,\n)\nfrom glob import glob\nfrom subprocess import check_output\n\nimport numpy as np\nimport pecos\nimport scipy.sparse as smat\nfrom pecos.utils import smat_util\n\nLOGGER = logging.getLogger(\"__name__\")\n\nXLINEAR_SOLVERS = {\"L2R_L2LOSS_SVC_DUAL\": 1, \"L2R_L1LOSS_SVC_DUAL\": 3, \"L2R_LR_DUAL\": 7}\nXLINEAR_INFERENCE_MODEL_TYPES = {\"CSC\": 0, \"HASH_CHUNKED\": 1, \"BINARY_SEARCH_CHUNKED\": 2}\nTFIDF_TOKENIZER_CODES = {\"word\": 10, \"char\": 20, \"char_wb\": 30}\n\n\nclass TfidfBaseVectorizerParam(ctypes.Structure):\n \"\"\"\n python class for handling struct TfidfBaseVectorizerParam in tfidf.hpp\n \"\"\"\n\n _fields_ = [\n (\"min_ngram\", c_int32),\n (\"max_ngram\", c_int32),\n (\"max_length\", c_int32),\n (\"max_feature\", c_int32),\n (\"min_df_ratio\", c_float),\n (\"max_df_ratio\", c_float),\n (\"min_df_cnt\", c_int32),\n (\"max_df_cnt\", c_int32),\n (\"binary\", c_bool),\n (\"use_idf\", c_bool),\n (\"smooth_idf\", c_bool),\n (\"sublinear_tf\", c_bool),\n (\"keep_frequent_feature\", c_bool),\n (\"norm_p\", c_int32),\n (\"tok_type\", c_int32),\n ]\n\n DEFAULTS = {\n \"min_ngram\": 1,\n \"max_ngram\": 1,\n \"max_length\": -1,\n \"max_feature\": 0,\n \"min_df_ratio\": 0.0,\n \"max_df_ratio\": 1.0,\n \"min_df_cnt\": 0,\n \"max_df_cnt\": -1,\n \"binary\": False,\n \"use_idf\": True,\n \"smooth_idf\": True,\n \"sublinear_tf\": False,\n \"keep_frequent_feature\": True,\n \"norm_p\": 2,\n \"tok_type\": TFIDF_TOKENIZER_CODES[\"word\"],\n }\n\n @classmethod\n def get_default(cls, name):\n return copy.deepcopy(cls.DEFAULTS[name])\n\n def __init__(self, config_dict=None):\n if config_dict is None:\n config_dict = {}\n\n def extract_dict_key(config_dict, key, alias):\n return config_dict.get(key, config_dict.get(alias, self.get_default(key)))\n\n config_dict[\"norm_p\"] = extract_dict_key(config_dict, \"norm_p\", \"norm\")\n # to support norm_p being \"l1\" or \"l2\"\n if isinstance(config_dict[\"norm_p\"], str):\n config_dict[\"norm_p\"] = int(config_dict[\"norm_p\"][1:])\n if not (config_dict[\"norm_p\"] == 1 or config_dict[\"norm_p\"] == 2):\n raise NotImplementedError(\"norm_p only support 1 or 2\")\n\n config_dict[\"tok_type\"] = extract_dict_key(config_dict, \"tok_type\", \"analyzer\")\n if isinstance(config_dict[\"tok_type\"], str):\n config_dict[\"tok_type\"] = TFIDF_TOKENIZER_CODES[config_dict[\"tok_type\"]]\n\n config_dict[\"max_length\"] = extract_dict_key(config_dict, \"max_length\", \"truncate_length\")\n\n if \"ngram_range\" in config_dict:\n config_dict[\"min_ngram\"] = config_dict[\"ngram_range\"][0]\n config_dict[\"max_ngram\"] = config_dict[\"ngram_range\"][1]\n\n name2type = dict(TfidfBaseVectorizerParam._fields_)\n for name in name2type:\n setattr(self, name, 
name2type[name](config_dict.get(name, self.get_default(name))))\n\n\nclass TfidfVectorizerParam(ctypes.Structure):\n \"\"\"\n python class for handling struct TfidfVectorizerParam in tfidf.hpp\n \"\"\"\n\n _fields_ = [\n (\"base_param_ptr\", POINTER(TfidfBaseVectorizerParam)),\n (\"num_base_vect\", c_int32),\n (\"norm_p\", c_int32),\n ]\n\n def __init__(self, base_vect_param_list, norm_p):\n\n self.num_base_vect = len(base_vect_param_list)\n self.c_base_params = (TfidfBaseVectorizerParam * self.num_base_vect)()\n for i, base_vect_param in enumerate(base_vect_param_list):\n self.c_base_params[i] = base_vect_param\n\n self.base_param_ptr = cast(self.c_base_params, POINTER(TfidfBaseVectorizerParam))\n self.num_base_vect = c_int32(self.num_base_vect)\n self.norm_p = c_int32(norm_p)\n\n\nclass ScipyCscF32(ctypes.Structure):\n \"\"\"\n PyMatrix for scipy.sparse.csc_matrix\n \"\"\"\n\n _fields_ = [\n (\"rows\", c_uint32),\n (\"cols\", c_uint32),\n (\"col_ptr\", POINTER(c_uint64)),\n (\"row_idx\", POINTER(c_uint32)),\n (\"val\", POINTER(c_float)),\n ]\n\n def __init__(self, A):\n assert isinstance(A, smat.csc_matrix)\n assert A.dtype == np.float32\n self.py_buf = {\n \"col_ptr\": A.indptr.astype(np.uint64, copy=False),\n \"row_idx\": A.indices.astype(np.uint32, copy=False),\n \"val\": A.data.astype(np.float32, copy=False),\n }\n\n self.rows = c_uint32(A.shape[0])\n self.cols = c_uint32(A.shape[1])\n name2type = dict(ScipyCscF32._fields_)\n for name in self.py_buf:\n setattr(self, name, self.py_buf[name].ctypes.data_as(name2type[name]))\n self.buf = A\n\n @property\n def dtype(self):\n return self.buf.dtype\n\n @property\n def shape(self):\n return self.buf.shape\n\n @classmethod\n def init_from(cls, A):\n if A is None:\n return None\n elif isinstance(A, cls):\n return A\n else:\n return cls(A)\n\n\nclass ScipyCsrF32(ctypes.Structure):\n \"\"\"\n PyMatrix for scipy.sparse.csr_matrix\n \"\"\"\n\n _fields_ = [\n (\"rows\", c_uint32),\n (\"cols\", c_uint32),\n (\"row_ptr\", POINTER(c_uint64)),\n (\"col_idx\", POINTER(c_uint32)),\n (\"val\", POINTER(c_float)),\n ]\n\n def __init__(self, A):\n assert isinstance(A, smat.csr_matrix)\n assert A.dtype == np.float32\n self.py_buf = {\n \"row_ptr\": A.indptr.astype(np.uint64, copy=False),\n \"col_idx\": A.indices.astype(np.uint32, copy=False),\n \"val\": A.data.astype(np.float32, copy=False),\n }\n\n self.rows = c_uint32(A.shape[0])\n self.cols = c_uint32(A.shape[1])\n name2type = dict(ScipyCsrF32._fields_)\n for name in self.py_buf:\n setattr(self, name, self.py_buf[name].ctypes.data_as(name2type[name]))\n self.buf = A\n\n @classmethod\n def init_from(cls, A):\n if A is None:\n return None\n elif isinstance(A, cls):\n return A\n else:\n return cls(A)\n\n @property\n def dtype(self):\n return self.buf.dtype\n\n @property\n def shape(self):\n return self.buf.shape\n\n def dot(self, other):\n return self.buf.dot(other)\n\n\nclass ScipyDrmF32(ctypes.Structure):\n \"\"\"\n PyMatrix for row-major scipy.ndarray\n \"\"\"\n\n _fields_ = [(\"rows\", c_uint32), (\"cols\", c_uint32), (\"val\", POINTER(c_float))]\n\n def __init__(self, A):\n assert isinstance(A, np.ndarray)\n assert A.dtype == np.float32\n assert A.flags.c_contiguous is True\n self.py_buf = {\"val\": A}\n\n self.rows = c_uint32(A.shape[0])\n self.cols = c_uint32(A.shape[1])\n name2type = dict(ScipyDrmF32._fields_)\n for name in self.py_buf:\n setattr(self, name, self.py_buf[name].ctypes.data_as(name2type[name]))\n self.buf = A\n\n @classmethod\n def init_from(cls, A):\n if A is None:\n return None\n elif 
isinstance(A, cls):\n return A\n else:\n return cls(A)\n\n @property\n def dtype(self):\n return self.buf.dtype\n\n @property\n def shape(self):\n return self.buf.shape\n\n def dot(self, other):\n if isinstance(other, smat.spmatrix):\n return other.T.dot(self.buf.T).T\n else:\n return self.buf.dot(other)\n\n\nclass ScipyDcmF32(ctypes.Structure):\n \"\"\"\n PyMatrix for col-major scipy.ndarray\n \"\"\"\n\n _fields_ = [(\"rows\", c_uint32), (\"cols\", c_uint32), (\"val\", POINTER(c_float))]\n\n def __init__(self, A):\n assert isinstance(A, np.ndarray)\n assert A.dtype == np.float32\n assert A.flags.f_contiguous is True\n self.py_buf = {\"val\": A}\n\n self.rows = c_uint32(A.shape[0])\n self.cols = c_uint32(A.shape[1])\n name2type = dict(ScipyDcmF32._fields_)\n for name in self.py_buf:\n setattr(self, name, self.py_buf[name].ctypes.data_as(name2type[name]))\n self.buf = A\n\n @classmethod\n def init_from(cls, A):\n if A is None:\n return None\n elif isinstance(A, cls):\n return A\n else:\n return cls(A)\n\n @property\n def dtype(self):\n return self.buf.dtype\n\n @property\n def shape(self):\n return self.buf.shape\n\n def dot(self, other):\n if isinstance(other, smat.spmatrix):\n return other.T.dot(self.buf.T).T\n else:\n return self.buf.dot(other)\n\n\nclass ScipyCoordinateSparseAllocator(object):\n \"\"\"\n Scipy Coordinate Sparse Matrix Allocator for C++/C code\n \"\"\"\n\n CFUNCTYPE = CFUNCTYPE(None, c_uint32, c_uint32, c_uint64, c_void_p, c_void_p, c_void_p)\n\n def __init__(self, rows=0, cols=0, dtype=np.float64):\n self.rows = rows\n self.cols = cols\n self.row_idx = None\n self.col_idx = None\n self.data = None\n self.dtype = dtype\n assert dtype == np.float32 or dtype == np.float64\n\n def __call__(self, rows, cols, nnz, row_ptr, col_ptr, val_ptr):\n self.rows = rows\n self.cols = cols\n self.row_idx = np.zeros(nnz, dtype=np.uint64)\n self.col_idx = np.zeros(nnz, dtype=np.uint64)\n self.data = np.zeros(nnz, dtype=self.dtype)\n cast(row_ptr, POINTER(c_uint64)).contents.value = self.row_idx.ctypes.data_as(\n c_void_p\n ).value\n cast(col_ptr, POINTER(c_uint64)).contents.value = self.col_idx.ctypes.data_as(\n c_void_p\n ).value\n cast(val_ptr, POINTER(c_uint64)).contents.value = self.data.ctypes.data_as(c_void_p).value\n\n def tocoo(self):\n return smat.coo_matrix(\n (self.data, (self.row_idx, self.col_idx)), shape=(self.rows, self.cols)\n )\n\n def tocsr(self):\n return smat.csr_matrix(\n (self.data, (self.row_idx, self.col_idx)), shape=(self.rows, self.cols)\n )\n\n def tocsc(self):\n return smat.csc_matrix(\n (self.data, (self.row_idx, self.col_idx)), shape=(self.rows, self.cols)\n )\n\n @property\n def cfunc(self):\n return self.CFUNCTYPE(self)\n\n\nclass ScipyCompressedSparseAllocator(object):\n \"\"\"\n Scipy Compressed Sparse Matrix Allocator for C++/C code,\n which supports both smat.csr_matrix and smat.csc_matrix.\n\n Whether it is row or column major is controlled by self.is_col_major,\n which is passed in by the first argument in the __call__().\n\n Attributes:\n CFUNCTYPE (ctypes.CFUNCTYPE): a function prototype creates functions that uses the standard C calling convention\n \"\"\"\n\n CFUNCTYPE = CFUNCTYPE(None, c_bool, c_uint64, c_uint64, c_uint64, c_void_p, c_void_p, c_void_p)\n\n def __init__(self, rows=0, cols=0, dtype=np.float32):\n self.cols = cols\n self.rows = rows\n self.indices = None\n self.indptr = None\n self.data = None\n self.dtype = dtype\n self.is_col_major = None\n assert dtype == np.float32\n\n def __call__(self, is_col_major, rows, cols, nnz, 
indices_ptr, indptr_ptr, data_ptr):\n \"\"\"\n Allocate memory for the members\n\n Parameters:\n is_col_major (bool): specifying whether the to-be allocated matrix is row-majored or col-majored.\n rows (int): the number of rows of the sparse matrix.\n cols (int): the number of cols of the sparse matrix.\n nnz (int): the number of non-zeros of the sparse matrix.\n indptr_ptr (pointer): the pointer to the nnz array, of length (rows+1) or (cols+1).\n indices_ptr (pointer): the pointer to the row/col indices array, of length nnz.\n data_ptr (pointer): the pointer to the non-zero values array, of length nnz.\n\n Returns:\n None\n \"\"\"\n\n self.cols = cols\n self.rows = rows\n self.is_col_major = is_col_major\n if is_col_major:\n self.indptr = np.zeros(cols + 1, dtype=np.uint64)\n else:\n self.indptr = np.zeros(rows + 1, dtype=np.uint64)\n self.indices = np.zeros(nnz, dtype=np.uint32)\n self.data = np.zeros(nnz, dtype=self.dtype)\n\n cast(indices_ptr, POINTER(c_uint64)).contents.value = self.indices.ctypes.data_as(\n c_void_p\n ).value\n cast(indptr_ptr, POINTER(c_uint64)).contents.value = self.indptr.ctypes.data_as(\n c_void_p\n ).value\n cast(data_ptr, POINTER(c_uint64)).contents.value = self.data.ctypes.data_as(c_void_p).value\n\n def get(self):\n if self.is_col_major:\n return smat_util.csc_matrix(\n (self.data, self.indices, self.indptr), shape=(self.rows, self.cols)\n )\n else:\n return smat_util.csr_matrix(\n (self.data, self.indices, self.indptr), shape=(self.rows, self.cols)\n )\n\n @property\n def cfunc(self):\n return self.CFUNCTYPE(self)\n\n\nclass corelib(object):\n \"\"\"\n The core functions for linear problems\n \"\"\"\n\n @staticmethod\n def fillprototype(f, restype, argtypes):\n \"\"\"\n Specify corelib function's return type and argument types.\n\n Args:\n restype (single or list of ctypes): The return type.\n argtypes (list of ctypes): The argument types.\n \"\"\"\n f.restype = restype\n f.argtypes = argtypes\n\n @staticmethod\n def load_dynamic_library(dirname, soname, forced_rebuild=False):\n \"\"\"\n Load compiled C library into Python.\n If not found, will build upon loading.\n\n Args:\n dirname (str): The directory of C library.\n soname (str): The name of C library.\n force_rebuild (bool, optional): Whether to force rebuild C library upon calling.\n\n Return:\n c_lib (CDLL): Ctypes CDLL library.\n \"\"\"\n try:\n if forced_rebuild:\n check_output(\"make -C {} clean lib\".format(dirname), shell=True)\n path_to_so = glob(os.path.join(dirname, soname) + \"*.so\")[0]\n _c_lib = CDLL(path_to_so)\n except BaseException:\n try:\n check_output(\"make -C {} clean lib\".format(dirname), shell=True)\n path_to_so = glob(os.path.join(dirname, soname) + \"*.so\")[0]\n _c_lib = CDLL(path_to_so)\n except BaseException:\n raise Exception(\"{soname} library cannot be found and built.\".format(soname=soname))\n return _c_lib\n\n def __init__(self, dirname, soname, forced_rebuild=False):\n self.clib_float32 = corelib.load_dynamic_library(\n dirname, soname + \"_float32\", forced_rebuild=forced_rebuild\n )\n self.link_xlinear_methods()\n self.link_sparse_operations()\n self.link_clustering()\n self.link_tfidf_vectorizer()\n\n def link_xlinear_methods(self):\n \"\"\"\n Specify C-lib's Xlinear methods argument and return type.\n \"\"\"\n arg_list = [\n POINTER(ScipyCsrF32), # CSR X\n POINTER(ScipyCscF32), # CSC Y\n POINTER(ScipyCscF32), # CSC C\n POINTER(ScipyCscF32), # CSC M\n POINTER(ScipyCscF32), # CSC R\n ScipyCoordinateSparseAllocator.CFUNCTYPE, # py_coo_allocator\n c_double, # 
threshold\n c_uint32, # max_nonzeros_per_label\n c_int, # solver_type\n c_double, # Cp\n c_double, # Cn\n c_uint64, # max_iter\n c_double, # eps\n c_double, # bias\n c_int, # threads\n ]\n corelib.fillprototype(\n self.clib_float32.c_xlinear_single_layer_train_csr_f32,\n None,\n [POINTER(ScipyCsrF32)] + arg_list[1:],\n )\n corelib.fillprototype(\n self.clib_float32.c_xlinear_single_layer_train_drm_f32,\n None,\n [POINTER(ScipyDrmF32)] + arg_list[1:],\n )\n\n arg_list = [c_void_p]\n corelib.fillprototype(self.clib_float32.c_xlinear_destruct_model, None, arg_list)\n\n # Interface for sparse prediction\n arg_list = [\n c_void_p,\n POINTER(ScipyCsrF32),\n c_uint32,\n c_char_p,\n c_uint32,\n c_int,\n ScipyCompressedSparseAllocator.CFUNCTYPE,\n ]\n corelib.fillprototype(self.clib_float32.c_xlinear_predict_csr_f32, None, arg_list)\n\n # Interface for dense prediction\n arg_list = [\n c_void_p,\n POINTER(ScipyDrmF32),\n c_uint32,\n c_char_p,\n c_uint32,\n c_int,\n ScipyCompressedSparseAllocator.CFUNCTYPE,\n ]\n corelib.fillprototype(self.clib_float32.c_xlinear_predict_drm_f32, None, arg_list)\n\n # c interface for loading just model tree directly (no tfidf)\n res_list = c_void_p\n arg_list = [c_char_p]\n corelib.fillprototype(self.clib_float32.c_xlinear_load_model_from_disk, res_list, arg_list)\n\n res_list = c_void_p\n arg_list = [c_char_p, c_int]\n corelib.fillprototype(\n self.clib_float32.c_xlinear_load_model_from_disk_ext, res_list, arg_list\n )\n\n # c interface for per-layer prediction\n arg_list = [\n POINTER(ScipyCsrF32),\n POINTER(ScipyCsrF32),\n POINTER(ScipyCscF32),\n POINTER(ScipyCscF32),\n c_char_p,\n c_uint32,\n c_int,\n c_float,\n ScipyCompressedSparseAllocator.CFUNCTYPE,\n ]\n corelib.fillprototype(\n self.clib_float32.c_xlinear_single_layer_predict_csr_f32, None, arg_list\n )\n\n arg_list = [\n POINTER(ScipyDrmF32),\n POINTER(ScipyCsrF32),\n POINTER(ScipyCscF32),\n POINTER(ScipyCscF32),\n c_char_p,\n c_uint32,\n c_int,\n c_float,\n ScipyCompressedSparseAllocator.CFUNCTYPE,\n ]\n corelib.fillprototype(\n self.clib_float32.c_xlinear_single_layer_predict_drm_f32, None, arg_list\n )\n\n res_list = c_uint32\n arg_list = [c_void_p, c_char_p]\n corelib.fillprototype(self.clib_float32.c_xlinear_get_int_attr, res_list, arg_list)\n\n def xlinear_load_predict_only(\n self,\n folder,\n weight_matrix_type=\"BINARY_SEARCH_CHUNKED\",\n ):\n \"\"\"\n Load xlinear model in predict only mode.\n\n Args:\n folder (str): The folder path for xlinear model.\n weight_matrix_type (str, optional): The xlinear inference model types.\n\n Return:\n cmodel (ptr): The pointer to xlinear model.\n \"\"\"\n weight_matrix_type_id = XLINEAR_INFERENCE_MODEL_TYPES[weight_matrix_type]\n cmodel = self.clib_float32.c_xlinear_load_model_from_disk_ext(\n c_char_p(folder.encode(\"utf-8\")), c_int(int(weight_matrix_type_id))\n )\n return cmodel\n\n def xlinear_destruct_model(self, c_model):\n \"\"\"\n Destruct xlinear model.\n\n Args:\n cmodel (ptr): The pointer to xlinear model.\n \"\"\"\n self.clib_float32.c_xlinear_destruct_model(c_model)\n\n def xlinear_predict(\n self,\n c_model,\n X,\n overriden_beam_size,\n overriden_post_processor_str,\n overriden_only_topk,\n threads,\n pred_alloc,\n ):\n \"\"\"\n Performs a full prediction using the given model and queries.\n\n Args:\n c_model (c_pointer): A C pointer to the model to use for prediction. 
This pointer\n is returned by the c_load_xlinear_model_from_disk and\n c_load_xlinear_model_from_disk_ext functions in corelib.clib_float32.\n X: The query matrix (admissible formats are smat.csr_matrix,\n np.ndarray, ScipyCsrF32, or ScipyDrmF32). Note that if this is smat.csr_matrix,\n the matrix must have sorted indices. You can call sort_indices() to ensure this.\n overriden_beam_size (uint): Overrides the beam size to use for prediction. Use None for\n model defaults.\n overriden_post_processor_str (string): Overrides the post processor to use by name. Use\n None for model defaults.\n overriden_only_topk (uint): Overrides the number of results to return for each query. Use\n None for model defaults.\n threads (int): Sets the number of threads to use in computation. Use\n -1 to use the maximum amount of available threads.\n pred_alloc (ScipyCompressedSparseAllocator): The allocator to store the result in.\n \"\"\"\n clib = self.clib_float32\n\n if isinstance(X, smat.csr_matrix):\n if not X.has_sorted_indices:\n raise ValueError(\"Query matrix does not have sorted indices!\")\n X = ScipyCsrF32.init_from(X)\n elif isinstance(X, np.ndarray):\n X = ScipyDrmF32.init_from(X)\n\n if isinstance(X, ScipyCsrF32):\n c_predict = clib.c_xlinear_predict_csr_f32\n elif isinstance(X, ScipyDrmF32):\n c_predict = clib.c_xlinear_predict_drm_f32\n else:\n raise NotImplementedError(\"type(X) = {} not implemented\".format(type(X)))\n\n c_predict(\n c_model,\n byref(X),\n overriden_beam_size if overriden_beam_size else 0,\n overriden_post_processor_str.encode(\"utf-8\") if overriden_post_processor_str else None,\n overriden_only_topk if overriden_only_topk else 0,\n threads,\n pred_alloc.cfunc,\n )\n\n def xlinear_single_layer_predict(\n self,\n X,\n csr_codes,\n W,\n C,\n post_processor_str,\n only_topk,\n num_threads,\n bias,\n pred_alloc,\n ):\n \"\"\"\n Performs a single layer prediction in C++ using matrices owned by Python.\n\n Args:\n X (csr_matrix): The query matrix.\n Note that if this is smat.csr_matrix, the matrix must have sorted indices.\n You can call sort_indices() to ensure this.\n csr_codes (smat.csr_matrix or ScipyCsrF32): The prediction for the previous layer, None if this is the first layer.\n W (smat.csc_matrix, ScipyCscF32): The weight matrix for this layer.\n C (smat.csc_matrix, ScipyCscF32): The child/parent map for this layer.\n post_processor_str (str): A string specifying which post processor to use.\n only_topk (uint): How many results to return for each query.\n num_threads (uint): How many threads to use in this computation. 
Set to -1 to use defaults.\n bias (float): The bias of the model.\n pred_alloc (ScipyCompressedSparseAllocator): The allocator to store the result in.\n \"\"\"\n clib = self.clib_float32\n\n post_processor_str = post_processor_str.encode(\"utf-8\")\n\n W = ScipyCscF32.init_from(W)\n\n if isinstance(X, smat.csr_matrix):\n if not X.has_sorted_indices:\n raise ValueError(\"Query matrix does not have sorted indices!\")\n X = ScipyCsrF32.init_from(X)\n elif isinstance(X, np.ndarray):\n X = ScipyDrmF32.init_from(X)\n\n if isinstance(X, ScipyCsrF32):\n c_single_layer_predict = clib.c_xlinear_single_layer_predict_csr_f32\n elif isinstance(X, ScipyDrmF32):\n c_single_layer_predict = clib.c_xlinear_single_layer_predict_drm_f32\n else:\n raise NotImplementedError(\"type(X) = {} not implemented\".format(type(X)))\n\n # csr_codes and pC might be null\n if csr_codes is not None:\n csr_codes = ScipyCsrF32.init_from(csr_codes)\n\n if C is None:\n C = smat.csc_matrix(np.ones((W.shape[1], 1), dtype=W.dtype))\n C = ScipyCscF32.init_from(C)\n\n c_single_layer_predict(\n byref(X),\n byref(csr_codes) if csr_codes is not None else None,\n byref(W),\n byref(C),\n post_processor_str,\n only_topk,\n num_threads,\n bias,\n pred_alloc.cfunc,\n )\n\n def xlinear_single_layer_train(\n self,\n pX,\n pY,\n pC,\n pM,\n pR,\n threshold=0.1,\n max_nonzeros_per_label=None,\n solver_type=\"L2R_L2LOSS_SVC_DUAL\",\n Cp=1.0,\n Cn=1.0,\n max_iter=1000,\n eps=0.1,\n bias=1.0,\n threads=-1,\n verbose=0,\n **kwargs,\n ):\n \"\"\"\n Performs a single layer training in C++ using matrices owned by Python.\n\n Args:\n pX (ScipyCsrF32 or ScipyDrmF32): Instance feature matrix of shape (nr_inst, nr_feat).\n pY (ScipyCscF32): Label matrix of shape (nr_inst, nr_labels).\n pC (ScipyCscF32): Single matrix from clustering chain, representing a hierarchical clustering.\n pM (ScipyCsrF32): Single matrix from matching chain.\n pR (ScipyCscF32): Relevance matrix for cost-sensitive learning, of shape (nr_inst, nr_labels).\n threshold (float, optional): sparsify the final model by eliminating all entries with abs value less than threshold.\n Defaults to 0.1.\n max_nonzeros_per_label (int, optional): keep at most NONZEROS weight parameters per label in model.\n Default None to set to (nr_feat + 1)\n solver_type (string, optional): backend linear solver type.\n Options: L2R_L2LOSS_SVC_DUAL (default), L2R_L1LOSS_SVC_DUAL.\n Cp (float, optional): positive penalty parameter. Defaults to 1.0\n Cn (float, optional): negative penalty parameter. Defaults to 1.0\n max_iter (int, optional): maximum iterations. Defaults to 1000\n eps (float, optional): epsilon. Defaults to 0.1\n bias (float, optional): if >0, append the bias value to each instance feature. Defaults to 1.0\n threads (int, optional): the number of threads to use for training. Defaults to -1 to use all\n verbose (int, optional): verbose level. 
Defaults to 0\n\n Return:\n layer_train_res (smat.csc_matrix): The layer training result.\n \"\"\"\n clib = self.clib_float32\n coo_alloc = ScipyCoordinateSparseAllocator(dtype=np.float32)\n if isinstance(pX, ScipyCsrF32):\n c_xlinear_single_layer_train = clib.c_xlinear_single_layer_train_csr_f32\n elif isinstance(pX, ScipyDrmF32):\n c_xlinear_single_layer_train = clib.c_xlinear_single_layer_train_drm_f32\n else:\n raise NotImplementedError(\"type(pX) = {} not implemented\".format(type(pX)))\n\n c_xlinear_single_layer_train(\n byref(pX),\n byref(pY),\n byref(pC) if pC is not None else None,\n byref(pM) if pM is not None else None,\n byref(pR) if pR is not None else None,\n coo_alloc.cfunc,\n threshold,\n 0 if max_nonzeros_per_label is None else max_nonzeros_per_label,\n XLINEAR_SOLVERS[solver_type],\n Cp,\n Cn,\n max_iter,\n eps,\n bias,\n threads,\n )\n return coo_alloc.tocsc().astype(np.float32)\n\n def xlinear_get_int_attr(self, c_model, attr):\n \"\"\"\n Get int attribute from C xlinear model.\n\n Args:\n c_model (ptr): The C xlinear model pointer.\n attr (str): The attribute name to get.\n\n Return:\n int_attr (int): The int attribute under given name.\n \"\"\"\n assert attr in {\n \"depth\",\n \"nr_features\",\n \"nr_labels\",\n \"nr_codes\",\n }, f\"attr {attr} not implemented\"\n return self.clib_float32.c_xlinear_get_int_attr(c_model, c_char_p(attr.encode(\"utf-8\")))\n\n def link_sparse_operations(self):\n \"\"\"\n Specify C-lib's sparse matrix operation methods argument and return type.\n \"\"\"\n arg_list = [\n POINTER(ScipyCscF32), # pX (should support both CSC and CSR)\n POINTER(ScipyCscF32), # pY (should support both CSC and CSR)\n ScipyCompressedSparseAllocator.CFUNCTYPE, # allocator for pZ\n c_bool, # eliminate_zeros\n c_bool, # sorted_indices\n c_int, # threads\n ]\n corelib.fillprototype(\n self.clib_float32.c_sparse_matmul_csc_f32,\n None,\n [POINTER(ScipyCscF32), POINTER(ScipyCscF32)] + arg_list[2:],\n )\n corelib.fillprototype(\n self.clib_float32.c_sparse_matmul_csr_f32,\n None,\n [POINTER(ScipyCsrF32), POINTER(ScipyCsrF32)] + arg_list[2:],\n )\n\n arg_list = [\n POINTER(ScipyCsrF32), # pX\n POINTER(ScipyCscF32), # pW\n c_uint64, # len\n POINTER(c_uint32), # X_row_idx\n POINTER(c_uint32), # W_col_idx\n POINTER(c_float), # val\n c_int, # threads\n ]\n corelib.fillprototype(\n self.clib_float32.c_sparse_inner_products_csr_f32,\n None,\n [POINTER(ScipyCsrF32)] + arg_list[1:],\n )\n corelib.fillprototype(\n self.clib_float32.c_sparse_inner_products_drm_f32,\n None,\n [POINTER(ScipyDrmF32)] + arg_list[1:],\n )\n\n def sparse_matmul(self, X, Y, eliminate_zeros=False, sorted_indices=True, threads=-1):\n \"\"\"\n Sparse-Sparse matrix multiplication with multithreading (shared-memory).\n\n Args:\n X (smat.csc_matrix, smat.csr_matrix, ScipyCscF32, ScipyCsrF32): The first sparse matrix.\n Y (smat.csc_matrix, smat.csr_matrix, ScipyCscF32, ScipyCsrF32): The second sparse matrix.\n eliminate_zeros (bool, optional): if true, then eliminate (potential) zeros created by maxnnz in output matrix Z. Default is false.\n sorted_indices (bool, optional): if true, then sort the Z.indices for the output matrix Z. Default is true.\n threads (int, optional): The number of threads. 
Default -1 to use all cores.\n\n Return:\n matmul_res (smat.csc_matrix or smat.csr_matrix): The matrix multiplication result of X and Y.\n \"\"\"\n\n if X.shape[1] != Y.shape[0]:\n raise ValueError(\"X.shape[1]={} != Y.shape[0]={}\".format(X.shape[1], Y.shape[0]))\n\n clib = self.clib_float32\n pred_alloc = ScipyCompressedSparseAllocator()\n\n def is_col_major(X):\n return isinstance(X, smat.csc_matrix) or isinstance(X, ScipyCscF32)\n\n def is_row_major(X):\n return isinstance(X, smat.csr_matrix) or isinstance(X, ScipyCsrF32)\n\n if is_col_major(X) and is_col_major(Y):\n pX = ScipyCscF32.init_from(X)\n pY = ScipyCscF32.init_from(Y)\n clib.c_sparse_matmul_csc_f32(\n pX, pY, pred_alloc.cfunc, eliminate_zeros, sorted_indices, threads\n )\n elif is_row_major(X) and is_row_major(Y):\n pX = ScipyCsrF32.init_from(X)\n pY = ScipyCsrF32.init_from(Y)\n clib.c_sparse_matmul_csr_f32(\n pX, pY, pred_alloc.cfunc, eliminate_zeros, sorted_indices, threads\n )\n elif is_col_major(X) and is_row_major(Y):\n if X.nnz > Y.nnz:\n Y = Y.tocsc()\n pX = ScipyCscF32.init_from(X)\n pY = ScipyCscF32.init_from(Y)\n clib.c_sparse_matmul_csc_f32(\n pX, pY, pred_alloc.cfunc, eliminate_zeros, sorted_indices, threads\n )\n else:\n X = X.tocsr()\n pX = ScipyCsrF32.init_from(X)\n pY = ScipyCsrF32.init_from(Y)\n clib.c_sparse_matmul_csr_f32(\n pX, pY, pred_alloc.cfunc, eliminate_zeros, sorted_indices, threads\n )\n elif is_row_major(X) and is_col_major(Y):\n if X.nnz > Y.nnz:\n Y = Y.tocsr()\n pX = ScipyCsrF32.init_from(X)\n pY = ScipyCsrF32.init_from(Y)\n clib.c_sparse_matmul_csr_f32(\n pX, pY, pred_alloc.cfunc, eliminate_zeros, sorted_indices, threads\n )\n else:\n X = X.tocsc()\n pX = ScipyCscF32.init_from(X)\n pY = ScipyCscF32.init_from(Y)\n clib.c_sparse_matmul_csc_f32(\n pX, pY, pred_alloc.cfunc, eliminate_zeros, sorted_indices, threads\n )\n else:\n raise ValueError(\n \"X and Y should be either csr_matrix/csc_matrix/ScipyCscF32/ScipyCsrF32!\"\n )\n\n return pred_alloc.get()\n\n def sparse_inner_products(self, pX, pW, X_row_idx, W_col_idx, pred_values=None, threads=-1):\n \"\"\"\n Sparse-Sparse matrix batch inner product with multithreading (shared-memory).\n Do inner product for rows from `pX` indicated by `X_row_idx`, and columns from `pW` indicated by `W_col_idx`.\n Results will be written to `pred_values` if provided; otherwise, a new array is created for the results.\n\n Args:\n pX (ScipyCsrF32, ScipyDrmF32): The first sparse matrix.\n pW (ScipyCscF32, ScipyDcmF32): The second sparse matrix.\n X_row_idx (ndarray): Row indexes for `pX`.\n W_col_idx (ndarray): Column indexes for `pW`.\n pred_values (ndarray, optional): The inner product result array.\n threads (int, optional): The number of threads. 
Default -1 to use all cores.\n\n Return:\n pred_values (ndarray): The matrix batch inner product results.\n If `pred_values` is not given, return a newly allocated ndarray, dtype same as `pW`.\n \"\"\"\n clib = self.clib_float32\n\n nnz = len(X_row_idx)\n assert nnz == len(W_col_idx)\n\n if not isinstance(pW, ScipyCscF32):\n raise NotImplementedError(\"type(pW) = {} not implemented\".format(type(pW)))\n\n if isinstance(pX, ScipyCsrF32):\n c_sparse_inner_products = clib.c_sparse_inner_products_csr_f32\n elif isinstance(pX, ScipyDrmF32):\n c_sparse_inner_products = clib.c_sparse_inner_products_drm_f32\n else:\n raise NotImplementedError(\"type(pX) = {} not implemented\".format(type(pX)))\n\n if pred_values is None or len(pred_values) != nnz or pred_values.dtype != np.float32:\n pred_values = np.zeros(nnz, pW.dtype)\n\n c_sparse_inner_products(\n byref(pX),\n byref(pW),\n nnz,\n X_row_idx.ctypes.data_as(POINTER(c_uint32)),\n W_col_idx.ctypes.data_as(POINTER(c_uint32)),\n pred_values.ctypes.data_as(POINTER(c_float)),\n threads,\n )\n return pred_values\n\n def link_clustering(self):\n \"\"\"\n Specify C-lib's clustering method argument and return type.\n \"\"\"\n arg_list = [\n POINTER(ScipyCsrF32),\n c_uint32,\n c_uint32,\n c_int,\n c_uint32,\n c_int,\n POINTER(c_uint32),\n ]\n corelib.fillprototype(\n self.clib_float32.c_run_clustering_csr_f32, None, [POINTER(ScipyCsrF32)] + arg_list[1:]\n )\n corelib.fillprototype(\n self.clib_float32.c_run_clustering_drm_f32, None, [POINTER(ScipyDrmF32)] + arg_list[1:]\n )\n\n def run_clustering(self, py_feat_mat, depth, algo, seed, codes=None, max_iter=10, threads=-1):\n \"\"\"\n Run clustering with given label embedding matrix and parameters in C++.\n\n Args:\n py_feat_mat (ScipyCsrF32, ScipyDrmF32): label embedding matrix (num_labels x num_features).\n depth (int): Depth of K-means clustering N-nary tree.\n algo (str): The algorithm for clustering, either `KMEANS` or `SKMEANS`.\n seed (int): Random seed.\n codes (ndarray, optional): Label clustering results.\n max_iter (int, optional): Maximum number of iterations for reordering each node based on score.\n threads (int, optional): The number of threads. 
Default -1 to use all cores.\n\n Return:\n codes (ndarray): The clustering result.\n If `codes` is not given, return a newly allocated ndarray, dtype `np.uint32`.\n \"\"\"\n clib = self.clib_float32\n if isinstance(py_feat_mat, ScipyCsrF32):\n run_clustering = clib.c_run_clustering_csr_f32\n elif isinstance(py_feat_mat, ScipyDrmF32):\n run_clustering = clib.c_run_clustering_drm_f32\n else:\n raise NotImplementedError(\n \"type(py_feat_mat) = {} not implemented\".format(type(py_feat_mat))\n )\n\n if codes is None or len(codes) != py_feat_mat.shape[0] or codes.dtype != np.uint32:\n codes = np.zeros(py_feat_mat.rows, dtype=np.uint32)\n run_clustering(\n byref(py_feat_mat),\n depth,\n algo,\n seed,\n max_iter,\n threads,\n codes.ctypes.data_as(POINTER(c_uint32)),\n )\n return codes\n\n def link_tfidf_vectorizer(self):\n \"\"\"\n Specify C-lib's Tfidf vectorizer method argument and return type.\n \"\"\"\n res_list = c_void_p\n arg_list = [c_char_p]\n corelib.fillprototype(self.clib_float32.c_tfidf_load, res_list, arg_list)\n\n arg_list = [c_void_p, c_char_p]\n corelib.fillprototype(self.clib_float32.c_tfidf_save, None, arg_list)\n\n arg_list = [c_void_p]\n corelib.fillprototype(self.clib_float32.c_tfidf_destruct, None, arg_list)\n\n arg_list = [\n c_int, # threads\n ScipyCompressedSparseAllocator.CFUNCTYPE, # pred_alloc for result\n ]\n\n # model, fname, fname_len, buffer_size\n corelib.fillprototype(\n self.clib_float32.c_tfidf_predict_from_file,\n None,\n [c_void_p, c_void_p, c_uint64, c_uint64] + arg_list,\n )\n\n # model, corpus, doc_lens, nr_docs\n corelib.fillprototype(\n self.clib_float32.c_tfidf_predict,\n None,\n [c_void_p, c_void_p, POINTER(c_uint64), c_uint64] + arg_list,\n )\n\n res_list = c_void_p\n\n # file-list, fname_lens, nr_files, param, buffer_size, threads\n corelib.fillprototype(\n self.clib_float32.c_tfidf_train_from_file,\n res_list,\n [c_void_p, POINTER(c_uint64), c_uint64, POINTER(TfidfVectorizerParam), c_uint64, c_int],\n )\n # corpus, doc_lens, nr_docs, params, threads\n corelib.fillprototype(\n self.clib_float32.c_tfidf_train,\n res_list,\n [c_void_p, POINTER(c_uint64), c_uint64, POINTER(TfidfVectorizerParam), c_int],\n )\n\n def tfidf_destruct(self, model):\n \"\"\"\n Destruct Tfidf model.\n\n Args:\n model (ptr): Pointer to C Tfidf model.\n \"\"\"\n if type(model) == c_void_p:\n self.clib_float32.c_tfidf_destruct(model)\n\n def tfidf_save(self, model, save_dir):\n \"\"\"\n Save trained tfidf vectorizer to disk.\n\n Args:\n model (ptr): Pointer to C Tfidf model.\n save_dir (str): Folder to save the model.\n \"\"\"\n self.clib_float32.c_tfidf_save(model, c_char_p(save_dir.encode(\"utf-8\")))\n\n def tfidf_load(self, load_dir):\n \"\"\"\n Load a CppTfidf vectorizer from disk.\n\n Args:\n load_dir (str): Folder inside which the model is loaded.\n\n Returns:\n pointer to C instance tfidf::Vectorizer\n \"\"\"\n return self.clib_float32.c_tfidf_load(c_char_p(load_dir.encode(\"utf-8\")))\n\n def tfidf_train(self, trn_corpus, config=None):\n \"\"\"\n Train on a corpus.\n\n Args:\n trn_corpus (list of str or str): Training corpus in the form of a list of strings or path to corpus file/folder.\n config (dict): Dict with keyword arguments to pass to C++ class tfidf::Vectorizer. 
None to use default in TfidfVectorizerParam.\n For TfidfVectorizerParam, the config should contain\n base_vect_configs (List(Dict)): list of config (list[TfidfBaseVectorizerParam]) to be used for TfidfBaseVectorizerParam.\n norm_p (int): after ensembling feature sub matrices, do row-wise normalization with norm_p.\n buffer_size (int): if training from file, number of bytes allocated for file I/O. Set to 0 to use default value.\n threads (int): number of threads to use, set to negative to use all\n For TfidfBaseVectorizerParam, the config should contain\n ngram_range (tuple of int): (min_ngram, max_ngram)\n truncate_length (int): sequence truncation length, set to negative to disable\n max_feature (int): maximum number of features allowed, set to 0 to disable\n min_df_ratio (float, [0, max_df_ratio)): min ratio for document frequency truncation\n max_df_ratio (float, (min_df_ratio, 1]): max ratio for document frequency truncation\n min_df_cnt (int, [0, max_df_cnt)): min count for document frequency truncation\n max_df_cnt (int, (min_df_cnt, Inf)): max count for document frequency truncation. Default -1 to disable.\n binary (bool): whether to binarize term frequency, default False\n use_idf (bool): whether to use inverse document frequency, default True\n smooth_idf (bool): whether to smooth IDF by adding 1 to all DF counts, default True\n sublinear_tf (bool): whether to use sublinear mapping (log) on term frequency, default False\n keep_frequent_feature (bool): if max_feature > 0, will only keep max_feature features by\n ignoring features with low document frequency (if True, default),\n ignoring features with high document frequency (if False)\n norm (str, 'l1' or 'l2'): feature vector will have unit l1 or l2 norm\n analyzer (str, 'word', 'char' or 'char_wb'): Whether to use word or character n-grams.\n Option 'char_wb' creates character n-grams only from text inside word boundaries;\n n-grams at the edges of words are padded with space.\n buffer_size (int): if training from file, number of bytes allocated for file I/O. 
Set to 0 to use default value.\n threads (int): number of threads to use, set to negative to use all\n\n Returns:\n pointer to C instance tfidf::Vectorizer\n \"\"\"\n\n # Check whether \"base_vect_configs\" is in config.keys()\n # If not, this config is for TfidfBaseVectorizerParam.\n # Otherwise, this config is for TfidfVectorizerParam.\n if \"base_vect_configs\" not in config:\n base_vect_param_list = [TfidfBaseVectorizerParam(config)]\n norm_p = base_vect_param_list[0].norm_p\n else:\n base_vect_param_list = [\n TfidfBaseVectorizerParam(base_vect_config)\n for base_vect_config in config[\"base_vect_configs\"]\n ]\n norm_p = config[\"norm_p\"]\n params = TfidfVectorizerParam(base_vect_param_list, norm_p)\n\n if isinstance(trn_corpus, str):\n if os.path.isfile(trn_corpus): # train from a single corpus file\n corpus_files = [trn_corpus]\n elif os.path.isdir(trn_corpus): # train from a folder of corpus files\n corpus_files = [\n os.path.join(trn_corpus, f)\n for f in sorted(os.listdir(trn_corpus))\n if os.path.isfile(os.path.join(trn_corpus, f))\n ]\n else:\n raise Exception(\"Failed to load training corpus from {}\".format(trn_corpus))\n nr_files = len(corpus_files)\n c_corpusf_arr = (c_char_p * nr_files)()\n c_corpusf_arr[:] = [line.encode(\"utf-8\") for line in corpus_files]\n fname_lens = np.array([len(line) for line in c_corpusf_arr], dtype=np.uint64)\n\n model = self.clib_float32.c_tfidf_train_from_file(\n c_corpusf_arr,\n fname_lens.ctypes.data_as(POINTER(c_uint64)),\n nr_files,\n params,\n config[\"buffer_size\"],\n config[\"threads\"],\n )\n else:\n nr_doc = len(trn_corpus)\n c_corpus_arr = (c_char_p * nr_doc)()\n c_corpus_arr[:] = [line.encode(\"utf-8\") for line in trn_corpus]\n doc_lens = np.array([len(line) for line in c_corpus_arr], dtype=np.uint64)\n\n model = self.clib_float32.c_tfidf_train(\n c_corpus_arr,\n doc_lens.ctypes.data_as(POINTER(c_uint64)),\n nr_doc,\n params,\n config[\"threads\"],\n )\n\n return model\n\n def tfidf_predict(self, model, corpus, buffer_size=0, threads=-1):\n \"\"\"\n Vectorize a corpus.\n\n Args:\n model (ctypes.c_void_p): pointer to tfidf::Vectorizer model\n corpus (list of str or str): List of strings to vectorize, or path to a text file to vectorize.\n buffer_size (int, default 0): number of bytes used for file I/O when predicting from a file, set to 0 to use default value\n threads (int, default -1): number of threads to use for predict, set to negative to use all\n\n Returns:\n scipy.sparse.csr.csr_matrix: Matrix of features.\n \"\"\"\n pred_alloc = ScipyCompressedSparseAllocator()\n if isinstance(corpus, str):\n # predict from file\n assert os.path.isfile(corpus), \"Cannot predict from {}!\".format(corpus)\n corpus_utf8 = corpus.encode(\"utf-8\")\n\n self.clib_float32.c_tfidf_predict_from_file(\n model,\n c_char_p(corpus_utf8),\n len(corpus_utf8),\n buffer_size,\n threads,\n pred_alloc.cfunc,\n )\n\n else:\n # in memory predict\n nr_doc = len(corpus)\n c_corpus_arr = (c_char_p * nr_doc)()\n c_corpus_arr[:] = [line.encode(\"utf-8\") for line in corpus]\n doc_lens = np.array([len(line) for line in c_corpus_arr], dtype=np.uint64)\n\n self.clib_float32.c_tfidf_predict(\n model,\n c_corpus_arr,\n doc_lens.ctypes.data_as(POINTER(c_uint64)),\n nr_doc,\n threads,\n pred_alloc.cfunc,\n )\n return pred_alloc.get()\n\n\nclib = corelib(os.path.join(os.path.dirname(os.path.abspath(pecos.__file__)), \"core\"), \"libpecos\")\n" ]
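The wrapper methods above are reached through the module-level `clib` instance created on the file's last line. A minimal, hypothetical sketch of the predict-only loading path follows; it assumes the installed `pecos` package re-exports that `clib` singleton from `pecos.core`, and the model folder name is a placeholder rather than anything the library provides:

# Hypothetical usage sketch for the predict-only loading path wrapped above.
# Assumes `pecos` is installed and exposes the `clib` singleton defined at
# the bottom of this file; "xlinear_model" is a placeholder folder that a
# previous training run would have produced.
from pecos.core import clib

model_ptr = clib.xlinear_load_predict_only(
    "xlinear_model", weight_matrix_type="BINARY_SEARCH_CHUNKED"
)
# Per the assertion in xlinear_get_int_attr, only "depth", "nr_features",
# "nr_labels", and "nr_codes" are valid attribute names.
nr_labels = clib.xlinear_get_int_attr(model_ptr, "nr_labels")
clib.xlinear_destruct_model(model_ptr)  # the C++ model must be freed explicitly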
[ [ "scipy.sparse.csc_matrix", "scipy.sparse.coo_matrix", "scipy.sparse.csr_matrix", "numpy.ones", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
breandan/tensorflow
[ "7509bad95200e1baed4eb488dbeaaa2c505a2824", "7509bad95200e1baed4eb488dbeaaa2c505a2824", "7509bad95200e1baed4eb488dbeaaa2c505a2824", "7509bad95200e1baed4eb488dbeaaa2c505a2824", "7509bad95200e1baed4eb488dbeaaa2c505a2824", "7509bad95200e1baed4eb488dbeaaa2c505a2824", "7509bad95200e1baed4eb488dbeaaa2c505a2824" ]
[ "tensorflow/contrib/learn/python/learn/dataframe/transforms/batch.py", "tensorflow/python/lib/io/tf_record.py", "tensorflow/contrib/distributions/python/ops/chi2.py", "tensorflow/python/framework/test_util_test.py", "tensorflow/contrib/framework/python/ops/variables_test.py", "tensorflow/contrib/learn/python/learn/tests/multioutput_test.py", "tensorflow/python/training/training_ops_test.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Batches `Series` objects. For internal use, not part of the public API.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.learn.python.learn.dataframe import transform\nfrom tensorflow.python.training import input as input_ops\n\n\nclass AbstractBatchTransform(transform.Transform):\n \"\"\"Abstract parent class for batching Transforms.\"\"\"\n\n def __init__(self,\n batch_size,\n output_names,\n num_threads=1,\n queue_capacity=None):\n super(AbstractBatchTransform, self).__init__()\n self._batch_size = batch_size\n self._output_name_list = output_names\n self._num_threads = num_threads\n self._queue_capacity = (self.batch_size * 10 if queue_capacity is None\n else queue_capacity)\n\n @transform.parameter\n def batch_size(self):\n return self._batch_size\n\n @transform.parameter\n def num_threads(self):\n return self._num_threads\n\n @transform.parameter\n def queue_capacity(self):\n return self._queue_capacity\n\n @property\n def input_valency(self):\n return len(self.output_names)\n\n @property\n def _output_names(self):\n return self._output_name_list\n\n\nclass Batch(AbstractBatchTransform):\n \"\"\"Batches Columns to specified size.\n\n Note that dimension 0 is assumed to correspond to \"example number\" so\n `Batch` does not prepend an additional dimension to incoming `Series`.\n For example, if a `Tensor` in `transform_input` has shape [x, y], the\n corresponding output will have shape [batch_size, y].\n \"\"\"\n\n @property\n def name(self):\n return \"Batch\"\n\n def _apply_transform(self, transform_input):\n batched = input_ops.batch(transform_input,\n batch_size=self.batch_size,\n num_threads=self.num_threads,\n capacity=self.queue_capacity,\n enqueue_many=True)\n # TODO(jamieas): batch will soon return a list regardless of the number of\n # enqueued tensors. 
Remove the following once that change is in place.\n if not isinstance(batched, (tuple, list)):\n batched = (batched,)\n # pylint: disable=not-callable\n return self.return_type(*batched)\n\n\nclass ShuffleBatch(AbstractBatchTransform):\n \"\"\"Creates shuffled batches from `Series` containing a single row.\n\n Note that dimension 0 is assumed to correspond to \"example number\" so\n `ShuffleBatch` does not prepend an additional dimension to incoming `Series`.\n For example, if a `Tensor` in `transform_input` has shape [x, y], the\n corresponding output will have shape [batch_size, y].\n \"\"\"\n\n @property\n def name(self):\n return \"ShuffleBatch\"\n\n def __init__(self,\n batch_size,\n output_names,\n num_threads=1,\n queue_capacity=None,\n min_after_dequeue=None,\n seed=None):\n super(ShuffleBatch, self).__init__(batch_size, output_names, num_threads,\n queue_capacity)\n self._min_after_dequeue = (self.queue_capacity / 4 if\n min_after_dequeue is None else min_after_dequeue)\n self._seed = seed\n\n @transform.parameter\n def min_after_dequeue(self):\n return self._min_after_dequeue\n\n @transform.parameter\n def seed(self):\n return self._seed\n\n def _apply_transform(self, transform_input):\n batched = input_ops.shuffle_batch(transform_input,\n batch_size=self.batch_size,\n capacity=self.queue_capacity,\n min_after_dequeue=self.min_after_dequeue,\n num_threads=self.num_threads,\n seed=self.seed,\n enqueue_many=True)\n # TODO(jamieas): batch will soon return a list regardless of the number of\n # enqueued tensors. Remove the following once that change is in place.\n if not isinstance(batched, (tuple, list)):\n batched = (batched,)\n # pylint: disable=not-callable\n return self.return_type(*batched)\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"For reading and writing TFRecords files.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python.util import compat\n\n\ndef tf_record_iterator(path):\n \"\"\"An iterator that reads the records from a TFRecords file.\n\n Args:\n path: The path to the TFRecords file.\n\n Yields:\n Strings.\n\n Raises:\n IOError: If `path` cannot be opened for reading.\n \"\"\"\n reader = pywrap_tensorflow.PyRecordReader_New(compat.as_bytes(path), 0)\n if reader is None:\n raise IOError(\"Could not open %s.\" % path)\n while reader.GetNext():\n yield reader.record()\n reader.Close()\n\n\nclass TFRecordWriter(object):\n \"\"\"A class to write records to a TFRecords file.\n\n This class implements `__enter__` and `__exit__`, and can be used\n in `with` blocks like a normal file.\n\n @@__init__\n @@write\n @@close\n \"\"\"\n # TODO(josh11b): Support appending?\n def __init__(self, path):\n \"\"\"Opens file `path` and creates a `TFRecordWriter` writing to it.\n\n Args:\n path: The path to the TFRecords file.\n\n Raises:\n IOError: If `path` cannot be opened for writing.\n \"\"\"\n self._writer = pywrap_tensorflow.PyRecordWriter_New(compat.as_bytes(path))\n if self._writer is None:\n raise IOError(\"Could not write to %s.\" % path)\n\n def __enter__(self):\n \"\"\"Enter a `with` block.\"\"\"\n return self\n\n def __exit__(self, unused_type, unused_value, unused_traceback):\n \"\"\"Exit a `with` block, closing the file.\"\"\"\n self.close()\n\n def write(self, record):\n \"\"\"Write a string record to the file.\n\n Args:\n record: str\n \"\"\"\n self._writer.WriteRecord(record)\n\n def close(self):\n \"\"\"Close the file.\"\"\"\n self._writer.Close()\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"The Chi2 distribution class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.distributions.python.ops import gamma\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import math_ops\n\n\nclass Chi2(gamma.Gamma):\n \"\"\"The Chi2 distribution with degrees of freedom df.\n\n The PDF of this distribution is:\n\n ```pdf(x) = (x^(df/2 - 1)e^(-x/2))/(2^(df/2)Gamma(df/2)), x > 0```\n\n Note that the Chi2 distribution is a special case of the Gamma distribution,\n with Chi2(df) = Gamma(df/2, 1/2).\n \"\"\"\n\n def __init__(self, df, name=\"Chi2\"):\n with ops.op_scope([df], name, \"init\"):\n df = ops.convert_to_tensor(df)\n self._df = df\n super(Chi2, self).__init__(alpha=df / 2,\n beta=math_ops.cast(0.5, dtype=df.dtype))\n\n @property\n def df(self):\n return self._df\n", "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for tensorflow.ops.test_util.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport threading\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\n\nfrom google.protobuf import text_format\n\nfrom tensorflow.core.framework import graph_pb2\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.ops import constant_op\nfrom tensorflow.python.ops import logging_ops\n\n\nclass TestUtilTest(test_util.TensorFlowTestCase):\n\n def test_assert_ops_in_graph(self):\n with self.test_session():\n constant_op.constant([\"hello\", \"taffy\"], name=\"hello\")\n test_util.assert_ops_in_graph({\"hello\": \"Const\"}, ops.get_default_graph())\n\n self.assertRaises(\n ValueError, test_util.assert_ops_in_graph, {\"bye\": \"Const\"},\n ops.get_default_graph())\n\n self.assertRaises(\n ValueError, test_util.assert_ops_in_graph, {\"hello\": \"Variable\"},\n ops.get_default_graph())\n\n def test_assert_equal_graph_def(self):\n with tf.Graph().as_default() as g:\n def_empty = 
g.as_graph_def()\n tf.constant(5, name=\"five\")\n tf.constant(7, name=\"seven\")\n def_57 = g.as_graph_def()\n with tf.Graph().as_default() as g:\n tf.constant(7, name=\"seven\")\n tf.constant(5, name=\"five\")\n def_75 = g.as_graph_def()\n # Comparing strings is order dependent\n self.assertNotEqual(str(def_57), str(def_75))\n # assert_equal_graph_def doesn't care about order\n tf.test.assert_equal_graph_def(def_57, def_75)\n # Compare two unequal graphs\n with self.assertRaisesRegexp(AssertionError,\n r\"^Found unexpected node 'seven\"):\n tf.test.assert_equal_graph_def(def_57, def_empty)\n\n def testIsGoogleCudaEnabled(self):\n # The test doesn't assert anything. It ensures the py wrapper\n # function is generated correctly.\n if test_util.IsGoogleCudaEnabled():\n print(\"GoogleCuda is enabled\")\n else:\n print(\"GoogleCuda is disabled\")\n\n def testAssertProtoEqualsStr(self):\n\n graph_str = \"node { name: 'w1' op: 'params' }\"\n graph_def = graph_pb2.GraphDef()\n text_format.Merge(graph_str, graph_def)\n\n # test string based comparison\n self.assertProtoEquals(graph_str, graph_def)\n\n # test original comparison\n self.assertProtoEquals(graph_def, graph_def)\n\n def testNDArrayNear(self):\n a1 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\n a2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\n a3 = np.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]])\n self.assertTrue(self._NDArrayNear(a1, a2, 1e-5))\n self.assertFalse(self._NDArrayNear(a1, a3, 1e-5))\n\n def testCheckedThreadSucceeds(self):\n def noop(ev):\n ev.set()\n\n event_arg = threading.Event()\n\n self.assertFalse(event_arg.is_set())\n t = self.checkedThread(target=noop, args=(event_arg,))\n t.start()\n t.join()\n self.assertTrue(event_arg.is_set())\n\n def testCheckedThreadFails(self):\n def err_func():\n return 1 // 0\n\n t = self.checkedThread(target=err_func)\n t.start()\n with self.assertRaises(self.failureException) as fe:\n t.join()\n self.assertTrue(\"integer division or modulo by zero\" in str(fe.exception))\n\n def testCheckedThreadWithWrongAssertionFails(self):\n x = 37\n\n def err_func():\n self.assertTrue(x < 10)\n\n t = self.checkedThread(target=err_func)\n t.start()\n with self.assertRaises(self.failureException) as fe:\n t.join()\n self.assertTrue(\"False is not true\" in str(fe.exception))\n\n def testMultipleThreadsWithOneFailure(self):\n def err_func(i):\n self.assertTrue(i != 7)\n\n threads = [self.checkedThread(target=err_func, args=(i,))\n for i in range(10)]\n for t in threads:\n t.start()\n for i, t in enumerate(threads):\n if i == 7:\n with self.assertRaises(self.failureException):\n t.join()\n else:\n t.join()\n\n def _WeMustGoDeeper(self, msg):\n with self.assertRaisesOpError(msg):\n node_def = ops._NodeDef(\"op_type\", \"name\")\n node_def_orig = ops._NodeDef(\"op_type_orig\", \"orig\")\n op_orig = ops.Operation(node_def_orig, ops.get_default_graph())\n op = ops.Operation(node_def, ops.get_default_graph(), original_op=op_orig)\n raise errors.UnauthenticatedError(node_def, op, \"true_err\")\n\n def testAssertRaisesOpErrorDoesNotPassMessageDueToLeakedStack(self):\n with self.assertRaises(AssertionError):\n self._WeMustGoDeeper(\"this_is_not_the_error_you_are_looking_for\")\n\n self._WeMustGoDeeper(\"true_err\")\n self._WeMustGoDeeper(\"name\")\n self._WeMustGoDeeper(\"orig\")\n\n def testAllCloseScalars(self):\n self.assertAllClose(7, 7 + 1e-8)\n with self.assertRaisesRegexp(AssertionError, r\"Not equal to tolerance\"):\n self.assertAllClose(7, 8)\n\n def testArrayNear(self):\n a = [1, 2]\n b = [1, 
2, 5]\n with self.assertRaises(AssertionError):\n self.assertArrayNear(a, b, 0.001)\n a = [1, 2]\n b = [[1, 2], [3, 4]]\n with self.assertRaises(TypeError):\n self.assertArrayNear(a, b, 0.001)\n a = [1, 2]\n b = [1, 2]\n self.assertArrayNear(a, b, 0.001)\n\n def testForceGPU(self):\n with self.assertRaisesRegexp(errors.InvalidArgumentError,\n \"Cannot assign a device to node\"):\n with self.test_session(force_gpu=True):\n # this relies on us not having a GPU implementation for assert, which\n # seems sensible\n x = [True]\n y = [15]\n logging_ops.Assert(x, y).run()\n\nif __name__ == \"__main__\":\n googletest.main()\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"variables tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport numpy as np\nimport tensorflow as tf\n\n\nclass LocalVariableTest(tf.test.TestCase):\n\n def test_local_variable(self):\n with self.test_session() as sess:\n self.assertEquals([], tf.local_variables())\n value0 = 42\n tf.contrib.framework.local_variable(value0)\n value1 = 43\n tf.contrib.framework.local_variable(value1)\n variables = tf.local_variables()\n self.assertEquals(2, len(variables))\n self.assertRaises(tf.OpError, sess.run, variables)\n tf.initialize_variables(variables).run()\n self.assertAllEqual(set([value0, value1]), set(sess.run(variables)))\n\n def testLocalVariableNameAndShape(self):\n with self.test_session():\n with tf.variable_scope('A'):\n a = tf.contrib.framework.local_variable([1, 1, 1, 1, 1], name='a')\n self.assertEquals(a.op.name, 'A/a')\n self.assertListEqual(a.get_shape().as_list(), [5])\n self.assertListEqual([a], tf.contrib.framework.get_local_variables())\n\n def testLocalVariableNotInAllVariables(self):\n with self.test_session():\n with tf.variable_scope('A'):\n a = tf.contrib.framework.local_variable(0)\n self.assertFalse(a in tf.all_variables())\n self.assertTrue(a in tf.local_variables())\n\n def testLocalVariableNotInVariablesToRestore(self):\n with self.test_session():\n with tf.variable_scope('A'):\n a = tf.contrib.framework.local_variable(0)\n self.assertFalse(a in tf.contrib.framework.get_variables_to_restore())\n self.assertTrue(a in tf.local_variables())\n\n def testGetVariablesDontReturnsTransients(self):\n with self.test_session():\n with tf.variable_scope('A'):\n tf.contrib.framework.local_variable(0)\n with tf.variable_scope('B'):\n tf.contrib.framework.local_variable(0)\n self.assertEquals([], tf.contrib.framework.get_variables('A'))\n self.assertEquals([], tf.contrib.framework.get_variables('B'))\n\n def testGetLocalVariablesReturnsTransients(self):\n with self.test_session():\n with tf.variable_scope('A'):\n a = tf.contrib.framework.local_variable(0)\n with tf.variable_scope('B'):\n b = tf.contrib.framework.local_variable(0)\n self.assertEquals([a], 
tf.contrib.framework.get_local_variables('A'))\n self.assertEquals([b], tf.contrib.framework.get_local_variables('B'))\n\n def testInitializedVariableValue(self):\n with self.test_session() as sess:\n a = tf.contrib.framework.local_variable([0, 0, 0, 0, 0], name='a')\n sess.run(tf.initialize_local_variables())\n self.assertAllEqual(a.eval(), [0]*5)\n\n\nclass GlobalStepTest(tf.test.TestCase):\n\n def _assert_global_step(self, global_step, expected_dtype=tf.int64):\n self.assertEquals('%s:0' % tf.GraphKeys.GLOBAL_STEP, global_step.name)\n self.assertEquals(expected_dtype, global_step.dtype.base_dtype)\n self.assertEquals([], global_step.get_shape().as_list())\n\n def test_invalid_dtype(self):\n with tf.Graph().as_default() as g:\n self.assertEquals(None, tf.contrib.framework.get_global_step())\n tf.Variable(\n 0.0, trainable=False, dtype=tf.float32, name=tf.GraphKeys.GLOBAL_STEP)\n self.assertRaisesRegexp(\n TypeError, 'does not have integer type',\n tf.contrib.framework.get_global_step)\n self.assertRaisesRegexp(\n TypeError, 'does not have integer type',\n tf.contrib.framework.get_global_step, g)\n\n def test_invalid_shape(self):\n with tf.Graph().as_default() as g:\n self.assertEquals(None, tf.contrib.framework.get_global_step())\n tf.Variable(\n [0], trainable=False, dtype=tf.int32, name=tf.GraphKeys.GLOBAL_STEP)\n self.assertRaisesRegexp(\n TypeError, 'not scalar',\n tf.contrib.framework.get_global_step)\n self.assertRaisesRegexp(\n TypeError, 'not scalar',\n tf.contrib.framework.get_global_step, g)\n\n def test_create_global_step(self):\n self.assertEquals(None, tf.contrib.framework.get_global_step())\n with tf.Graph().as_default() as g:\n global_step = tf.contrib.framework.create_global_step()\n self._assert_global_step(global_step)\n self.assertRaisesRegexp(\n ValueError, 'already exists', tf.contrib.framework.create_global_step)\n self.assertRaisesRegexp(\n ValueError, 'already exists', tf.contrib.framework.create_global_step,\n g)\n self._assert_global_step(\n tf.contrib.framework.create_global_step(tf.Graph()))\n\n def test_get_global_step(self):\n with tf.Graph().as_default() as g:\n self.assertEquals(None, tf.contrib.framework.get_global_step())\n tf.Variable(\n 0, trainable=False, dtype=tf.int32, name=tf.GraphKeys.GLOBAL_STEP)\n self._assert_global_step(\n tf.contrib.framework.get_global_step(), expected_dtype=tf.int32)\n self._assert_global_step(\n tf.contrib.framework.get_global_step(g), expected_dtype=tf.int32)\n\n def test_get_or_create_global_step(self):\n with tf.Graph().as_default() as g:\n self.assertEquals(None, tf.contrib.framework.get_global_step())\n self._assert_global_step(\n tf.contrib.framework.get_or_create_global_step())\n self._assert_global_step(\n tf.contrib.framework.get_or_create_global_step(g))\n\n\nclass VariablesTest(tf.test.TestCase):\n\n def testCreateVariable(self):\n with self.test_session():\n with tf.variable_scope('A'):\n a = tf.contrib.framework.variable('a', [5])\n self.assertEquals(a.op.name, 'A/a')\n self.assertListEqual(a.get_shape().as_list(), [5])\n\n def testGetVariables(self):\n with self.test_session():\n with tf.variable_scope('A'):\n a = tf.contrib.framework.variable('a', [5])\n with tf.variable_scope('B'):\n b = tf.contrib.framework.variable('a', [5])\n self.assertEquals([a, b], tf.contrib.framework.get_variables())\n self.assertEquals([a], tf.contrib.framework.get_variables('A'))\n self.assertEquals([b], tf.contrib.framework.get_variables('B'))\n\n def testGetVariablesSuffix(self):\n with self.test_session():\n with 
tf.variable_scope('A'):\n a = tf.contrib.framework.variable('a', [5])\n with tf.variable_scope('A'):\n b = tf.contrib.framework.variable('b', [5])\n self.assertEquals([a], tf.contrib.framework.get_variables(suffix='a'))\n self.assertEquals([b], tf.contrib.framework.get_variables(suffix='b'))\n\n def testGetVariableWithSingleVar(self):\n with self.test_session():\n with tf.variable_scope('parent'):\n a = tf.contrib.framework.variable('child', [5])\n self.assertEquals(\n a, tf.contrib.framework.get_unique_variable('parent/child'))\n\n def testGetVariableWithDistractors(self):\n with self.test_session():\n with tf.variable_scope('parent'):\n a = tf.contrib.framework.variable('child', [5])\n with tf.variable_scope('child'):\n tf.contrib.framework.variable('grandchild1', [7])\n tf.contrib.framework.variable('grandchild2', [9])\n self.assertEquals(\n a, tf.contrib.framework.get_unique_variable('parent/child'))\n\n def testGetVariableThrowsExceptionWithNoMatch(self):\n var_name = 'cant_find_me'\n with self.test_session():\n with self.assertRaises(ValueError):\n tf.contrib.framework.get_unique_variable(var_name)\n\n def testGetThrowsExceptionWithChildrenButNoMatch(self):\n var_name = 'parent/child'\n with self.test_session():\n with tf.variable_scope(var_name):\n tf.contrib.framework.variable('grandchild1', [7])\n tf.contrib.framework.variable('grandchild2', [9])\n with self.assertRaises(ValueError):\n tf.contrib.framework.get_unique_variable(var_name)\n\n def testGetVariablesToRestore(self):\n with self.test_session():\n with tf.variable_scope('A'):\n a = tf.contrib.framework.variable('a', [5])\n with tf.variable_scope('B'):\n b = tf.contrib.framework.variable('a', [5])\n self.assertEquals([a, b],\n tf.contrib.framework.get_variables_to_restore())\n\n def testIncludeGetVariablesToRestore(self):\n with self.test_session():\n with tf.variable_scope('A'):\n a = tf.contrib.framework.variable('a', [5])\n with tf.variable_scope('B'):\n b = tf.contrib.framework.variable('a', [5])\n self.assertEquals([a, b], tf.contrib.framework.get_variables())\n self.assertEquals([a],\n tf.contrib.framework.get_variables_to_restore(['A']))\n\n def testExcludeGetVariablesToRestore(self):\n with self.test_session():\n with tf.variable_scope('A'):\n a = tf.contrib.framework.variable('a', [5])\n with tf.variable_scope('B'):\n b = tf.contrib.framework.variable('a', [5])\n self.assertEquals([a, b], tf.contrib.framework.get_variables())\n self.assertEquals([a],\n tf.contrib.framework.get_variables_to_restore(\n exclude=['B']))\n\n def testWrongIncludeGetVariablesToRestore(self):\n with self.test_session():\n with tf.variable_scope('A'):\n a = tf.contrib.framework.variable('a', [5])\n with tf.variable_scope('B'):\n b = tf.contrib.framework.variable('a', [5])\n self.assertEquals([a, b], tf.contrib.framework.get_variables())\n self.assertEquals([],\n tf.contrib.framework.get_variables_to_restore(['a']))\n\n def testGetMixedVariablesToRestore(self):\n with self.test_session():\n with tf.variable_scope('A'):\n a = tf.contrib.framework.variable('a', [5])\n b = tf.contrib.framework.variable('b', [5])\n with tf.variable_scope('B'):\n c = tf.contrib.framework.variable('c', [5])\n d = tf.contrib.framework.variable('d', [5])\n self.assertEquals([a, b, c, d], tf.contrib.framework.get_variables())\n self.assertEquals([a, c],\n tf.contrib.framework.get_variables_to_restore(\n include=['A/a', 'B/c']))\n\n def testExcludeGetMixedVariablesToRestore(self):\n with self.test_session():\n with tf.variable_scope('A'):\n a = 
tf.contrib.framework.variable('a', [5])\n b = tf.contrib.framework.variable('b', [5])\n with tf.variable_scope('B'):\n c = tf.contrib.framework.variable('c', [5])\n d = tf.contrib.framework.variable('d', [5])\n self.assertEquals([a, b, c, d], tf.contrib.framework.get_variables())\n self.assertEquals([b, d],\n tf.contrib.framework.get_variables_to_restore(\n exclude=['A/a', 'B/c']))\n\n def testReuseVariable(self):\n with self.test_session():\n with tf.variable_scope('A'):\n a = tf.contrib.framework.variable('a', [])\n with tf.variable_scope('A', reuse=True):\n b = tf.contrib.framework.variable('a', [])\n self.assertEquals(a, b)\n self.assertListEqual([a], tf.contrib.framework.get_variables())\n\n def testVariableWithRegularizer(self):\n with self.test_session():\n with tf.variable_scope('A'):\n a = tf.contrib.framework.variable('a', [], regularizer=tf.nn.l2_loss)\n loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0]\n self.assertDeviceEqual(loss.device, a.device)\n\n def testVariableWithRegularizerColocate(self):\n with self.test_session():\n with tf.variable_scope('A'):\n a = tf.contrib.framework.variable('a', [], device='gpu:0',\n regularizer=tf.nn.l2_loss)\n loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0]\n self.assertDeviceEqual(loss.device, a.device)\n\n def testVariableWithDevice(self):\n with self.test_session():\n with tf.variable_scope('A'):\n a = tf.contrib.framework.variable('a', [], device='cpu:0')\n b = tf.contrib.framework.variable('b', [], device='cpu:1')\n self.assertDeviceEqual(a.device, 'cpu:0')\n self.assertDeviceEqual(b.device, 'cpu:1')\n\n def testVariableWithDeviceFromScope(self):\n with self.test_session():\n with tf.device('/cpu:0'):\n a = tf.contrib.framework.variable('a', [])\n b = tf.contrib.framework.variable('b', [], device='cpu:1')\n self.assertDeviceEqual(a.device, 'cpu:0')\n self.assertDeviceEqual(b.device, 'cpu:1')\n\n def testVariableWithDeviceFunction(self):\n class DevFn(object):\n\n def __init__(self):\n self.counter = -1\n\n def __call__(self, op):\n self.counter += 1\n return 'cpu:%d' % self.counter\n\n with self.test_session():\n with tf.contrib.framework.arg_scope([tf.contrib.framework.variable],\n device=DevFn()):\n a = tf.contrib.framework.variable('a', [])\n b = tf.contrib.framework.variable('b', [])\n c = tf.contrib.framework.variable('c', [], device='cpu:12')\n d = tf.contrib.framework.variable('d', [])\n with tf.device('cpu:99'):\n e_init = tf.constant(12)\n e = tf.contrib.framework.variable('e', initializer=e_init)\n self.assertDeviceEqual(a.device, 'cpu:0')\n self.assertDeviceEqual(a.initial_value.device, 'cpu:0')\n self.assertDeviceEqual(b.device, 'cpu:1')\n self.assertDeviceEqual(b.initial_value.device, 'cpu:1')\n self.assertDeviceEqual(c.device, 'cpu:12')\n self.assertDeviceEqual(c.initial_value.device, 'cpu:12')\n self.assertDeviceEqual(d.device, 'cpu:2')\n self.assertDeviceEqual(d.initial_value.device, 'cpu:2')\n self.assertDeviceEqual(e.device, 'cpu:3')\n self.assertDeviceEqual(e.initial_value.device, 'cpu:99')\n\n def testVariableWithReplicaDeviceSetter(self):\n with self.test_session():\n with tf.device(tf.train.replica_device_setter(ps_tasks=2)):\n a = tf.contrib.framework.variable('a', [])\n b = tf.contrib.framework.variable('b', [])\n c = tf.contrib.framework.variable('c', [], device='cpu:12')\n d = tf.contrib.framework.variable('d', [])\n with tf.device('cpu:99'):\n e_init = tf.constant(12)\n e = tf.contrib.framework.variable('e', initializer=e_init)\n # The values below highlight how the 
replica_device_setter puts initial\n # values on the worker job, and how it merges explicit devices.\n self.assertDeviceEqual(a.device, '/job:ps/task:0/cpu:0')\n self.assertDeviceEqual(a.initial_value.device, a.device)\n self.assertDeviceEqual(b.device, '/job:ps/task:1/cpu:0')\n self.assertDeviceEqual(b.initial_value.device, b.device)\n self.assertDeviceEqual(c.device, '/job:ps/task:0/cpu:12')\n self.assertDeviceEqual(c.initial_value.device, c.device)\n self.assertDeviceEqual(d.device, '/job:ps/task:1/cpu:0')\n self.assertDeviceEqual(d.initial_value.device, d.device)\n self.assertDeviceEqual(e.device, '/job:ps/task:0/cpu:0')\n self.assertDeviceEqual(e.initial_value.device, '/job:worker/cpu:99')\n\n def testVariableWithVariableDeviceChooser(self):\n\n with tf.Graph().as_default():\n device_fn = tf.contrib.framework.VariableDeviceChooser(num_tasks=2)\n with tf.contrib.framework.arg_scope([tf.contrib.framework.variable],\n device=device_fn):\n a = tf.contrib.framework.variable('a', [])\n b = tf.contrib.framework.variable('b', [])\n c = tf.contrib.framework.variable('c', [], device='cpu:12')\n d = tf.contrib.framework.variable('d', [])\n with tf.device('cpu:99'):\n e_init = tf.constant(12)\n e = tf.contrib.framework.variable('e', initializer=e_init)\n # The values below highlight how the VariableDeviceChooser puts initial\n # values on the same device as the variable job.\n self.assertDeviceEqual(a.device, '/job:ps/task:0/cpu:0')\n self.assertDeviceEqual(a.initial_value.device, a.device)\n self.assertDeviceEqual(b.device, '/job:ps/task:1/cpu:0')\n self.assertDeviceEqual(b.initial_value.device, b.device)\n self.assertDeviceEqual(c.device, '/cpu:12')\n self.assertDeviceEqual(c.initial_value.device, c.device)\n self.assertDeviceEqual(d.device, '/job:ps/task:0/cpu:0')\n self.assertDeviceEqual(d.initial_value.device, d.device)\n self.assertDeviceEqual(e.device, '/job:ps/task:1/cpu:0')\n self.assertDeviceEqual(e.initial_value.device, '/cpu:99')\n\n def testVariableGPUPlacement(self):\n\n with tf.Graph().as_default():\n device_fn = tf.contrib.framework.VariableDeviceChooser(device_type='GPU')\n with tf.contrib.framework.arg_scope([tf.contrib.framework.variable],\n device=device_fn):\n a = tf.contrib.framework.variable('a', [])\n b = tf.contrib.framework.variable('b', [])\n c = tf.contrib.framework.variable('c', [], device='cpu:12')\n d = tf.contrib.framework.variable('d', [])\n with tf.device('cpu:99'):\n e_init = tf.constant(12)\n e = tf.contrib.framework.variable('e', initializer=e_init)\n # The values below highlight how the VariableDeviceChooser puts initial\n # values on the same device as the variable job.\n self.assertDeviceEqual(a.device, '/gpu:0')\n self.assertDeviceEqual(a.initial_value.device, a.device)\n self.assertDeviceEqual(b.device, '/gpu:0')\n self.assertDeviceEqual(b.initial_value.device, b.device)\n self.assertDeviceEqual(c.device, '/cpu:12')\n self.assertDeviceEqual(c.initial_value.device, c.device)\n self.assertDeviceEqual(d.device, '/gpu:0')\n self.assertDeviceEqual(d.initial_value.device, d.device)\n self.assertDeviceEqual(e.device, '/gpu:0')\n self.assertDeviceEqual(e.initial_value.device, '/cpu:99')\n\n\nclass ModelVariablesTest(tf.test.TestCase):\n\n def testNameAndShape(self):\n with self.test_session():\n with tf.variable_scope('A'):\n a = tf.contrib.framework.model_variable('a', [5])\n self.assertEquals(a.op.name, 'A/a')\n self.assertListEqual(a.get_shape().as_list(), [5])\n self.assertListEqual([a], tf.contrib.framework.get_model_variables('A'))\n\n def 
testNotInLocalVariables(self):\n with self.test_session():\n with tf.variable_scope('A'):\n a = tf.contrib.framework.model_variable('a', [5])\n self.assertTrue(a in tf.all_variables())\n self.assertFalse(a in tf.local_variables())\n\n def testGetVariablesReturns(self):\n with self.test_session():\n with tf.variable_scope('A'):\n a = tf.contrib.framework.model_variable('a', [5])\n with tf.variable_scope('B'):\n b = tf.contrib.framework.model_variable('a', [5])\n self.assertEquals([a], tf.contrib.framework.get_variables('A'))\n self.assertEquals([b], tf.contrib.framework.get_variables('B'))\n\n def testGetModelVariables(self):\n with self.test_session():\n with tf.variable_scope('A'):\n a = tf.contrib.framework.model_variable('a', [5])\n with tf.variable_scope('B'):\n b = tf.contrib.framework.model_variable('a', [5])\n self.assertEquals([a], tf.contrib.framework.get_model_variables('A'))\n self.assertEquals([b], tf.contrib.framework.get_model_variables('B'))\n\n def testGetLocalVariables(self):\n with self.test_session():\n with tf.variable_scope('A'):\n _ = tf.contrib.framework.model_variable('a', [5])\n with tf.variable_scope('B'):\n _ = tf.contrib.framework.model_variable('a', [5])\n self.assertEquals([], tf.contrib.framework.get_local_variables('A'))\n self.assertEquals([], tf.contrib.framework.get_local_variables('B'))\n\n def testInitializedVariableValue(self):\n with self.test_session() as sess:\n a = tf.contrib.framework.model_variable('a', [5], initializer=tf.ones)\n sess.run(tf.initialize_all_variables())\n self.assertAllEqual(a.eval(), [1]*5)\n\n def testDeviceFn(self):\n class DevFn(object):\n\n def __init__(self):\n self.counter = -1\n\n def __call__(self, op):\n self.counter += 1\n return '/cpu:%d' % self.counter\n\n with tf.Graph().as_default():\n with tf.contrib.framework.arg_scope([tf.contrib.framework.model_variable],\n device=DevFn()):\n a = tf.contrib.framework.model_variable('a', [5])\n b = tf.contrib.framework.model_variable('b', [20])\n self.assertDeviceEqual(a.device, '/cpu:0')\n self.assertDeviceEqual(a.initial_value.device, '/cpu:0')\n self.assertDeviceEqual(b.device, '/cpu:1')\n self.assertDeviceEqual(b.initial_value.device, '/cpu:1')\n\n def testVariableWithVariableDeviceChooser(self):\n\n with tf.Graph().as_default():\n device_fn = tf.contrib.framework.VariableDeviceChooser()\n with tf.contrib.framework.arg_scope([tf.contrib.framework.model_variable],\n device=device_fn):\n a = tf.contrib.framework.model_variable('a', [5])\n b = tf.contrib.framework.model_variable('b', [20])\n self.assertDeviceEqual(a.device, 'cpu:0')\n self.assertDeviceEqual(a.initial_value.device, a.device)\n self.assertDeviceEqual(b.device, 'cpu:0')\n self.assertDeviceEqual(b.initial_value.device, b.device)\n\n\nclass GetVariablesCollections(tf.test.TestCase):\n\n def testVariableCollection(self):\n with self.test_session():\n a = tf.contrib.framework.variable('a', [], collections='A')\n b = tf.contrib.framework.variable('b', [], collections='B')\n self.assertEquals(a, tf.get_collection('A')[0])\n self.assertEquals(b, tf.get_collection('B')[0])\n\n def testVariableCollections(self):\n with self.test_session():\n a = tf.contrib.framework.variable('a', [], collections=['A', 'C'])\n b = tf.contrib.framework.variable('b', [], collections=['B', 'C'])\n self.assertEquals(a, tf.get_collection('A')[0])\n self.assertEquals(b, tf.get_collection('B')[0])\n self.assertListEqual([a, b], tf.get_collection('C'))\n\n def testVariableCollectionsWithArgScope(self):\n with self.test_session():\n with 
tf.contrib.framework.arg_scope([tf.contrib.framework.variable],\n collections='A'):\n a = tf.contrib.framework.variable('a', [])\n b = tf.contrib.framework.variable('b', [])\n self.assertListEqual([a, b], tf.get_collection('A'))\n\n def testVariableCollectionsWithArgScopeNested(self):\n with self.test_session():\n with tf.contrib.framework.arg_scope([tf.contrib.framework.variable],\n collections='A'):\n a = tf.contrib.framework.variable('a', [])\n with tf.contrib.framework.arg_scope([tf.contrib.framework.variable],\n collections='B'):\n b = tf.contrib.framework.variable('b', [])\n self.assertEquals(a, tf.get_collection('A')[0])\n self.assertEquals(b, tf.get_collection('B')[0])\n\n def testVariableCollectionsWithArgScopeNonNested(self):\n with self.test_session():\n with tf.contrib.framework.arg_scope([tf.contrib.framework.variable],\n collections='A'):\n a = tf.contrib.framework.variable('a', [])\n with tf.contrib.framework.arg_scope([tf.contrib.framework.variable],\n collections='B'):\n b = tf.contrib.framework.variable('b', [])\n tf.contrib.framework.variable('c', [])\n self.assertListEqual([a], tf.get_collection('A'))\n self.assertListEqual([b], tf.get_collection('B'))\n\n def testVariableRestoreWithArgScopeNested(self):\n with self.test_session():\n a = tf.contrib.framework.variable('a', [])\n with tf.contrib.framework.arg_scope([tf.contrib.framework.variable],\n trainable=False,\n collections=['A', 'B']):\n b = tf.contrib.framework.variable('b', [])\n c = tf.contrib.framework.variable('c', [], trainable=False)\n self.assertEquals([a, c], tf.contrib.framework.get_variables_to_restore())\n self.assertEquals([a], tf.trainable_variables())\n self.assertEquals([b], tf.get_collection('A'))\n self.assertEquals([b], tf.get_collection('B'))\n\n\nclass GetVariablesBySuffixTest(tf.test.TestCase):\n\n def testGetVariableGivenNameScoped(self):\n with self.test_session():\n with tf.variable_scope('A'):\n a = tf.contrib.framework.variable('a', [5])\n b = tf.contrib.framework.variable('b', [5])\n self.assertEquals([a],\n tf.contrib.framework.get_variables_by_suffix('a'))\n self.assertEquals([b],\n tf.contrib.framework.get_variables_by_suffix('b'))\n\n def testGetVariableWithScope(self):\n with self.test_session():\n with tf.variable_scope('A'):\n a = tf.contrib.framework.variable('a', [5])\n fooa = tf.contrib.framework.variable('fooa', [5])\n with tf.variable_scope('B'):\n a2 = tf.contrib.framework.variable('a', [5])\n matched_variables = tf.contrib.framework.get_variables_by_suffix('a')\n self.assertEquals([a, fooa, a2], matched_variables)\n matched_variables = tf.contrib.framework.get_variables_by_suffix('/a')\n self.assertEquals([a, a2], matched_variables)\n matched_variables = tf.contrib.framework.get_variables_by_suffix(\n 'a', scope='A')\n self.assertEquals([a, fooa], matched_variables)\n\n def testGetVariableWithoutScope(self):\n with self.test_session():\n a = tf.contrib.framework.variable('a', [5])\n fooa = tf.contrib.framework.variable('fooa', [5])\n b_a = tf.contrib.framework.variable('B/a', [5])\n matched_variables = tf.contrib.framework.get_variables_by_suffix('a')\n self.assertEquals([a, fooa, b_a], matched_variables)\n matched_variables = tf.contrib.framework.get_variables_by_suffix('fooa')\n self.assertEquals([fooa], matched_variables)\n\n\nclass GetVariablesByNameTest(tf.test.TestCase):\n\n def testGetVariableGivenNameScoped(self):\n with self.test_session():\n with tf.variable_scope('A'):\n a = tf.contrib.framework.variable('a', [5])\n b = tf.contrib.framework.variable('b', [5])\n 
self.assertEquals([a], tf.contrib.framework.get_variables_by_name('a'))\n self.assertEquals([b], tf.contrib.framework.get_variables_by_name('b'))\n\n def testGetVariableWithScope(self):\n with self.test_session():\n with tf.variable_scope('A'):\n a = tf.contrib.framework.variable('a', [5])\n fooa = tf.contrib.framework.variable('fooa', [5])\n with tf.variable_scope('B'):\n a2 = tf.contrib.framework.variable('a', [5])\n matched_variables = tf.contrib.framework.get_variables_by_name('a')\n self.assertEquals([a, a2], matched_variables)\n matched_variables = tf.contrib.framework.get_variables_by_name('fooa')\n self.assertEquals([fooa], matched_variables)\n matched_variables = tf.contrib.framework.get_variables_by_name('/a')\n self.assertEquals([], matched_variables)\n matched_variables = tf.contrib.framework.get_variables_by_name('a',\n scope='A')\n self.assertEquals([a], matched_variables)\n\n def testGetVariableWithoutScope(self):\n with self.test_session():\n a = tf.contrib.framework.variable('a', [5])\n fooa = tf.contrib.framework.variable('fooa', [5])\n b_a = tf.contrib.framework.variable('B/a', [5])\n matched_variables = tf.contrib.framework.get_variables_by_name('a')\n self.assertEquals([a, b_a], matched_variables)\n matched_variables = tf.contrib.framework.get_variables_by_name('fooa')\n self.assertEquals([fooa], matched_variables)\n\n\nclass AssignFromValuesTest(tf.test.TestCase):\n\n def testNoScopes(self):\n init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1))\n init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2))\n\n with self.test_session() as sess:\n initializer = tf.truncated_normal_initializer(stddev=.1)\n var0 = tf.contrib.framework.variables.variable(\n 'my_var0', shape=[1, 3, 1], initializer=initializer)\n var1 = tf.contrib.framework.variables.variable(\n 'my_var1', shape=[2, 1, 2], initializer=initializer)\n\n var_names_to_values = {'my_var0': init_value0, 'my_var1': init_value1}\n assign_op, feed_dict = tf.contrib.framework.variables.assign_from_values(\n var_names_to_values)\n\n # Initialize the variables.\n sess.run(tf.initialize_all_variables())\n\n # Perform the assignment.\n sess.run(assign_op, feed_dict)\n\n # Request and test the variable values:\n var0, var1 = sess.run([var0, var1])\n self.assertAllEqual(init_value0, var0)\n self.assertAllEqual(init_value1, var1)\n\n def testWithScopes(self):\n init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1))\n init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2))\n\n with self.test_session() as sess:\n initializer = tf.truncated_normal_initializer(stddev=.1)\n\n with tf.variable_scope('my_model/my_layer0'):\n var0 = tf.contrib.framework.variables.variable(\n 'my_var0', shape=[1, 3, 1], initializer=initializer)\n with tf.variable_scope('my_model/my_layer1'):\n var1 = tf.contrib.framework.variables.variable(\n 'my_var1', shape=[2, 1, 2], initializer=initializer)\n\n var_names_to_values = {'my_model/my_layer0/my_var0': init_value0,\n 'my_model/my_layer1/my_var1': init_value1}\n assign_op, feed_dict = tf.contrib.framework.variables.assign_from_values(\n var_names_to_values)\n\n # Initialize the variables.\n sess.run(tf.initialize_all_variables())\n\n # Perform the assignment.\n sess.run(assign_op, feed_dict)\n\n # Request and test the variable values:\n var0, var1 = sess.run([var0, var1])\n self.assertAllEqual(init_value0, var0)\n self.assertAllEqual(init_value1, var1)\n\n\nclass AssignFromCheckpointTest(tf.test.TestCase):\n\n def create_checkpoint_from_values(self, var_names_to_values, 
checkpoint_dir,\n global_step=None):\n \"\"\"Creates a checkpoint from a mapping of name to values in model_dir.\n\n Args:\n var_names_to_values: a map from variable names to values.\n checkpoint_dir: the directory where the checkpoint will be saved.\n global_step: the global step used to save the checkpoint.\n\n Returns:\n the model_path to the checkpoint.\n \"\"\"\n var_list = []\n with tf.Session('', graph=tf.Graph()) as sess:\n # Create a set of variables to save in the checkpoint.\n for var_name in var_names_to_values:\n var_value = var_names_to_values[var_name]\n var_list.append(tf.Variable(var_value, name=var_name))\n saver = tf.train.Saver(var_list)\n init_op = tf.initialize_variables(var_list)\n sess.run(init_op)\n # Save the initialized values in the file at 'checkpoint_dir'\n return saver.save(sess, checkpoint_dir, global_step=global_step)\n\n def testLoadExistingVariables(self):\n init_value0 = 10.0\n init_value1 = 20.0\n var_names_to_values = {'v0': init_value0, 'v1': init_value1}\n\n model_dir = os.path.join(self.get_temp_dir(), 'model')\n with self.test_session() as sess:\n model_path = self.create_checkpoint_from_values(var_names_to_values,\n model_dir)\n var0 = tf.contrib.framework.variables.variable('my_var0', shape=[])\n var1 = tf.contrib.framework.variables.variable('my_var1', shape=[])\n\n vars_to_restore = {'v0': var0, 'v1': var1}\n op, feed_dict = tf.contrib.framework.variables.assign_from_checkpoint(\n model_path, vars_to_restore)\n\n # Initialize the variables.\n sess.run(tf.initialize_all_variables())\n\n # Perform the assignment.\n sess.run(op, feed_dict)\n\n # Request and test the variable values:\n self.assertEqual(init_value0, var0.eval())\n self.assertEqual(init_value1, var1.eval())\n\n def testRaisesValueErrorIfAVariableIsntFound(self):\n init_value0 = 10.0\n init_value1 = 20.0\n var_names_to_values = {'v0': init_value0, 'v1': init_value1}\n\n model_dir = os.path.join(self.get_temp_dir(), 'model')\n with self.test_session():\n model_path = self.create_checkpoint_from_values(var_names_to_values,\n model_dir)\n var0 = tf.contrib.framework.variables.variable('my_var0', shape=[])\n var1 = tf.contrib.framework.variables.variable('my_var1', shape=[])\n\n vars_to_restore = {'v0_fake': var0, 'v1': var1}\n\n with self.assertRaises(ValueError):\n tf.contrib.framework.variables.assign_from_checkpoint(model_path,\n vars_to_restore)\n\n def testInitFromCheckpointWithScopes(self):\n init_value0 = np.asarray([1.0, 3.0, 9.0],\n dtype=np.float32).reshape((1, 3, 1))\n init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0],\n dtype=np.float32).reshape((2, 1, 2))\n\n var_names_to_values = {'layer0/v0': init_value0, 'layer1/v1': init_value1}\n model_dir = os.path.join(self.get_temp_dir(), 'model')\n with self.test_session() as sess:\n model_path = self.create_checkpoint_from_values(var_names_to_values,\n model_dir)\n with tf.variable_scope('my_model/my_layer0'):\n var0 = tf.contrib.framework.variables.variable('my_var0',\n shape=init_value0.shape)\n with tf.variable_scope('my_model/my_layer1'):\n var1 = tf.contrib.framework.variables.variable('my_var1',\n shape=init_value1.shape)\n\n vars_to_restore = {'layer0/v0': var0, 'layer1/v1': var1}\n op, feed_dict = tf.contrib.framework.variables.assign_from_checkpoint(\n model_path,\n vars_to_restore)\n\n # Initialize the variables.\n sess.run(tf.initialize_all_variables())\n\n # Perform the assignment.\n sess.run(op, feed_dict)\n\n # Request and test the variable values:\n self.assertAllEqual(init_value0, var0.eval())\n 
self.assertAllEqual(init_value1, var1.eval())\n\nif __name__ == '__main__':\n tf.test.main()\n", "# pylint: disable=g-bad-file-header\n# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Multi-output tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport random\n\nimport numpy as np\n\nimport tensorflow as tf\nfrom tensorflow.contrib.learn.python import learn\nfrom tensorflow.contrib.learn.python.learn.estimators._sklearn import mean_squared_error\n\n\nclass MultiOutputTest(tf.test.TestCase):\n \"\"\"Multi-output tests.\"\"\"\n\n def testMultiRegression(self):\n random.seed(42)\n rng = np.random.RandomState(1)\n x = np.sort(200 * rng.rand(100, 1) - 100, axis=0)\n y = np.array([np.pi * np.sin(x).ravel(), np.pi * np.cos(x).ravel()]).T\n regressor = learn.TensorFlowLinearRegressor(learning_rate=0.01)\n regressor.fit(x, y)\n score = mean_squared_error(regressor.predict(x), y)\n self.assertLess(score, 10, \"Failed with score = {0}\".format(score))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for tensorflow.learning.training_ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework.test_util import TensorFlowTestCase\nfrom tensorflow.python.ops import constant_op\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.training import training_ops\n\n\nclass TrainingOpsTest(TensorFlowTestCase):\n\n def _toType(self, dtype):\n if dtype == np.float16:\n return tf.float16\n elif dtype == np.float32:\n return tf.float32\n elif dtype == np.float64:\n return tf.float64\n elif dtype == np.int32:\n return tf.int32\n elif dtype == np.int64:\n return tf.int64\n else:\n assert False, (dtype)\n\n def _testTypes(self, x, alpha, delta, use_gpu=None):\n self.setUp()\n with self.test_session(use_gpu=use_gpu):\n var = variables.Variable(x)\n variables.initialize_all_variables().run()\n self.assertAllCloseAccordingToType(x, var.eval())\n apply_sgd = training_ops.apply_gradient_descent(var, alpha, delta)\n out = apply_sgd.eval()\n self.assertShapeEqual(out, apply_sgd)\n self.assertAllCloseAccordingToType(x - alpha * delta, out)\n\n def testApplyGradientDescent(self):\n for (dtype, use_gpu) in itertools.product(\n [np.float16, np.float32, np.float64], [False, True]):\n x = np.arange(100).astype(dtype)\n alpha = np.array(2.0).astype(dtype)\n delta = np.arange(100).astype(dtype)\n self._testTypes(x, alpha, delta, use_gpu)\n\n def _testTypesForAdagrad(self, x, y, lr, grad, use_gpu=None):\n self.setUp()\n with self.test_session(use_gpu=use_gpu):\n var = variables.Variable(x)\n accum = variables.Variable(y)\n variables.initialize_all_variables().run()\n\n self.assertAllCloseAccordingToType(x, var.eval())\n apply_adagrad = training_ops.apply_adagrad(var, accum, lr, grad)\n out = apply_adagrad.eval()\n self.assertShapeEqual(out, apply_adagrad)\n self.assertAllCloseAccordingToType(\n x - lr * grad * (y + grad * grad) ** (-0.5), out)\n self.assertAllCloseAccordingToType(y + grad * grad, accum.eval())\n\n def _testTypesForFtrl(self, x, y, z, lr, grad, use_gpu=None, l1=0.0,\n l2=0.0, lr_power=-0.5):\n self.setUp()\n with self.test_session(use_gpu=use_gpu):\n var = variables.Variable(x)\n accum = variables.Variable(y)\n linear = variables.Variable(z)\n variables.initialize_all_variables().run()\n\n self.assertAllCloseAccordingToType(x, var.eval())\n apply_ftrl = training_ops.apply_ftrl(var, accum, linear, grad, lr, l1, l2,\n lr_power)\n out = apply_ftrl.eval()\n self.assertShapeEqual(out, apply_ftrl)\n accum_update = y + grad * grad\n linear_update = z + grad - (accum_update ** (-lr_power) - y ** (\n -lr_power)) / lr * x\n quadratic = 1.0 / (accum_update ** (lr_power) * 
lr) + 2 * l2\n expected_out = np.array([(np.sign(\n linear_update[i]) * l1 - linear_update[i]) / (\n quadratic[i]) if np.abs(\n linear_update[i]) > l1 else 0.0 for i in range(\n linear_update.size)])\n self.assertAllCloseAccordingToType(accum_update, accum.eval())\n if x.dtype == np.float16:\n # The calculations here really are not very precise in float16.\n self.assertAllClose(linear_update, linear.eval(), rtol=2e-2, atol=2e-2)\n self.assertAllClose(expected_out, out, rtol=2e-2, atol=2e-2)\n else:\n self.assertAllClose(linear_update, linear.eval())\n self.assertAllClose(expected_out, out)\n\n def testApplyAdagrad(self):\n for (dtype, use_gpu) in itertools.product(\n [np.float16, np.float32, np.float64], [False, True]):\n x = np.arange(100).astype(dtype)\n y = np.arange(1, 101).astype(dtype)\n lr = np.array(2.0).astype(dtype)\n grad = np.arange(100).astype(dtype)\n self._testTypesForAdagrad(x, y, lr, grad, use_gpu)\n\n def testApplyFtrl(self):\n for dtype in [np.float16, np.float32, np.float64]:\n x = np.arange(100).astype(dtype)\n y = np.arange(1, 101).astype(dtype)\n z = np.arange(102, 202).astype(dtype)\n lr = np.array(2.0).astype(dtype)\n l1 = np.array(3.0).astype(dtype)\n l2 = np.array(4.0).astype(dtype)\n grad = np.arange(100).astype(dtype)\n self._testTypesForFtrl(x, y, z, lr, grad, use_gpu=False, l1=l1, l2=l2)\n\n def _testTypesForSparseAdagrad(self, x, y, lr, grad, indices):\n self.setUp()\n with self.test_session(use_gpu=False):\n var = variables.Variable(x)\n accum = variables.Variable(y)\n variables.initialize_all_variables().run()\n\n self.assertAllCloseAccordingToType(x, var.eval())\n sparse_apply_adagrad = training_ops.sparse_apply_adagrad(\n var, accum, lr, grad,\n constant_op.constant(indices, self._toType(indices.dtype)))\n out = sparse_apply_adagrad.eval()\n self.assertShapeEqual(out, sparse_apply_adagrad)\n\n for (i, index) in enumerate(indices):\n self.assertAllCloseAccordingToType(\n x[index] - lr * grad[i] * (y[index] + grad[i] * grad[i]) ** (-0.5),\n var.eval()[index])\n self.assertAllCloseAccordingToType(y[index] + grad[i] * grad[i],\n accum.eval()[index])\n\n def _testTypesForSparseFtrl(self, x, y, z, lr, grad, indices, l1=0.0, l2=0.0,\n lr_power=-0.5):\n self.setUp()\n with self.test_session(use_gpu=False):\n var = variables.Variable(x)\n accum = variables.Variable(y)\n linear = variables.Variable(z)\n variables.initialize_all_variables().run()\n\n self.assertAllCloseAccordingToType(x, var.eval())\n sparse_apply_ftrl = training_ops.sparse_apply_ftrl(\n var, accum, linear, grad,\n constant_op.constant(indices, self._toType(indices.dtype)),\n lr, l1, l2, lr_power=lr_power)\n out = sparse_apply_ftrl.eval()\n self.assertShapeEqual(out, sparse_apply_ftrl)\n\n for (i, index) in enumerate(indices):\n self.assertAllCloseAccordingToType(\n x[index] - lr * grad[i] * (y[index] + grad[i] * grad[i]) ** (\n lr_power),\n var.eval()[index])\n self.assertAllCloseAccordingToType(y[index] + grad[i] * grad[i],\n accum.eval()[index])\n\n def testSparseApplyAdagrad(self):\n for (dtype, index_type) in itertools.product(\n [np.float16, np.float32, np.float64], [np.int32, np.int64]):\n x_val = [np.arange(10), np.arange(10, 20), np.arange(20, 30)]\n y_val = [np.arange(1, 11), np.arange(11, 21), np.arange(21, 31)]\n x = np.array(x_val).astype(dtype)\n y = np.array(y_val).astype(dtype)\n lr = np.array(2.0).astype(dtype)\n grad_val = [np.arange(10), np.arange(10)]\n grad = np.array(grad_val).astype(dtype)\n indices = np.array([0, 2]).astype(index_type)\n self._testTypesForSparseAdagrad(x, y, 
lr, grad, indices)\n\n def testSparseApplyAdagradDim1(self):\n for (dtype, index_type) in itertools.product(\n [np.float16, np.float32, np.float64], [np.int32, np.int64]):\n x_val = [[1.0], [2.0], [3.0]]\n y_val = [[4.0], [5.0], [6.0]]\n x = np.array(x_val).astype(dtype)\n y = np.array(y_val).astype(dtype)\n lr = np.array(2.0).astype(dtype)\n grad_val = [[1.5], [2.5]]\n grad = np.array(grad_val).astype(dtype)\n indices = np.array([0, 2]).astype(index_type)\n self._testTypesForSparseAdagrad(x, y, lr, grad, indices)\n\n def testSparseApplyFtrlDim1(self):\n for (dtype, index_type) in itertools.product(\n [np.float16, np.float32, np.float64], [np.int32, np.int64]):\n x_val = [[0.0], [0.0], [0.0]]\n y_val = [[4.0], [5.0], [6.0]]\n z_val = [[0.0], [0.0], [0.0]]\n x = np.array(x_val).astype(dtype)\n y = np.array(y_val).astype(dtype)\n z = np.array(z_val).astype(dtype)\n lr = np.array(2.0).astype(dtype)\n grad_val = [[1.5], [2.5]]\n grad = np.array(grad_val).astype(dtype)\n indices = np.array([0, 2]).astype(index_type)\n self._testTypesForSparseFtrl(x, y, z, lr, grad, indices)\n\n def testApplyAdam(self):\n for dtype, use_gpu in itertools.product(\n [np.float16, np.float32, np.float64], [False, True]):\n var = np.arange(100).astype(dtype)\n m = np.arange(1, 101).astype(dtype)\n v = np.arange(101, 201).astype(dtype)\n grad = np.arange(100).astype(dtype)\n self._testTypesForAdam(var, m, v, grad, use_gpu)\n\n def _testTypesForAdam(self, var, m, v, grad, use_gpu):\n self.setUp()\n with self.test_session(use_gpu=use_gpu):\n var_t = variables.Variable(var)\n m_t = variables.Variable(m)\n v_t = variables.Variable(v)\n\n t = 1\n beta1 = np.array(0.9, dtype=var.dtype)\n beta2 = np.array(0.999, dtype=var.dtype)\n beta1_power = beta1**t\n beta2_power = beta2**t\n lr = np.array(0.001, dtype=var.dtype)\n epsilon = np.array(1e-8, dtype=var.dtype)\n beta1_t = constant_op.constant(beta1, self._toType(var.dtype), [])\n beta2_t = constant_op.constant(beta2, self._toType(var.dtype), [])\n beta1_power_t = variables.Variable(beta1_power)\n beta2_power_t = variables.Variable(beta2_power)\n lr_t = constant_op.constant(lr, self._toType(var.dtype), [])\n epsilon_t = constant_op.constant(epsilon, self._toType(var.dtype), [])\n variables.initialize_all_variables().run()\n\n self.assertAllCloseAccordingToType(var, var_t.eval())\n new_var, _, _ = self._adamUpdateNumpy(var, grad, t, m, v,\n lr, beta1, beta2, epsilon)\n apply_adam = training_ops.apply_adam(var_t, m_t, v_t, beta1_power_t,\n beta2_power_t, lr_t,\n beta1_t, beta2_t, epsilon_t, grad)\n out = apply_adam.eval()\n self.assertShapeEqual(out, apply_adam)\n self.assertAllCloseAccordingToType(new_var, out)\n\n def _adamUpdateNumpy(self, param, g_t, t, m, v, alpha, beta1,\n beta2, epsilon):\n alpha_t = alpha * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)\n\n m_t = beta1 * m + (1 - beta1) * g_t\n v_t = beta2 * v + (1 - beta2) * g_t * g_t\n\n param_t = param - alpha_t * m_t / (np.sqrt(v_t) + epsilon)\n return param_t, m_t, v_t\n\nif __name__ == '__main__':\n googletest.main()\n" ]
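Editor's note: the numpy helpers in the training-ops test above (`_testTypes`, `_testTypesForAdagrad`, `_adamUpdateNumpy`) are the references against which the fused `training_ops` kernels are asserted. For readability, the update rules they encode are summarized below in LaTeX; the symbols are renamed for this summary only (theta is the variable, g the gradient, a the Adagrad accumulator) and are not identifiers from the code.

```latex
% Gradient descent (apply_gradient_descent):
\theta \leftarrow \theta - \alpha\,\delta
% Adagrad (apply_adagrad), with the accumulator then updated to a + g^2:
\theta \leftarrow \theta - \eta\, g\,\bigl(a + g^2\bigr)^{-1/2}
% Adam (apply_adam, mirroring _adamUpdateNumpy):
\alpha_t = \alpha\,\frac{\sqrt{1-\beta_2^{t}}}{1-\beta_1^{t}}, \qquad
m_t = \beta_1 m + (1-\beta_1)\,g, \qquad
v_t = \beta_2 v + (1-\beta_2)\,g^2, \qquad
\theta_t = \theta - \frac{\alpha_t\, m_t}{\sqrt{v_t}+\epsilon}
```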
[ [ "tensorflow.python.training.input.batch", "tensorflow.python.training.input.shuffle_batch" ], [ "tensorflow.python.util.compat.as_bytes" ], [ "tensorflow.python.framework.ops.op_scope", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.math_ops.cast" ], [ "tensorflow.Graph", "tensorflow.constant", "tensorflow.python.ops.logging_ops.Assert", "tensorflow.python.framework.ops._NodeDef", "tensorflow.python.framework.errors.UnauthenticatedError", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.framework.test_util.IsGoogleCudaEnabled", "tensorflow.python.platform.googletest.main", "tensorflow.python.ops.constant_op.constant", "tensorflow.test.assert_equal_graph_def", "numpy.array", "tensorflow.core.framework.graph_pb2.GraphDef" ], [ "tensorflow.device", "numpy.asarray", "tensorflow.contrib.framework.get_variables_by_name", "tensorflow.contrib.framework.variables.assign_from_values", "tensorflow.Graph", "tensorflow.all_variables", "tensorflow.Variable", "tensorflow.get_collection", "tensorflow.test.main", "tensorflow.truncated_normal_initializer", "tensorflow.contrib.framework.local_variable", "tensorflow.contrib.framework.VariableDeviceChooser", "tensorflow.initialize_all_variables", "tensorflow.trainable_variables", "tensorflow.train.Saver", "tensorflow.contrib.framework.create_global_step", "tensorflow.contrib.framework.variable", "tensorflow.contrib.framework.get_variables_to_restore", "tensorflow.contrib.framework.get_model_variables", "tensorflow.contrib.framework.variables.assign_from_checkpoint", "tensorflow.local_variables", "tensorflow.initialize_variables", "tensorflow.contrib.framework.get_variables_by_suffix", "tensorflow.contrib.framework.variables.variable", "tensorflow.constant", "tensorflow.contrib.framework.get_variables", "tensorflow.contrib.framework.get_local_variables", "tensorflow.initialize_local_variables", "tensorflow.contrib.framework.arg_scope", "tensorflow.contrib.framework.get_global_step", "tensorflow.contrib.framework.model_variable", "tensorflow.train.replica_device_setter", "tensorflow.variable_scope", "tensorflow.contrib.framework.get_or_create_global_step", "tensorflow.contrib.framework.get_unique_variable" ], [ "numpy.cos", "tensorflow.test.main", "numpy.sin", "tensorflow.contrib.learn.python.learn.TensorFlowLinearRegressor", "numpy.random.RandomState" ], [ "numpy.sqrt", "numpy.abs", "tensorflow.python.ops.variables.initialize_all_variables", "numpy.arange", "numpy.sign", "tensorflow.python.ops.variables.Variable", "tensorflow.python.training.training_ops.apply_adagrad", "tensorflow.python.training.training_ops.apply_ftrl", "tensorflow.python.training.training_ops.apply_adam", "tensorflow.python.platform.googletest.main", "tensorflow.python.training.training_ops.apply_gradient_descent", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] } ]
thegodone/gcnn_keras
[ "2009b9ab9a07c1a369849478812fcc2cb9799945", "2009b9ab9a07c1a369849478812fcc2cb9799945", "2009b9ab9a07c1a369849478812fcc2cb9799945" ]
[ "kgcnn/literature/GAT.py", "kgcnn/literature/GATv2.py", "kgcnn/data/tudataset.py" ]
[ "import tensorflow as tf\nimport tensorflow.keras as ks\n\nfrom kgcnn.layers.casting import ChangeTensorType\nfrom kgcnn.layers.conv.attention import AttentionHeadGAT\nfrom kgcnn.layers.keras import Concatenate, Dense, Average, Activation\nfrom kgcnn.layers.mlp import MLP\nfrom kgcnn.layers.pool.pooling import PoolingNodes\nfrom kgcnn.utils.models import generate_embedding, update_model_kwargs\n\n# Graph Attention Networks\n# by Veličković et al. (2018)\n# https://arxiv.org/abs/1710.10903\n\nmodel_default = {'name': \"GAT\",\n 'inputs': [{'shape': (None,), 'name': \"node_attributes\", 'dtype': 'float32', 'ragged': True},\n {'shape': (None,), 'name': \"edge_attributes\", 'dtype': 'float32', 'ragged': True},\n {'shape': (None, 2), 'name': \"edge_indices\", 'dtype': 'int64', 'ragged': True}],\n 'input_embedding': {\"node\": {\"input_dim\": 95, \"output_dim\": 64},\n \"edge\": {\"input_dim\": 5, \"output_dim\": 64}},\n 'output_embedding': 'graph',\n 'output_mlp': {\"use_bias\": [True, True, False], \"units\": [25, 10, 1],\n \"activation\": ['relu', 'relu', 'sigmoid']},\n 'attention_args': {\"units\": 32, \"use_final_activation\": False, \"use_edge_features\": True,\n \"has_self_loops\": True, \"activation\": \"kgcnn>leaky_relu\", 'use_bias': True},\n 'pooling_nodes_args': {'pooling_method': 'mean'},\n 'depth': 3, 'attention_heads_num': 5,\n 'attention_heads_concat': False, 'verbose': 1\n }\n\n\n@update_model_kwargs(model_default)\ndef make_model(inputs=None,\n input_embedding=None,\n output_embedding=None,\n output_mlp=None,\n attention_args=None,\n pooling_nodes_args=None,\n depth=None,\n attention_heads_num=None,\n attention_heads_concat=None,\n **kwargs):\n \"\"\"Make GAT graph network via functional API. Default parameters can be found in :obj:`model_default`.\n\n Args:\n inputs (list): List of dictionaries unpacked in :obj:`tf.keras.layers.Input`. Order must match model definition.\n input_embedding (dict): Dictionary of embedding arguments for nodes etc. unpacked in `Embedding` layers.\n output_embedding (str): Main embedding task for graph network. Either \"node\", (\"edge\") or \"graph\".\n output_mlp (dict): Dictionary of layer arguments unpacked in the final classification `MLP` layer block.\n Defines number of model outputs and activation.\n attention_args (dict): Dictionary of layer arguments unpacked in `AttentionHeadGAT` layer.\n pooling_nodes_args (dict): Dictionary of layer arguments unpacked in `PoolingNodes` layer.\n depth (int): Number of graph embedding units or depth of the network.\n attention_heads_num (int): Number of attention heads to use.\n attention_heads_concat (bool): Whether to concat attention heads. 
Otherwise average heads.\n\n Returns:\n tf.keras.models.Model\n \"\"\"\n\n # Make input\n node_input = ks.layers.Input(**inputs[0])\n edge_input = ks.layers.Input(**inputs[1])\n edge_index_input = ks.layers.Input(**inputs[2])\n # Embedding, if no feature dimension\n n = generate_embedding(node_input, inputs[0]['shape'], input_embedding['node'])\n ed = generate_embedding(edge_input, inputs[1]['shape'], input_embedding['edge'])\n edi = edge_index_input\n\n # Model\n nk = Dense(units=attention_args[\"units\"], activation=\"linear\")(n)\n for i in range(0, depth):\n heads = [AttentionHeadGAT(**attention_args)([n, ed, edi]) for _ in range(attention_heads_num)]\n if attention_heads_concat:\n nk = Concatenate(axis=-1)(heads)\n else:\n nk = Average()(heads)\n nk = Activation(activation=attention_args[\"activation\"])(nk)\n n = nk\n\n # Output embedding choice\n if output_embedding == 'graph':\n out = PoolingNodes(**pooling_nodes_args)(n)\n out = MLP(**output_mlp)(out)\n main_output = ks.layers.Flatten()(out) # will be dense\n elif output_embedding == 'node':\n out = MLP(**output_mlp)(n)\n main_output = ChangeTensorType(input_tensor_type=\"ragged\", output_tensor_type=\"tensor\")(out)\n else:\n raise ValueError(\"Unsupported graph embedding for `GAT`\")\n\n model = tf.keras.models.Model(inputs=[node_input, edge_input, edge_index_input], outputs=main_output)\n return model\n", "import tensorflow as tf\nimport tensorflow.keras as ks\n\nfrom kgcnn.layers.casting import ChangeTensorType\nfrom kgcnn.layers.conv.attention import AttentionHeadGATV2\nfrom kgcnn.layers.keras import Concatenate, Dense, Average, Activation\nfrom kgcnn.layers.mlp import MLP\nfrom kgcnn.layers.pool.pooling import PoolingNodes\nfrom kgcnn.utils.models import generate_embedding, update_model_kwargs\n\n# Graph Attention Networks by Veličković et al. (2018)\n# https://arxiv.org/abs/1710.10903\n# Improved by\n# How Attentive are Graph Attention Networks?\n# by Brody et al. (2021)\n\nmodel_default = {'name': \"GATv2\",\n 'inputs': [{'shape': (None,), 'name': \"node_attributes\", 'dtype': 'float32', 'ragged': True},\n {'shape': (None,), 'name': \"edge_attributes\", 'dtype': 'float32', 'ragged': True},\n {'shape': (None, 2), 'name': \"edge_indices\", 'dtype': 'int64', 'ragged': True}],\n 'input_embedding': {\"node\": {\"input_dim\": 95, \"output_dim\": 64},\n \"edge\": {\"input_dim\": 5, \"output_dim\": 64}},\n 'output_embedding': 'graph',\n 'output_mlp': {\"use_bias\": [True, True, False], \"units\": [25, 10, 1],\n \"activation\": ['relu', 'relu', 'sigmoid']},\n 'attention_args': {\"units\": 32, \"use_final_activation\": False, \"use_edge_features\": True,\n \"has_self_loops\": True, \"activation\": \"kgcnn>leaky_relu\", \"use_bias\": True},\n 'pooling_nodes_args': {'pooling_method': 'mean'},\n 'depth': 3, 'attention_heads_num': 5,\n 'attention_heads_concat': False, 'verbose': 1\n }\n\n\n@update_model_kwargs(model_default)\ndef make_model(inputs=None,\n input_embedding=None,\n output_embedding=None,\n output_mlp=None,\n attention_args=None,\n pooling_nodes_args=None,\n depth=None,\n attention_heads_num=None,\n attention_heads_concat=None,\n **kwargs):\n \"\"\"Make GATv2 graph network via functional API. Default parameters can be found in :obj:`model_default`.\n\n Args:\n inputs (list): List of dictionaries unpacked in :obj:`tf.keras.layers.Input`. Order must match model definition.\n input_embedding (dict): Dictionary of embedding arguments for nodes etc. 
unpacked in `Embedding` layers.\n output_embedding (str): Main embedding task for graph network. Either \"node\", (\"edge\") or \"graph\".\n output_mlp (dict): Dictionary of layer arguments unpacked in the final classification `MLP` layer block.\n Defines number of model outputs and activation.\n attention_args (dict): Dictionary of layer arguments unpacked in `AttentionHeadGATV2` layer.\n pooling_nodes_args (dict): Dictionary of layer arguments unpacked in `PoolingNodes` layer.\n depth (int): Number of graph embedding units or depth of the network.\n attention_heads_num (int): Number of attention heads to use.\n attention_heads_concat (bool): Whether to concat attention heads. Otherwise average heads.\n\n Returns:\n tf.keras.models.Model\n \"\"\"\n\n # Make input\n node_input = ks.layers.Input(**inputs[0])\n edge_input = ks.layers.Input(**inputs[1])\n edge_index_input = ks.layers.Input(**inputs[2])\n\n # Embedding, if no feature dimension\n n = generate_embedding(node_input, inputs[0]['shape'], input_embedding['node'])\n ed = generate_embedding(edge_input, inputs[1]['shape'], input_embedding['edge'])\n edi = edge_index_input\n\n # Model\n nk = Dense(units=attention_args[\"units\"], activation=\"linear\")(n)\n for i in range(0, depth):\n heads = [AttentionHeadGATV2(**attention_args)([nk, ed, edi]) for _ in range(attention_heads_num)]\n if attention_heads_concat:\n nk = Concatenate(axis=-1)(heads)\n else:\n nk = Average()(heads)\n nk = Activation(activation=attention_args[\"activation\"])(nk)\n n = nk\n\n # Output embedding choice\n if output_embedding == 'graph':\n out = PoolingNodes(**pooling_nodes_args)(n)\n out = MLP(**output_mlp)(out)\n main_output = ks.layers.Flatten()(out) # will be dense\n elif output_embedding == 'node':\n out = MLP(**output_mlp)(n)\n main_output = ChangeTensorType(input_tensor_type=\"ragged\", output_tensor_type=\"tensor\")(out)\n else:\n raise ValueError(\"Unsupported graph embedding for `GATv2`\")\n\n # Define model output\n model = tf.keras.models.Model(inputs=[node_input, edge_input, edge_index_input], outputs=main_output)\n return model\n", "import numpy as np\nimport os\n\nfrom kgcnn.data.base import DownloadDataset, MemoryGraphDataset\n\n\n# TUDataset: A collection of benchmark datasets for learning with graphs\n# by Christopher Morris and Nils M. Kriege and Franka Bause and Kristian Kersting and Petra Mutzel and Marion Neumann\n# http://graphlearning.io\n\n\nclass GraphTUDataset(DownloadDataset, MemoryGraphDataset):\n r\"\"\"Base class for loading graph datasets published by `TU Dortmund University\n <https://chrsmrrs.github.io/datasets>`_. Datasets contain non-isomorphic graphs. This general base class has\n functionality to load TUDatasets in a generic way.\n\n .. note::\n Note that sub-classes of `GraphTUDataset` in :obj:`kgcnn.data.datasets` should still be made\n if the dataset needs more refined post-processing.
Not all datasets can provide all types of graph\n properties like `edge_attributes` etc.\n\n \"\"\"\n\n # List of datasets in TUDatasets.\n tudataset_ids = [\n # Molecules\n \"AIDS\", \"alchemy_full\", \"aspirin\", \"benzene\", \"BZR\", \"BZR_MD\", \"COX2\", \"COX2_MD\", \"DHFR\", \"DHFR_MD\", \"ER_MD\",\n \"ethanol\", \"FRANKENSTEIN\", \"malonaldehyde\", \"MCF-7\", \"MCF-7H\", \"MOLT-4\", \"MOLT-4H\", \"Mutagenicity\", \"MUTAG\",\n \"naphthalene\", \"NCI1\", \"NCI109\", \"NCI-H23\", \"NCI-H23H\", \"OVCAR-8\", \"OVCAR-8H\", \"P388\", \"P388H\", \"PC-3\", \"PC-3H\",\n \"PTC_FM\", \"PTC_FR\", \"PTC_MM\", \"PTC_MR\", \"QM9\", \"salicylic_acid\", \"SF-295\", \"SF-295H\", \"SN12C\", \"SN12CH\",\n \"SW-620\", \"SW-620H\", \"toluene\", \"Tox21_AhR_training\", \"Tox21_AhR_testing\", \"Tox21_AhR_evaluation\",\n \"Tox21_AR_training\", \"Tox21_AR_testing\", \"Tox21_AR_evaluation\", \"Tox21_AR-LBD_training\", \"Tox21_AR-LBD_testing\",\n \"Tox21_AR-LBD_evaluation\", \"Tox21_ARE_training\", \"Tox21_ARE_testing\", \"Tox21_ARE_evaluation\",\n \"Tox21_aromatase_training\", \"Tox21_aromatase_testing\", \"Tox21_aromatase_evaluation\", \"Tox21_ATAD5_training\",\n \"Tox21_ATAD5_testing\", \"Tox21_ATAD5_evaluation\", \"Tox21_ER_training\", \"Tox21_ER_testing\", \"Tox21_ER_evaluation\",\n \"Tox21_ER-LBD_training\", \"Tox21_ER-LBD_testing\", \"Tox21_ER-LBD_evaluation\", \"Tox21_HSE_training\",\n \"Tox21_HSE_testing\", \"Tox21_HSE_evaluation\", \"Tox21_MMP_training\", \"Tox21_MMP_testing\", \"Tox21_MMP_evaluation\",\n \"Tox21_p53_training\", \"Tox21_p53_testing\", \"Tox21_p53_evaluation\", \"Tox21_PPAR-gamma_training\",\n \"Tox21_PPAR-gamma_testing\", \"Tox21_PPAR-gamma_evaluation\", \"UACC257\", \"UACC257H\", \"uracil\", \"Yeast\", \"YeastH\",\n \"ZINC_full\", \"ZINC_test\", \"ZINC_train\", \"ZINC_val\",\n # Bioinformatics\n \"DD\", \"ENZYMES\", \"KKI\", \"OHSU\", \"Peking_1\", \"PROTEINS\", \"PROTEINS_full\",\n # Computer vision\n \"COIL-DEL\", \"COIL-RAG\", \"Cuneiform\", \"Fingerprint\", \"FIRSTMM_DB\", \"Letter-high\", \"Letter-low\", \"Letter-med\",\n \"MSRC_9\", \"MSRC_21\", \"MSRC_21C\",\n # Social networks\n \"COLLAB\", \"dblp_ct1\", \"dblp_ct2\", \"DBLP_v1\", \"deezer_ego_nets\", \"facebook_ct1\", \"facebook_ct2\",\n \"github_stargazers\", \"highschool_ct1\", \"highschool_ct2\", \"IMDB-BINARY\", \"IMDB-MULTI\", \"infectious_ct1\",\n \"infectious_ct2\", \"mit_ct1\", \"mit_ct2\", \"REDDIT-BINARY\", \"REDDIT-MULTI-5K\", \"REDDIT-MULTI-12K\",\n \"reddit_threads\", \"tumblr_ct1\", \"tumblr_ct2\", \"twitch_egos\", \"TWITTER-Real-Graph-Partial\",\n # Synthetic\n \"COLORS-3\", \"SYNTHETIC\", \"SYNTHETICnew\", \"Synthie\", \"TRIANGLES\"\n ]\n\n def __init__(self, dataset_name: str, reload: bool = False, verbose: int = 1):\n \"\"\"Initialize a `GraphTUDataset` instance from string identifier.\n\n Args:\n dataset_name (str): Name of a dataset.\n reload (bool): Download the dataset again and prepare data on disk.\n verbose (int): Print progress or info for processing, where 0 is silent. 
Default is 1.\n \"\"\"\n if not isinstance(dataset_name, str):\n raise ValueError(\"ERROR:kgcnn: Please provide string identifier for TUDataset.\")\n\n if dataset_name in self.tudataset_ids:\n self.data_directory = dataset_name\n self.download_url = \"https://www.chrsmrrs.com/graphkerneldatasets/\"\n self.download_url = self.download_url + dataset_name + \".zip\"\n self.file_name = dataset_name + \".zip\"\n self.unpack_zip = True\n self.unpack_directory = dataset_name\n self.fits_in_memory = True\n self.dataset_name = dataset_name\n else:\n print(\"ERROR:kgcnn: Cannot resolve %s as a TUDataset.\" % dataset_name,\n \"Add to `tudataset_ids` list manually.\")\n\n DownloadDataset.__init__(self, reload=reload, verbose=verbose)\n MemoryGraphDataset.__init__(self, verbose=verbose)\n if verbose > 1:\n print(\"INFO:kgcnn: Reading dataset to memory with name %s\" % str(self.dataset_name))\n\n if self.fits_in_memory:\n self.read_in_memory(verbose=verbose)\n\n def read_in_memory(self, verbose: int = 1):\n r\"\"\"Read the TUDataset into memory. The TUDataset is stored in disjoint representations. The data is cast\n to a list of separate graph properties for `MemoryGraphDataset`.\n\n Args:\n verbose (int): Print progress or info for processing, where 0 is silent. Default is 1.\n\n Returns:\n self\n \"\"\"\n\n if self.file_name is not None and self.dataset_name in self.tudataset_ids:\n name_dataset = self.dataset_name\n path = os.path.join(self.data_main_dir, self.data_directory, self.unpack_directory, name_dataset)\n else:\n print(\"WARNING:kgcnn: Dataset with name %s not found in TUDatasets list.\" % self.dataset_name)\n return None\n\n # Graph adjacency and graph-indicator columns; these files must be present\n g_a = np.array(self.read_csv_simple(os.path.join(path, name_dataset + \"_A.txt\"), dtype=int), dtype=\"int\")\n g_n_id = np.array(self.read_csv_simple(os.path.join(path, name_dataset + \"_graph_indicator.txt\"), dtype=int),\n dtype=\"int\")\n\n # Try to read labels (optional)\n try:\n g_labels = np.array(\n self.read_csv_simple(os.path.join(path, name_dataset + \"_graph_labels.txt\"), dtype=float))\n except FileNotFoundError:\n g_labels = None\n try:\n n_labels = np.array(\n self.read_csv_simple(os.path.join(path, name_dataset + \"_node_labels.txt\"), dtype=float))\n except FileNotFoundError:\n n_labels = None\n try:\n e_labels = np.array(\n self.read_csv_simple(os.path.join(path, name_dataset + \"_edge_labels.txt\"), dtype=float))\n except FileNotFoundError:\n e_labels = None\n\n # Try to read attributes (optional)\n try:\n n_attr = np.array(\n self.read_csv_simple(os.path.join(path, name_dataset + \"_node_attributes.txt\"), dtype=float))\n except FileNotFoundError:\n n_attr = None\n try:\n e_attr = np.array(\n self.read_csv_simple(os.path.join(path, name_dataset + \"_edge_attributes.txt\"), dtype=float))\n except FileNotFoundError:\n e_attr = None\n try:\n g_attr = np.array(\n self.read_csv_simple(os.path.join(path, name_dataset + \"_graph_attributes.txt\"), dtype=float))\n except FileNotFoundError:\n g_attr = None\n\n # labels\n num_graphs = np.amax(g_n_id)\n if g_labels is not None:\n if len(g_labels) != num_graphs:\n print(\"ERROR:kgcnn: Wrong number of graphs, not matching graph labels, {0}, {1}\".format(len(g_labels),\n num_graphs))\n\n # shift index, should start at 0 for python indexing\n if int(np.amin(g_n_id)) == 1 and int(np.amin(g_a)) == 1:\n if verbose > 0:\n print(\"INFO:kgcnn: Shift start of graph id to zero for %s to match python indexing.\" % name_dataset)\n g_a = g_a - 1\n g_n_id = g_n_id - 1\n\n # split into separate graphs\n graph_id, counts = np.unique(g_n_id, return_counts=True)\n graphlen = np.zeros(num_graphs, dtype=int)\n graphlen[graph_id] = counts\n\n if n_attr is not None:\n n_attr = np.split(n_attr, np.cumsum(graphlen)[:-1])\n if n_labels is not None:\n n_labels = np.split(n_labels, np.cumsum(graphlen)[:-1])\n\n # edge_indicator\n graph_id_edge = g_n_id[g_a[:, 0]] # is the same for adj_matrix[:,1]\n graph_id2, counts_edge = np.unique(graph_id_edge, return_counts=True)\n edgelen = np.zeros(num_graphs, dtype=int)\n edgelen[graph_id2] = counts_edge\n\n if e_attr is not None:\n e_attr = np.split(e_attr, np.cumsum(edgelen)[:-1])\n if e_labels is not None:\n e_labels = np.split(e_labels, np.cumsum(edgelen)[:-1])\n\n # edge_indices\n node_index = np.concatenate([np.arange(x) for x in graphlen], axis=0)\n edge_indices = node_index[g_a]\n edge_indices = np.concatenate([edge_indices[:, 1:], edge_indices[:, :1]], axis=-1) # switch indices\n edge_indices = np.split(edge_indices, np.cumsum(edgelen)[:-1])\n\n # Check if unconnected\n all_cons = []\n for i in range(num_graphs):\n cons = np.arange(graphlen[i])\n test_cons = np.sort(np.unique(cons[edge_indices[i]].flatten()))\n is_cons = np.zeros_like(cons, dtype=bool)\n is_cons[test_cons] = True\n all_cons.append(np.sum(~is_cons))\n all_cons = np.array(all_cons)\n\n if verbose > 0:\n print(\"INFO:kgcnn: Graph index which has unconnected\", np.arange(len(all_cons))[all_cons > 0], \"with\",\n all_cons[all_cons > 0], \"in total\", len(all_cons[all_cons > 0]))\n\n node_degree = [np.zeros(x, dtype=\"int\") for x in graphlen]\n for i, x in enumerate(edge_indices):\n nod_id, nod_counts = np.unique(x[:, 0], return_counts=True)\n node_degree[i][nod_id] = nod_counts\n\n self.node_degree = node_degree\n self.node_attributes = n_attr\n self.edge_attributes = e_attr\n self.graph_attributes = g_attr\n self.edge_indices = edge_indices\n self.node_labels = n_labels\n self.edge_labels = e_labels\n self.graph_labels = g_labels\n self.length = num_graphs\n\n return self\n\n @staticmethod\n def _debug_read_list():\n line_ids = []\n with open(\"datasets.md\", 'r') as f:\n for line in f.readlines():\n if line[:3] == \"|**\":\n line_ids.append(line.split(\"**\")[1])\n return line_ids\n\n" ]
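Editor's note: the core of `read_in_memory` above is a single numpy idiom, i.e. recover per-graph node counts from the graph-indicator column, then split the flat, disjoint property arrays at the cumulative offsets. A minimal self-contained sketch with toy arrays (variable names are illustrative only, not part of kgcnn):

```python
import numpy as np

# Toy stand-ins for the TUDataset columns, already shifted to 0-based graph ids.
g_n_id = np.array([0, 0, 0, 1, 1])      # graph indicator: graph id of each node
n_attr = np.arange(10.0).reshape(5, 2)  # flat node attributes, one row per node

# Per-graph node counts; indexing into a zero array keeps empty graphs at 0.
graph_id, counts = np.unique(g_n_id, return_counts=True)
graphlen = np.zeros(g_n_id.max() + 1, dtype=int)
graphlen[graph_id] = counts

# Split the flat array at cumulative offsets: split points are [3] here.
per_graph_attr = np.split(n_attr, np.cumsum(graphlen)[:-1])
assert per_graph_attr[0].shape == (3, 2) and per_graph_attr[1].shape == (2, 2)
```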
[ [ "tensorflow.keras.layers.Flatten", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Input" ], [ "tensorflow.keras.layers.Flatten", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Input" ], [ "numpy.amax", "numpy.unique", "numpy.amin", "numpy.arange", "numpy.cumsum", "numpy.concatenate", "numpy.zeros_like", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kne42/napari
[ "d61d0be0ef8ea622dd3d6acd270c0529816c11ec" ]
[ "napari/layers/points/_tests/test_points.py" ]
[ "from copy import copy\nfrom itertools import cycle, islice\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom vispy.color import get_colormap\n\nfrom napari._tests.utils import check_layer_world_data_extent\nfrom napari.layers import Points\nfrom napari.layers.points._points_utils import points_to_squares\nfrom napari.layers.utils.color_manager import ColorProperties\nfrom napari.utils.colormaps.standardize_color import transform_color\n\n\ndef _make_cycled_properties(values, length):\n \"\"\"Helper function to make property values\n\n Parameters\n ----------\n values\n The values to be cycled.\n length : int\n The length of the resulting property array\n\n Returns\n -------\n cycled_properties : np.ndarray\n The property array comprising the cycled values.\n \"\"\"\n cycled_properties = np.array(list(islice(cycle(values), 0, length)))\n return cycled_properties\n\n\ndef test_empty_points():\n pts = Points()\n assert pts.data.shape == (0, 2)\n\n\ndef test_empty_points_with_properties():\n \"\"\"Test instantiating an empty Points layer with properties\n\n See: https://github.com/napari/napari/pull/1069\n \"\"\"\n properties = {\n 'label': np.array(['label1', 'label2']),\n 'cont_prop': np.array([0], dtype=float),\n }\n pts = Points(property_choices=properties)\n current_props = {k: v[0] for k, v in properties.items()}\n np.testing.assert_equal(pts.current_properties, current_props)\n\n # verify the property datatype is correct\n assert pts.properties['cont_prop'].dtype == float\n\n # add two points and verify the default property was applied\n pts.add([10, 10])\n pts.add([20, 20])\n props = {\n 'label': np.array(['label1', 'label1']),\n 'cont_prop': np.array([0, 0], dtype=float),\n }\n np.testing.assert_equal(pts.properties, props)\n\n\ndef test_empty_points_with_properties_list():\n \"\"\"Test instantiating an empty Points layer with properties\n stored in a list\n\n See: https://github.com/napari/napari/pull/1069\n \"\"\"\n properties = {'label': ['label1', 'label2'], 'cont_prop': [0]}\n pts = Points(property_choices=properties)\n current_props = {k: np.asarray(v[0]) for k, v in properties.items()}\n np.testing.assert_equal(pts.current_properties, current_props)\n\n # add two points and verify the default property was applied\n pts.add([10, 10])\n pts.add([20, 20])\n props = {\n 'label': np.array(['label1', 'label1']),\n 'cont_prop': np.array([0, 0], dtype=float),\n }\n np.testing.assert_equal(pts.properties, props)\n\n\ndef test_empty_layer_with_face_colormap():\n \"\"\"Test creating an empty layer where the face color is a colormap\n See: https://github.com/napari/napari/pull/1069\n \"\"\"\n default_properties = {'point_type': np.array([1.5], dtype=float)}\n layer = Points(\n property_choices=default_properties,\n face_color='point_type',\n face_colormap='gray',\n )\n\n assert layer.face_color_mode == 'colormap'\n\n # verify the current_face_color is correct\n face_color = np.array([1, 1, 1, 1])\n np.testing.assert_allclose(layer._face.current_color, face_color)\n\n\ndef test_empty_layer_with_edge_colormap():\n \"\"\"Test creating an empty layer where the face color is a colormap\n See: https://github.com/napari/napari/pull/1069\n \"\"\"\n default_properties = {'point_type': np.array([1.5], dtype=float)}\n layer = Points(\n property_choices=default_properties,\n edge_color='point_type',\n edge_colormap='gray',\n )\n\n assert layer.edge_color_mode == 'colormap'\n\n # verify the current_face_color is correct\n edge_color = np.array([1, 1, 1, 1])\n 
np.testing.assert_allclose(layer._edge.current_color, edge_color)\n\n\ndef test_empty_layer_with_text_properties():\n \"\"\"Test initializing an empty layer with text defined\"\"\"\n default_properties = {'point_type': np.array([1.5], dtype=float)}\n text_kwargs = {'text': 'point_type', 'color': 'red'}\n layer = Points(\n property_choices=default_properties,\n text=text_kwargs,\n )\n assert layer.text.values.size == 0\n np.testing.assert_allclose(layer.text.color, [1, 0, 0, 1])\n\n # add a point and check that the appropriate text value was added\n layer.add([1, 1])\n np.testing.assert_equal(layer.text.values, ['1.5'])\n np.testing.assert_allclose(layer.text.color, [1, 0, 0, 1])\n\n\ndef test_empty_layer_with_text_formatted():\n \"\"\"Test initializing an empty layer with text defined\"\"\"\n default_properties = {'point_type': np.array([1.5], dtype=float)}\n layer = Points(\n property_choices=default_properties,\n text='point_type: {point_type:.2f}',\n )\n assert layer.text.values.size == 0\n\n # add a point and check that the appropriate text value was added\n layer.add([1, 1])\n np.testing.assert_equal(layer.text.values, ['point_type: 1.50'])\n\n\ndef test_random_points():\n \"\"\"Test instantiating Points layer with random 2D data.\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n layer = Points(data)\n assert np.all(layer.data == data)\n assert layer.ndim == shape[1]\n assert layer._view_data.ndim == 2\n assert len(layer.data) == 10\n assert len(layer.selected_data) == 0\n\n\ndef test_integer_points():\n \"\"\"Test instantiating Points layer with integer data.\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = np.random.randint(20, size=(10, 2))\n layer = Points(data)\n assert np.all(layer.data == data)\n assert layer.ndim == shape[1]\n assert layer._view_data.ndim == 2\n assert len(layer.data) == 10\n\n\ndef test_negative_points():\n \"\"\"Test instantiating Points layer with negative data.\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape) - 10\n layer = Points(data)\n assert np.all(layer.data == data)\n assert layer.ndim == shape[1]\n assert layer._view_data.ndim == 2\n assert len(layer.data) == 10\n\n\ndef test_empty_points_array():\n \"\"\"Test instantiating Points layer with empty array.\"\"\"\n shape = (0, 2)\n data = np.empty(shape)\n layer = Points(data)\n assert np.all(layer.data == data)\n assert layer.ndim == shape[1]\n assert layer._view_data.ndim == 2\n assert len(layer.data) == 0\n\n\ndef test_3D_points():\n \"\"\"Test instantiating Points layer with random 3D data.\"\"\"\n shape = (10, 3)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n layer = Points(data)\n assert np.all(layer.data == data)\n assert layer.ndim == shape[1]\n assert layer._view_data.ndim == 2\n assert len(layer.data) == 10\n\n\ndef test_4D_points():\n \"\"\"Test instantiating Points layer with random 4D data.\"\"\"\n shape = (10, 4)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n layer = Points(data)\n assert np.all(layer.data == data)\n assert layer.ndim == shape[1]\n assert layer._view_data.ndim == 2\n assert len(layer.data) == 10\n\n\ndef test_changing_points():\n \"\"\"Test changing Points data.\"\"\"\n shape_a = (10, 2)\n shape_b = (20, 2)\n np.random.seed(0)\n data_a = 20 * np.random.random(shape_a)\n data_b = 20 * np.random.random(shape_b)\n layer = Points(data_a)\n layer.data = data_b\n assert np.all(layer.data == data_b)\n assert layer.ndim == shape_b[1]\n assert layer._view_data.ndim == 2\n assert 
\n assert len(layer.data) == 20\n\n\ndef test_selecting_points():\n \"\"\"Test selecting points.\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n layer = Points(data)\n layer.mode = 'select'\n data_to_select = {1, 2}\n layer.selected_data = data_to_select\n assert layer.selected_data == data_to_select\n\n # test switching to 3D\n layer._slice_dims(ndisplay=3)\n assert layer.selected_data == data_to_select\n\n # select different points while in 3D mode\n other_data_to_select = {0}\n layer.selected_data = other_data_to_select\n assert layer.selected_data == other_data_to_select\n\n # selection should persist when going back to 2D mode\n layer._slice_dims(ndisplay=2)\n assert layer.selected_data == other_data_to_select\n\n # selection should persist when switching between select and pan_zoom\n layer.mode = 'pan_zoom'\n assert layer.selected_data == other_data_to_select\n layer.mode = 'select'\n assert layer.selected_data == other_data_to_select\n\n # add mode should clear the selection\n layer.mode = 'add'\n assert layer.selected_data == set()\n\n\ndef test_adding_points():\n \"\"\"Test adding Points data.\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n layer = Points(data)\n assert len(layer.data) == 10\n\n coord = [20, 20]\n layer.add(coord)\n assert len(layer.data) == 11\n assert np.all(layer.data[10] == coord)\n # the added point should be selected\n assert layer.selected_data == {10}\n\n # test adding multiple points\n coords = [[10, 10], [15, 15]]\n layer.add(coords)\n assert len(layer.data) == 13\n assert np.all(layer.data[11:, :] == coords)\n\n # test that the last added points can be deleted\n layer.remove_selected()\n np.testing.assert_equal(layer.data, np.vstack((data, coord)))\n\n\ndef test_adding_points_to_empty():\n \"\"\"Test adding Points data to empty.\"\"\"\n shape = (0, 2)\n data = np.empty(shape)\n layer = Points(data)\n assert len(layer.data) == 0\n\n coord = [20, 20]\n layer.add(coord)\n assert len(layer.data) == 1\n assert np.all(layer.data[0] == coord)\n\n\ndef test_removing_selected_points():\n \"\"\"Test removing selected points.\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n layer = Points(data)\n\n # With nothing selected no points should be removed\n layer.remove_selected()\n assert len(layer.data) == shape[0]\n\n # Select two points and remove them\n layer.selected_data = {0, 3}\n layer.remove_selected()\n assert len(layer.data) == shape[0] - 2\n assert len(layer.selected_data) == 0\n keep = [1, 2] + list(range(4, 10))\n assert np.all(layer.data == data[keep])\n\n # Select another point and remove it\n layer.selected_data = {4}\n layer.remove_selected()\n assert len(layer.data) == shape[0] - 3\n\n\ndef test_move():\n \"\"\"Test moving points.\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n unmoved = copy(data)\n layer = Points(data)\n\n # Move one point relative to an initial drag start location\n layer._move([0], [0, 0])\n layer._move([0], [10, 10])\n layer._drag_start = None\n assert np.all(layer.data[0] == unmoved[0] + [10, 10])\n assert np.all(layer.data[1:] == unmoved[1:])\n\n # Move two points relative to an initial drag start location\n layer._move([1, 2], [2, 2])\n layer._move([1, 2], np.add([2, 2], [-3, 4]))\n assert np.all(layer.data[1:2] == unmoved[1:2] + [-3, 4])\n\n\ndef test_changing_modes():\n \"\"\"Test changing modes.\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n layer = Points(data)\n assert layer.mode == 'pan_zoom'\n assert layer.interactive is True\n\n layer.mode = 'add'\n assert layer.mode == 'add'\n\n layer.mode = 'select'\n assert layer.mode == 'select'\n assert layer.interactive is False\n\n layer.mode = 'pan_zoom'\n assert layer.mode == 'pan_zoom'\n assert layer.interactive is True\n\n with pytest.raises(ValueError):\n layer.mode = 'not_a_mode'\n\n\ndef test_name():\n \"\"\"Test setting layer name.\"\"\"\n np.random.seed(0)\n data = 20 * np.random.random((10, 2))\n layer = Points(data)\n assert layer.name == 'Points'\n\n layer = Points(data, name='random')\n assert layer.name == 'random'\n\n layer.name = 'pts'\n assert layer.name == 'pts'\n\n\ndef test_visibility():\n \"\"\"Test setting layer visibility.\"\"\"\n np.random.seed(0)\n data = 20 * np.random.random((10, 2))\n layer = Points(data)\n assert layer.visible is True\n\n layer.visible = False\n assert layer.visible is False\n\n layer = Points(data, visible=False)\n assert layer.visible is False\n\n layer.visible = True\n assert layer.visible is True\n\n\ndef test_opacity():\n \"\"\"Test setting layer opacity.\"\"\"\n np.random.seed(0)\n data = 20 * np.random.random((10, 2))\n layer = Points(data)\n assert layer.opacity == 1.0\n\n layer.opacity = 0.5\n assert layer.opacity == 0.5\n\n layer = Points(data, opacity=0.6)\n assert layer.opacity == 0.6\n\n layer.opacity = 0.3\n assert layer.opacity == 0.3\n\n\ndef test_blending():\n \"\"\"Test setting layer blending.\"\"\"\n np.random.seed(0)\n data = 20 * np.random.random((10, 2))\n layer = Points(data)\n assert layer.blending == 'translucent'\n\n layer.blending = 'additive'\n assert layer.blending == 'additive'\n\n layer = Points(data, blending='additive')\n assert layer.blending == 'additive'\n\n layer.blending = 'opaque'\n assert layer.blending == 'opaque'\n\n\ndef test_symbol():\n \"\"\"Test setting symbol.\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n layer = Points(data)\n assert layer.symbol == 'disc'\n\n layer.symbol = 'cross'\n assert layer.symbol == 'cross'\n\n layer = Points(data, symbol='star')\n assert layer.symbol == 'star'\n\n\nproperties_array = {'point_type': _make_cycled_properties(['A', 'B'], 10)}\nproperties_list = {'point_type': list(_make_cycled_properties(['A', 'B'], 10))}\n\n\n@pytest.mark.parametrize(\"properties\", [properties_array, properties_list])\ndef test_properties(properties):\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n layer = Points(data, properties=copy(properties))\n np.testing.assert_equal(layer.properties, properties)\n\n current_prop = {'point_type': np.array(['B'])}\n assert layer.current_properties == current_prop\n\n # test removing points\n layer.selected_data = {0, 1}\n layer.remove_selected()\n remove_properties = properties['point_type'][2::]\n assert len(layer.properties['point_type']) == (shape[0] - 2)\n assert np.all(layer.properties['point_type'] == remove_properties)\n\n # test selection of properties\n layer.selected_data = {0}\n selected_annotation = layer.current_properties['point_type']\n assert len(selected_annotation) == 1\n assert selected_annotation[0] == 'A'\n\n # test adding points with properties\n layer.add([10, 10])\n add_annotations = np.concatenate((remove_properties, ['A']), axis=0)\n assert np.all(layer.properties['point_type'] == add_annotations)\n\n # test copy/paste\n layer.selected_data = {0, 1}\n layer._copy_data()\n assert np.all(layer._clipboard['properties']['point_type'] == ['A', 'B'])
layer._paste_data()\n paste_annotations = np.concatenate((add_annotations, ['A', 'B']), axis=0)\n assert np.all(layer.properties['point_type'] == paste_annotations)\n\n\[email protected](\"attribute\", ['edge', 'face'])\ndef test_adding_properties(attribute):\n \"\"\"Test adding properties to an existing layer\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n layer = Points(data)\n\n # add properties\n properties = {'point_type': _make_cycled_properties(['A', 'B'], shape[0])}\n layer.properties = properties\n np.testing.assert_equal(layer.properties, properties)\n\n # add properties as a dataframe\n properties_df = pd.DataFrame(properties)\n layer.properties = properties_df\n np.testing.assert_equal(layer.properties, properties)\n\n # add properties as a dictionary with list values\n properties_list = {\n 'point_type': list(_make_cycled_properties(['A', 'B'], shape[0]))\n }\n layer.properties = properties_list\n assert isinstance(layer.properties['point_type'], np.ndarray)\n\n # removing a property that was the _*_color_property should give a warning\n color_manager = getattr(layer, f'_{attribute}')\n color_manager.color_properties = {\n 'name': 'point_type',\n 'values': np.empty(0),\n 'current_value': 'A',\n }\n properties_2 = {\n 'not_point_type': _make_cycled_properties(['A', 'B'], shape[0])\n }\n with pytest.warns(RuntimeWarning):\n layer.properties = properties_2\n\n\ndef test_properties_dataframe():\n \"\"\"Test if properties can be provided as a DataFrame\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n properties = {'point_type': _make_cycled_properties(['A', 'B'], shape[0])}\n properties_df = pd.DataFrame(properties)\n properties_df = properties_df.astype(properties['point_type'].dtype)\n layer = Points(data, properties=properties_df)\n np.testing.assert_equal(layer.properties, properties)\n\n\ndef test_add_points_with_properties_as_list():\n # test adding points initialized with properties as list\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n properties = {\n 'point_type': list(_make_cycled_properties(['A', 'B'], shape[0]))\n }\n layer = Points(data, properties=copy(properties))\n\n coord = [18, 18]\n layer.add(coord)\n new_prop = {'point_type': np.append(properties['point_type'], 'B')}\n np.testing.assert_equal(layer.properties, new_prop)\n\n\ndef test_updating_points_properties():\n # test adding points initialized with properties\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n properties = {'point_type': _make_cycled_properties(['A', 'B'], shape[0])}\n layer = Points(data, properties=copy(properties))\n\n layer.mode = 'select'\n layer.selected_data = [len(data) - 1]\n layer.current_properties = {'point_type': np.array(['A'])}\n\n updated_properties = properties\n updated_properties['point_type'][-1] = 'A'\n np.testing.assert_equal(layer.properties, updated_properties)\n\n\nproperties_array = {'point_type': _make_cycled_properties(['A', 'B'], 10)}\nproperties_list = {'point_type': list(_make_cycled_properties(['A', 'B'], 10))}\n\n\[email protected](\"properties\", [properties_array, properties_list])\ndef test_text_from_property_value(properties):\n \"\"\"Test setting text from a property value\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n layer = Points(data, properties=copy(properties), text='point_type')\n\n np.testing.assert_equal(layer.text.values, properties['point_type'])\n\n\[email 
protected](\"properties\", [properties_array, properties_list])\ndef test_text_from_property_fstring(properties):\n \"\"\"Test setting text with an f-string from the property value\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n layer = Points(\n data, properties=copy(properties), text='type: {point_type}'\n )\n\n expected_text = ['type: ' + v for v in properties['point_type']]\n np.testing.assert_equal(layer.text.values, expected_text)\n\n # test updating the text\n layer.text = 'type-ish: {point_type}'\n expected_text_2 = ['type-ish: ' + v for v in properties['point_type']]\n np.testing.assert_equal(layer.text.values, expected_text_2)\n\n # copy/paste\n layer.selected_data = {0}\n layer._copy_data()\n layer._paste_data()\n expected_text_3 = expected_text_2 + ['type-ish: A']\n np.testing.assert_equal(layer.text.values, expected_text_3)\n\n # add point\n layer.selected_data = {0}\n new_shape = np.random.random((1, 2))\n layer.add(new_shape)\n expected_text_4 = expected_text_3 + ['type-ish: A']\n np.testing.assert_equal(layer.text.values, expected_text_4)\n\n\[email protected](\"properties\", [properties_array, properties_list])\ndef test_set_text_with_kwarg_dict(properties):\n text_kwargs = {\n 'text': 'type: {point_type}',\n 'color': [0, 0, 0, 1],\n 'rotation': 10,\n 'translation': [5, 5],\n 'anchor': 'upper_left',\n 'size': 10,\n 'visible': True,\n }\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n layer = Points(data, properties=copy(properties), text=text_kwargs)\n\n expected_text = ['type: ' + v for v in properties['point_type']]\n np.testing.assert_equal(layer.text.values, expected_text)\n\n for property, value in text_kwargs.items():\n if property == 'text':\n continue\n layer_value = getattr(layer._text, property)\n np.testing.assert_equal(layer_value, value)\n\n\[email protected](\"properties\", [properties_array, properties_list])\ndef test_text_error(properties):\n \"\"\"creating a layer with text as the wrong type should raise an error\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n # try adding text as the wrong type\n with pytest.raises(TypeError):\n Points(data, properties=copy(properties), text=123)\n\n\ndef test_refresh_text():\n \"\"\"Test refreshing the text after setting new properties\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n properties = {'point_type': ['A'] * shape[0]}\n layer = Points(data, properties=copy(properties), text='point_type')\n\n new_properties = {'point_type': ['B'] * shape[0]}\n layer.properties = new_properties\n np.testing.assert_equal(layer.text.values, new_properties['point_type'])\n\n\ndef test_points_errors():\n shape = (3, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n\n # try adding properties with the wrong number of properties\n with pytest.raises(ValueError):\n annotations = {'point_type': np.array(['A', 'B'])}\n Points(data, properties=copy(annotations))\n\n\ndef test_edge_width():\n \"\"\"Test setting edge width.\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n layer = Points(data)\n assert layer.edge_width == 1\n\n layer.edge_width = 2\n assert layer.edge_width == 2\n\n layer = Points(data, edge_width=3)\n assert layer.edge_width == 3\n\n\ndef test_n_dimensional():\n \"\"\"Test setting n_dimensional flag for 2D and 4D data.\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n layer = Points(data)\n assert 
layer.n_dimensional is False\n\n layer.n_dimensional = True\n assert layer.n_dimensional is True\n\n layer = Points(data, n_dimensional=True)\n assert layer.n_dimensional is True\n\n shape = (10, 4)\n data = 20 * np.random.random(shape)\n layer = Points(data)\n assert layer.n_dimensional is False\n\n layer.n_dimensional = True\n assert layer.n_dimensional is True\n\n layer = Points(data, n_dimensional=True)\n assert layer.n_dimensional is True\n\n\n@pytest.mark.filterwarnings(\"ignore:elementwise comparison fail:FutureWarning\")\n@pytest.mark.parametrize(\"attribute\", ['edge', 'face'])\ndef test_switch_color_mode(attribute):\n \"\"\"Test switching between color modes\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n # create a continuous property with a known value in the last element\n continuous_prop = np.random.random((shape[0],))\n continuous_prop[-1] = 1\n properties = {\n 'point_truthiness': continuous_prop,\n 'point_type': _make_cycled_properties(['A', 'B'], shape[0]),\n }\n initial_color = [1, 0, 0, 1]\n color_cycle = ['red', 'blue']\n color_kwarg = f'{attribute}_color'\n colormap_kwarg = f'{attribute}_colormap'\n color_cycle_kwarg = f'{attribute}_color_cycle'\n args = {\n color_kwarg: initial_color,\n colormap_kwarg: 'gray',\n color_cycle_kwarg: color_cycle,\n }\n layer = Points(data, properties=properties, **args)\n\n layer_color_mode = getattr(layer, f'{attribute}_color_mode')\n layer_color = getattr(layer, f'{attribute}_color')\n assert layer_color_mode == 'direct'\n np.testing.assert_allclose(\n layer_color, np.repeat([initial_color], shape[0], axis=0)\n )\n\n # there should not be an edge_color_property\n color_manager = getattr(layer, f'_{attribute}')\n color_property = color_manager.color_properties\n assert color_property is None\n\n # transitioning to colormap should raise a warning\n # because there isn't an edge color property yet and\n # the first property in points.properties is being automatically selected\n with pytest.warns(UserWarning):\n setattr(layer, f'{attribute}_color_mode', 'colormap')\n color_manager = getattr(layer, f'_{attribute}')\n color_property_name = color_manager.color_properties.name\n assert color_property_name == next(iter(properties))\n layer_color = getattr(layer, f'{attribute}_color')\n np.testing.assert_allclose(layer_color[-1], [1, 1, 1, 1])\n\n # switch to color cycle\n setattr(layer, f'{attribute}_color_mode', 'cycle')\n setattr(layer, f'{attribute}_color', 'point_type')\n color = getattr(layer, f'{attribute}_color')\n layer_color = transform_color(color_cycle * int(shape[0] / 2))\n np.testing.assert_allclose(color, layer_color)\n\n # switch back to direct, edge_colors shouldn't change\n setattr(layer, f'{attribute}_color_mode', 'direct')\n new_edge_color = getattr(layer, f'{attribute}_color')\n np.testing.assert_allclose(new_edge_color, color)\n\n\n@pytest.mark.parametrize(\"attribute\", ['edge', 'face'])\ndef test_colormap_without_properties(attribute):\n \"\"\"Setting the colormode to colormap should raise an exception\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n layer = Points(data)\n\n with pytest.raises(ValueError):\n setattr(layer, f'{attribute}_color_mode', 'colormap')\n\n\n@pytest.mark.parametrize(\"attribute\", ['edge', 'face'])\ndef test_colormap_with_categorical_properties(attribute):\n \"\"\"Setting the colormode to colormap should raise an exception\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n properties = {'point_type': 
_make_cycled_properties(['A', 'B'], shape[0])}\n layer = Points(data, properties=properties)\n\n with pytest.raises(TypeError):\n with pytest.warns(UserWarning):\n setattr(layer, f'{attribute}_color_mode', 'colormap')\n\n\n@pytest.mark.parametrize(\"attribute\", ['edge', 'face'])\ndef test_add_colormap(attribute):\n \"\"\"Test directly adding a vispy Colormap object\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n annotations = {'point_type': _make_cycled_properties([0, 1.5], shape[0])}\n color_kwarg = f'{attribute}_color'\n colormap_kwarg = f'{attribute}_colormap'\n args = {color_kwarg: 'point_type', colormap_kwarg: 'viridis'}\n layer = Points(data, properties=annotations, **args)\n\n setattr(layer, f'{attribute}_colormap', get_colormap('gray'))\n layer_colormap = getattr(layer, f'{attribute}_colormap')\n assert 'unnamed colormap' in layer_colormap.name\n\n\n@pytest.mark.parametrize(\"attribute\", ['edge', 'face'])\ndef test_add_point_direct(attribute: str):\n \"\"\"Test adding points to layer directly\"\"\"\n layer = Points()\n assert len(getattr(layer, f'{attribute}_color')) == 0\n setattr(layer, f'current_{attribute}_color', 'red')\n coord = [18, 18]\n layer.add(coord)\n np.testing.assert_allclose(\n [[1, 0, 0, 1]], getattr(layer, f'{attribute}_color')\n )\n\n\n@pytest.mark.parametrize(\"attribute\", ['edge', 'face'])\ndef test_color_direct(attribute: str):\n \"\"\"Test setting colors directly\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n layer_kwargs = {f'{attribute}_color': 'black'}\n layer = Points(data, **layer_kwargs)\n color_array = transform_color(['black'] * shape[0])\n current_color = getattr(layer, f'current_{attribute}_color')\n layer_color = getattr(layer, f'{attribute}_color')\n assert current_color == 'black'\n assert len(layer.edge_color) == shape[0]\n np.testing.assert_allclose(color_array, layer_color)\n\n # With no data selected changing color has no effect\n setattr(layer, f'current_{attribute}_color', 'blue')\n current_color = getattr(layer, f'current_{attribute}_color')\n assert current_color == 'blue'\n np.testing.assert_allclose(color_array, layer_color)\n\n # Select data and change edge color of selection\n selected_data = {0, 1}\n layer.selected_data = {0, 1}\n current_color = getattr(layer, f'current_{attribute}_color')\n assert current_color == 'black'\n setattr(layer, f'current_{attribute}_color', 'green')\n colorarray_green = transform_color(['green'] * len(layer.selected_data))\n color_array[list(selected_data)] = colorarray_green\n layer_color = getattr(layer, f'{attribute}_color')\n np.testing.assert_allclose(color_array, layer_color)\n\n # Add new point and test its color\n coord = [18, 18]\n layer.selected_data = {}\n setattr(layer, f'current_{attribute}_color', 'blue')\n layer.add(coord)\n color_array = np.vstack([color_array, transform_color('blue')])\n layer_color = getattr(layer, f'{attribute}_color')\n assert len(layer_color) == shape[0] + 1\n np.testing.assert_allclose(color_array, layer_color)\n\n # Check removing data adjusts colors correctly\n layer.selected_data = {0, 2}\n layer.remove_selected()\n assert len(layer.data) == shape[0] - 1\n\n layer_color = getattr(layer, f'{attribute}_color')\n assert len(layer_color) == shape[0] - 1\n np.testing.assert_allclose(\n layer_color,\n np.vstack((color_array[1], color_array[3:])),\n )\n\n\ncolor_cycle_str = ['red', 'blue']\ncolor_cycle_rgb = [[1, 0, 0], [0, 0, 1]]\ncolor_cycle_rgba = [[1, 0, 0, 1], [0, 0, 1, 1]]\n\n\n@pytest.mark.parametrize(\"attribute\", 
['edge', 'face'])\n@pytest.mark.parametrize(\n \"color_cycle\",\n [color_cycle_str, color_cycle_rgb, color_cycle_rgba],\n)\ndef test_color_cycle(attribute, color_cycle):\n \"\"\"Test setting edge/face color with a color cycle list\"\"\"\n # create Points using list color cycle\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n properties = {'point_type': _make_cycled_properties(['A', 'B'], shape[0])}\n points_kwargs = {\n 'properties': properties,\n f'{attribute}_color': 'point_type',\n f'{attribute}_color_cycle': color_cycle,\n }\n layer = Points(data, **points_kwargs)\n\n assert layer.properties == properties\n color_array = transform_color(\n list(islice(cycle(color_cycle), 0, shape[0]))\n )\n layer_color = getattr(layer, f'{attribute}_color')\n np.testing.assert_allclose(layer_color, color_array)\n\n # Add new point and test its color\n coord = [18, 18]\n layer.selected_data = {0}\n layer.add(coord)\n layer_color = getattr(layer, f'{attribute}_color')\n assert len(layer_color) == shape[0] + 1\n np.testing.assert_allclose(\n layer_color,\n np.vstack((color_array, transform_color('red'))),\n )\n\n # Check removing data adjusts colors correctly\n layer.selected_data = {0, 2}\n layer.remove_selected()\n assert len(layer.data) == shape[0] - 1\n\n layer_color = getattr(layer, f'{attribute}_color')\n assert len(layer_color) == shape[0] - 1\n np.testing.assert_allclose(\n layer_color,\n np.vstack((color_array[1], color_array[3:], transform_color('red'))),\n )\n\n # test adding a point with a new property value\n layer.selected_data = {}\n current_properties = layer.current_properties\n current_properties['point_type'] = np.array(['new'])\n layer.current_properties = current_properties\n layer.add([10, 10])\n color_manager = getattr(layer, f'_{attribute}')\n color_cycle_map = color_manager.categorical_colormap.colormap\n\n assert 'new' in color_cycle_map\n np.testing.assert_allclose(\n color_cycle_map['new'], np.squeeze(transform_color(color_cycle[0]))\n )\n\n\n@pytest.mark.parametrize(\"attribute\", ['edge', 'face'])\ndef test_color_cycle_dict(attribute):\n \"\"\"Test setting edge/face color with a color cycle dict\"\"\"\n data = np.array([[0, 0], [100, 0], [0, 100]])\n properties = {'my_colors': [2, 6, 3]}\n points_kwargs = {\n 'properties': properties,\n f'{attribute}_color': 'my_colors',\n f'{attribute}_color_cycle': {1: 'green', 2: 'red', 3: 'blue'},\n }\n layer = Points(data, **points_kwargs)\n\n color_manager = getattr(layer, f'_{attribute}')\n color_cycle_map = color_manager.categorical_colormap.colormap\n np.testing.assert_allclose(color_cycle_map[2], [1, 0, 0, 1]) # 2 is red\n np.testing.assert_allclose(color_cycle_map[3], [0, 0, 1, 1]) # 3 is blue\n np.testing.assert_allclose(color_cycle_map[6], [1, 1, 1, 1]) # 6 is white\n\n\n@pytest.mark.parametrize(\"attribute\", ['edge', 'face'])\ndef test_add_color_cycle_to_empty_layer(attribute):\n \"\"\"Test adding a point to an empty layer when edge/face color is a color cycle\n\n See: https://github.com/napari/napari/pull/1069\n \"\"\"\n default_properties = {'point_type': np.array(['A'])}\n color_cycle = ['red', 'blue']\n points_kwargs = {\n 'property_choices': default_properties,\n f'{attribute}_color': 'point_type',\n f'{attribute}_color_cycle': color_cycle,\n }\n layer = Points(**points_kwargs)\n\n # verify the current_edge_color is correct\n expected_color = transform_color(color_cycle[0])[0]\n color_manager = getattr(layer, f'_{attribute}')\n current_color = color_manager.current_color\n np.testing.assert_allclose(current_color, 
expected_color)\n\n # add a point\n layer.add([10, 10])\n props = {'point_type': np.array(['A'])}\n expected_color = np.array([[1, 0, 0, 1]])\n np.testing.assert_equal(layer.properties, props)\n attribute_color = getattr(layer, f'{attribute}_color')\n np.testing.assert_allclose(attribute_color, expected_color)\n\n # add a point with a new property\n layer.selected_data = []\n layer.current_properties = {'point_type': np.array(['B'])}\n layer.add([12, 12])\n new_color = np.array([0, 0, 1, 1])\n expected_color = np.vstack((expected_color, new_color))\n new_properties = {'point_type': np.array(['A', 'B'])}\n attribute_color = getattr(layer, f'{attribute}_color')\n np.testing.assert_allclose(attribute_color, expected_color)\n np.testing.assert_equal(layer.properties, new_properties)\n\n\n@pytest.mark.parametrize(\"attribute\", ['edge', 'face'])\ndef test_adding_value_color_cycle(attribute):\n \"\"\"Test that adding values to properties used to set a color cycle\n and then calling Points.refresh_colors() performs the update and adds the\n new value to the face/edge_color_cycle_map.\n\n See: https://github.com/napari/napari/issues/988\n \"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n properties = {'point_type': _make_cycled_properties(['A', 'B'], shape[0])}\n color_cycle = ['red', 'blue']\n points_kwargs = {\n 'properties': properties,\n f'{attribute}_color': 'point_type',\n f'{attribute}_color_cycle': color_cycle,\n }\n layer = Points(data, **points_kwargs)\n\n # make point 0 point_type C\n props = layer.properties\n point_types = props['point_type']\n point_types[0] = 'C'\n props['point_type'] = point_types\n layer.properties = props\n\n color_manager = getattr(layer, f'_{attribute}')\n color_cycle_map = color_manager.categorical_colormap.colormap\n color_map_keys = [*color_cycle_map]\n assert 'C' in color_map_keys\n\n\n@pytest.mark.parametrize(\"attribute\", ['edge', 'face'])\ndef test_color_colormap(attribute):\n \"\"\"Test setting edge/face color with a colormap\"\"\"\n # create Points using a colormap\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n properties = {'point_type': _make_cycled_properties([0, 1.5], shape[0])}\n points_kwargs = {\n 'properties': properties,\n f'{attribute}_color': 'point_type',\n f'{attribute}_colormap': 'gray',\n }\n layer = Points(data, **points_kwargs)\n assert layer.properties == properties\n color_mode = getattr(layer, f'{attribute}_color_mode')\n assert color_mode == 'colormap'\n color_array = transform_color(['black', 'white'] * int(shape[0] / 2))\n attribute_color = getattr(layer, f'{attribute}_color')\n assert np.all(attribute_color == color_array)\n\n # change the color cycle - face_color should not change\n setattr(layer, f'{attribute}_color_cycle', ['red', 'blue'])\n attribute_color = getattr(layer, f'{attribute}_color')\n assert np.all(attribute_color == color_array)\n\n # Add new point and test its color\n coord = [18, 18]\n layer.selected_data = {0}\n layer.add(coord)\n attribute_color = getattr(layer, f'{attribute}_color')\n assert len(attribute_color) == shape[0] + 1\n np.testing.assert_allclose(\n attribute_color,\n np.vstack((color_array, transform_color('black'))),\n )\n\n # Check removing data adjusts colors correctly\n layer.selected_data = {0, 2}\n layer.remove_selected()\n assert len(layer.data) == shape[0] - 1\n attribute_color = getattr(layer, f'{attribute}_color')\n assert len(attribute_color) == shape[0] - 1\n np.testing.assert_allclose(\n attribute_color,\n np.vstack(\n (\n 
color_array[1],\n color_array[3:],\n transform_color('black'),\n )\n ),\n )\n\n # adjust the clims\n setattr(layer, f'{attribute}_contrast_limits', (0, 3))\n attribute_color = getattr(layer, f'{attribute}_color')\n np.testing.assert_allclose(attribute_color[-2], [0.5, 0.5, 0.5, 1])\n\n # change the colormap\n new_colormap = 'viridis'\n setattr(layer, f'{attribute}_colormap', new_colormap)\n attribute_colormap = getattr(layer, f'{attribute}_colormap')\n assert attribute_colormap.name == new_colormap\n\n\ndef test_size():\n \"\"\"Test setting size with scalar.\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n layer = Points(data)\n assert layer.current_size == 10\n assert layer.size.shape == shape\n assert np.unique(layer.size)[0] == 10\n\n # Add a new point, it should get current size\n coord = [17, 17]\n layer.add(coord)\n assert layer.size.shape == (11, 2)\n assert np.unique(layer.size)[0] == 10\n\n # Setting size affects newly added points, not current points\n layer.current_size = 20\n assert layer.current_size == 20\n assert layer.size.shape == (11, 2)\n assert np.unique(layer.size)[0] == 10\n\n # Add new point, should have new size\n coord = [18, 18]\n layer.add(coord)\n assert layer.size.shape == (12, 2)\n assert np.unique(layer.size[:11])[0] == 10\n assert np.all(layer.size[11] == [20, 20])\n\n # Select data and change size\n layer.selected_data = {0, 1}\n assert layer.current_size == 10\n layer.current_size = 16\n assert layer.size.shape == (12, 2)\n assert np.unique(layer.size[2:11])[0] == 10\n assert np.unique(layer.size[:2])[0] == 16\n\n # Select data and size changes\n layer.selected_data = {11}\n assert layer.current_size == 20\n\n\ndef test_size_with_arrays():\n \"\"\"Test setting size with arrays.\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n layer = Points(data)\n sizes = 5 * np.random.random(shape)\n layer.size = sizes\n assert np.all(layer.size == sizes)\n\n # Test broadcasting of sizes\n sizes = [5, 5]\n layer.size = sizes\n assert np.all(layer.size[0] == sizes)\n\n # Test broadcasting of transposed sizes\n sizes = np.random.randint(low=1, high=5, size=shape[::-1])\n layer.size = sizes\n np.testing.assert_equal(layer.size, sizes.T)\n\n # Un-broadcastable array should raise an exception\n bad_sizes = np.random.randint(low=1, high=5, size=(3, 8))\n with pytest.raises(ValueError):\n layer.size = bad_sizes\n\n # Create new layer with new size array data\n sizes = 5 * np.random.random(shape)\n layer = Points(data, size=sizes)\n assert layer.current_size == 10\n assert layer.size.shape == shape\n assert np.all(layer.size == sizes)\n\n # Create new layer with new size array data\n sizes = [5, 5]\n layer = Points(data, size=sizes)\n assert layer.current_size == 10\n assert layer.size.shape == shape\n assert np.all(layer.size[0] == sizes)\n\n # Add new point, should have new size\n coord = [18, 18]\n layer.current_size = 13\n layer.add(coord)\n assert layer.size.shape == (11, 2)\n assert np.unique(layer.size[:10])[0] == 5\n assert np.all(layer.size[10] == [13, 13])\n\n # Select data and change size\n layer.selected_data = {0, 1}\n assert layer.current_size == 5\n layer.current_size = 16\n assert layer.size.shape == (11, 2)\n assert np.unique(layer.size[2:10])[0] == 5\n assert np.unique(layer.size[:2])[0] == 16\n\n # Check removing data adjusts sizes correctly\n layer.selected_data = {0, 2}\n layer.remove_selected()\n assert len(layer.data) == 9\n assert len(layer.size) == 9\n assert np.all(layer.size[0] == 
[16, 16])\n assert np.all(layer.size[1] == [5, 5])\n\n\ndef test_size_with_3D_arrays():\n \"\"\"Test setting size with 3D arrays.\"\"\"\n shape = (10, 3)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n data[:2, 0] = 0\n layer = Points(data)\n assert layer.current_size == 10\n assert layer.size.shape == shape\n assert np.unique(layer.size)[0] == 10\n\n sizes = 5 * np.random.random(shape)\n layer.size = sizes\n assert np.all(layer.size == sizes)\n\n # Test broadcasting of sizes\n sizes = [1, 5, 5]\n layer.size = sizes\n assert np.all(layer.size[0] == sizes)\n\n # Create new layer with new size array data\n sizes = 5 * np.random.random(shape)\n layer = Points(data, size=sizes)\n assert layer.current_size == 10\n assert layer.size.shape == shape\n assert np.all(layer.size == sizes)\n\n # Create new layer with new size array data\n sizes = [1, 5, 5]\n layer = Points(data, size=sizes)\n assert layer.current_size == 10\n assert layer.size.shape == shape\n assert np.all(layer.size[0] == sizes)\n\n # Add new point, should have new size in last dim only\n coord = [4, 18, 18]\n layer.current_size = 13\n layer.add(coord)\n assert layer.size.shape == (11, 3)\n assert np.unique(layer.size[:10, 1:])[0] == 5\n assert np.all(layer.size[10] == [1, 13, 13])\n\n # Select data and change size\n layer.selected_data = {0, 1}\n assert layer.current_size == 5\n layer.current_size = 16\n assert layer.size.shape == (11, 3)\n assert np.unique(layer.size[2:10, 1:])[0] == 5\n assert np.all(layer.size[0] == [16, 16, 16])\n\n # Create new 3D layer with new 2D points size data\n sizes = [0, 5, 5]\n layer = Points(data, size=sizes)\n assert layer.current_size == 10\n assert layer.size.shape == shape\n assert np.all(layer.size[0] == sizes)\n\n # Add new point, should have new size only in last 2 dimensions\n coord = [4, 18, 18]\n layer.current_size = 13\n layer.add(coord)\n assert layer.size.shape == (11, 3)\n assert np.all(layer.size[10] == [0, 13, 13])\n\n # Select data and change size\n layer.selected_data = {0, 1}\n assert layer.current_size == 5\n layer.current_size = 16\n assert layer.size.shape == (11, 3)\n assert np.unique(layer.size[2:10, 1:])[0] == 5\n assert np.all(layer.size[0] == [0, 16, 16])\n\n\ndef test_copy_and_paste():\n \"\"\"Test copying and pasting selected points.\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n layer = Points(data)\n # Clipboard starts empty\n assert layer._clipboard == {}\n\n # Pasting empty clipboard doesn't change data\n layer._paste_data()\n assert len(layer.data) == 10\n\n # Copying with nothing selected leaves clipboard empty\n layer._copy_data()\n assert layer._clipboard == {}\n\n # Copying and pasting with two points selected adds to clipboard and data\n layer.selected_data = {0, 1}\n layer._copy_data()\n layer._paste_data()\n assert len(layer._clipboard.keys()) > 0\n assert len(layer.data) == shape[0] + 2\n assert np.all(layer.data[:2] == layer.data[-2:])\n\n # Pasting again adds two more points to data\n layer._paste_data()\n assert len(layer.data) == shape[0] + 4\n assert np.all(layer.data[:2] == layer.data[-2:])\n\n # Unselecting everything and copying and pasting will empty the clipboard\n # and add no new data\n layer.selected_data = {}\n layer._copy_data()\n layer._paste_data()\n assert layer._clipboard == {}\n assert len(layer.data) == shape[0] + 4\n\n\ndef test_value():\n \"\"\"Test getting the value of the data at the current coordinates.\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n 
data[-1] = [0, 0]\n layer = Points(data)\n value = layer.get_value((0, 0))\n assert value == 9\n\n layer.data = layer.data + 20\n value = layer.get_value((0, 0))\n assert value is None\n\n\ndef test_message():\n \"\"\"Test converting value and coords to message.\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n data[-1] = [0, 0]\n layer = Points(data)\n msg = layer.get_status((0,) * 2)\n assert type(msg) == str\n\n\ndef test_thumbnail():\n \"\"\"Test the image thumbnail for square data.\"\"\"\n shape = (10, 2)\n np.random.seed(0)\n data = 20 * np.random.random(shape)\n data[0] = [0, 0]\n data[-1] = [20, 20]\n layer = Points(data)\n layer._update_thumbnail()\n assert layer.thumbnail.shape == layer._thumbnail_shape\n\n\ndef test_thumbnail_with_n_points_greater_than_max():\n \"\"\"Test thumbnail generation with n_points > _max_points_thumbnail\n\n see: https://github.com/napari/napari/pull/934\n \"\"\"\n # 2D\n max_points = Points._max_points_thumbnail * 2\n bigger_data = np.random.randint(10, 100, (max_points, 2))\n big_layer = Points(bigger_data)\n big_layer._update_thumbnail()\n assert big_layer.thumbnail.shape == big_layer._thumbnail_shape\n\n # 3D\n bigger_data_3d = np.random.randint(10, 100, (max_points, 3))\n bigger_layer_3d = Points(bigger_data_3d)\n bigger_layer_3d._slice_dims(ndisplay=3)\n bigger_layer_3d._update_thumbnail()\n assert bigger_layer_3d.thumbnail.shape == bigger_layer_3d._thumbnail_shape\n\n\ndef test_view_data():\n coords = np.array([[0, 1, 1], [0, 2, 2], [1, 3, 3], [3, 3, 3]])\n layer = Points(coords)\n\n layer._slice_dims([0, slice(None), slice(None)])\n assert np.all(\n layer._view_data == coords[np.ix_([0, 1], layer._dims_displayed)]\n )\n\n layer._slice_dims([1, slice(None), slice(None)])\n assert np.all(\n layer._view_data == coords[np.ix_([2], layer._dims_displayed)]\n )\n\n layer._slice_dims([1, slice(None), slice(None)], ndisplay=3)\n assert np.all(layer._view_data == coords)\n\n\ndef test_view_size():\n coords = np.array([[0, 1, 1], [0, 2, 2], [1, 3, 3], [3, 3, 3]])\n sizes = np.array([[3, 5, 5], [3, 5, 5], [3, 3, 3], [2, 2, 3]])\n layer = Points(coords, size=sizes, n_dimensional=False)\n\n layer._slice_dims([0, slice(None), slice(None)])\n assert np.all(\n layer._view_size == sizes[np.ix_([0, 1], layer._dims_displayed)]\n )\n\n layer._slice_dims([1, slice(None), slice(None)])\n assert np.all(\n layer._view_size == sizes[np.ix_([2], layer._dims_displayed)]\n )\n\n layer.n_dimensional = True\n assert len(layer._view_size) == 3\n\n # test a slice with no points\n layer.n_dimensional = False\n layer._slice_dims([2, slice(None), slice(None)])\n assert np.all(layer._view_size == [])\n\n\ndef test_view_colors():\n coords = [[0, 1, 1], [0, 2, 2], [1, 3, 3], [3, 3, 3]]\n face_color = np.array(\n [[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1], [0, 0, 1, 1]]\n )\n edge_color = np.array(\n [[0, 0, 1, 1], [1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]]\n )\n\n layer = Points(coords, face_color=face_color, edge_color=edge_color)\n layer._slice_dims([0, slice(None), slice(None)])\n assert np.all(layer._view_face_color == face_color[[0, 1]])\n assert np.all(layer._view_edge_color == edge_color[[0, 1]])\n\n layer._slice_dims([1, slice(None), slice(None)])\n assert np.all(layer._view_face_color == face_color[[2]])\n assert np.all(layer._view_edge_color == edge_color[[2]])\n\n # view colors should return empty array if there are no points\n layer._slice_dims([2, slice(None), slice(None)])\n assert len(layer._view_face_color) == 0\n assert 
len(layer._view_edge_color) == 0\n\n\ndef test_interaction_box():\n \"\"\"Test the boxes calculated for selected points\"\"\"\n data = [[3, 3]]\n size = 2\n layer = Points(data, size=size)\n\n # get a box with no points selected\n index = []\n box = layer.interaction_box(index)\n assert box is None\n\n # get a box with a point selected\n index = [0]\n expected_box = points_to_squares(data, size)\n box = layer.interaction_box(index)\n assert np.all([np.isin(p, expected_box) for p in box])\n\n\ndef test_world_data_extent():\n \"\"\"Test extent after applying transforms.\"\"\"\n data = [(7, -5, 0), (-2, 0, 15), (4, 30, 12)]\n min_val = (-2, -5, 0)\n max_val = (7, 30, 15)\n layer = Points(data)\n extent = np.array((min_val, max_val))\n check_layer_world_data_extent(layer, extent, (3, 1, 1), (10, 20, 5))\n\n\ndef test_slice_data():\n data = [\n (10, 2, 4),\n (10 + 2 * 1e-7, 4, 6),\n (8, 1, 7),\n (10.1, 7, 2),\n (10 - 2 * 1e-7, 1, 6),\n ]\n layer = Points(data)\n assert len(layer._slice_data((8, slice(None), slice(None)))[0]) == 1\n assert len(layer._slice_data((10, slice(None), slice(None)))[0]) == 3\n assert (\n len(layer._slice_data((10 + 2 * 1e-12, slice(None), slice(None)))[0])\n == 3\n )\n assert len(layer._slice_data((10.1, slice(None), slice(None)))[0]) == 1\n\n\ndef test_scale_init():\n layer = Points(None, scale=(1, 1, 1, 1))\n assert layer.ndim == 4\n layer1 = Points([], scale=(1, 1, 1, 1))\n assert layer1.ndim == 4\n layer2 = Points([])\n assert layer2.ndim == 2\n\n with pytest.raises(ValueError):\n Points([[1, 1, 1]], scale=(1, 1, 1, 1))\n\n\ndef test_update_none():\n layer = Points([(1, 2, 3), (1, 3, 2)])\n assert layer.ndim == 3\n assert layer.data.size == 6\n layer.data = None\n assert layer.ndim == 3\n assert layer.data.size == 0\n layer.data = [(1, 2, 3), (1, 3, 2)]\n assert layer.ndim == 3\n assert layer.data.size == 6\n\n\ndef test_prepare_properties():\n layer = Points([(1, 2, 3), (1, 3, 2)])\n properties, choices = layer._prepare_properties({\"aa\": [1, 2]})\n assert list(properties.keys()) == [\"aa\"]\n assert np.array_equal(properties[\"aa\"], [1, 2])\n assert list(choices.keys()) == [\"aa\"]\n assert np.array_equal(choices[\"aa\"], [1, 2])\n assert layer._prepare_properties({}) == ({}, {})\n assert layer._prepare_properties({}, {}) == ({}, {})\n properties, choices = layer._prepare_properties({}, {\"aa\": [1, 2]})\n assert list(properties.keys()) == [\"aa\"]\n assert np.array_equal(properties[\"aa\"], [None, None])\n assert list(choices.keys()) == [\"aa\"]\n assert np.array_equal(choices[\"aa\"], [1, 2])\n properties, choices = layer._prepare_properties(\n {\"aa\": [1, 3]}, {\"aa\": [1, 2]}\n )\n assert list(properties.keys()) == [\"aa\"]\n assert np.array_equal(properties[\"aa\"], [1, 3])\n assert list(choices.keys()) == [\"aa\"]\n assert np.array_equal(choices[\"aa\"], [1, 2, 3])\n properties, choices = layer._prepare_properties(\n {\"aa\": [1, 3]}, {\"aa\": [1, 2], \"bb\": [7, 6]}\n )\n assert list(properties.keys()) == [\"aa\"]\n assert np.array_equal(properties[\"aa\"], [1, 3])\n assert list(choices.keys()) == [\"aa\"]\n assert np.array_equal(choices[\"aa\"], [1, 2, 3])\n properties, choices = layer._prepare_properties(\n {\"aa\": [1, 3]}, {\"aa\": [1, 2], \"bb\": [7, 6]}, save_choices=True\n )\n assert list(properties.keys()) == [\"aa\", \"bb\"]\n assert np.array_equal(properties[\"aa\"], [1, 3])\n assert np.array_equal(properties[\"bb\"], [None, None])\n assert list(choices.keys()) == [\"aa\", \"bb\"]\n assert np.array_equal(choices[\"aa\"], [1, 2, 3])\n assert 
np.array_equal(choices[\"bb\"], [6, 7])\n\n layer = Points([(1, 2, 3), (1, 3, 2), (1, 3, 3)])\n properties, choices = layer._prepare_properties({\"aa\": [1, 2, 1]})\n assert np.array_equal(properties[\"aa\"], [1, 2, 1])\n assert np.array_equal(choices[\"aa\"], [1, 2])\n\n\ndef test_set_face_color_mode_after_set_properties():\n # See GitHub issue for more details:\n # https://github.com/napari/napari/issues/2755\n np.random.seed(0)\n num_points = 3\n points = Points(np.random.random((num_points, 2)))\n\n points.properties = {\n 'cat': np.random.randint(low=0, high=num_points, size=num_points),\n 'cont': np.random.random(num_points),\n }\n\n # Initially the color_mode is DIRECT, which means that the face ColorManager\n # has no color_properties, so the first property is used with a warning.\n with pytest.warns(UserWarning):\n points.face_color_mode = 'cycle'\n\n first_property_key, first_property_values = next(\n iter(points.properties.items())\n )\n expected_properties = ColorProperties(\n name=first_property_key,\n values=first_property_values,\n current_value=first_property_values[-1],\n )\n assert points._face.color_properties == expected_properties\n" ]
[ [ "numpy.asarray", "pandas.DataFrame", "numpy.all", "numpy.concatenate", "numpy.random.randint", "numpy.testing.assert_equal", "numpy.ix_", "numpy.unique", "numpy.repeat", "numpy.isin", "numpy.append", "numpy.testing.assert_allclose", "numpy.array", "numpy.random.random", "numpy.random.seed", "numpy.array_equal", "numpy.empty", "numpy.add", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
hpgit/HumanFoot
[ "f9a1a341b7c43747bddcd5584b8c98a0d1ac2973", "f9a1a341b7c43747bddcd5584b8c98a0d1ac2973", "f9a1a341b7c43747bddcd5584b8c98a0d1ac2973", "f9a1a341b7c43747bddcd5584b8c98a0d1ac2973", "f9a1a341b7c43747bddcd5584b8c98a0d1ac2973", "f9a1a341b7c43747bddcd5584b8c98a0d1ac2973" ]
[ "DartWalkingFoot/main_DartTrackingFoot1.py", "MomentumProject/foot_example_segfoot_constraint/mtOptimize.py", "DartMuscle/muscle.py", "PyCommon/modules/Simulator/hpQPSimulator.py", "DartWalkingFoot/main_TrackingFoot0.py", "PyCommon/externalLibs/qpOASES/interfaces/python/examples/example2.py" ]
[ "from fltk import *\nimport copy\nimport os.path\nfrom cPickle import load\n# import time\nimport numpy as np\n\nimport sys\nif \"..\" not in sys.path:\n sys.path.append(\"..\")\n\nfrom PyCommon.modules.ArticulatedBody import hpBipedFeedback as hbf\n\nfrom PyCommon.modules.Math import mmMath as mm\nfrom PyCommon.modules.Math import ysFunctionGraph as yfg\nfrom PyCommon.modules.Renderer import ysRenderer as yr\n# from PyCommon.modules.Simulator import ysVpUtil as yvu\nfrom PyCommon.modules.GUI import ysSimpleViewer_ori as ysv\nfrom PyCommon.modules.GUI import ysMultiViewer as ymv\n# from PyCommon.modules.ArticulatedBody import ysControl as yct\n# from PyCommon.modules.ArticulatedBody import ysReferencePoints as yrp\nfrom PyCommon.modules.Motion import ysMotionAnalysis as yma\nfrom PyCommon.modules.Motion import ysBipedAnalysis as yba\nfrom PyCommon.modules.Motion import ysMotion as ym\nfrom PyCommon.modules.Motion import ysMotionBlend as ymb\nfrom PyCommon.modules.Motion import ysMotionExtend as ymt\n# from PyCommon.modules.Motion import ysSkeletonEdit as yhe\nfrom PyCommon.modules.Motion import mmAnalyticIK as aik\nfrom PyCommon.modules.Util import ysMatplotEx as ymp\nfrom PyCommon.modules.Resource import ysMotionLoader as yf\nfrom PyCommon.modules.Simulator import ysPhysConfig as ypc\n\nfrom PyCommon.modules.Simulator import hpDartLCPSimulator as hdls\nfrom PyCommon.modules.GUI import hpSimpleViewer as hsv\nfrom PyCommon.modules.Util import ysPythonEx as ype\n\nfrom PyCommon.modules import pydart2 as pydart\nfrom PyCommon.modules.Simulator import csDartModel as cpm\nfrom pdcontroller import PDController\n\nfrom PyCommon.modules.ArticulatedBody import hpFootIK as hfi\n\nimport math\n# from matplotlib import collections\n\nimport multiprocessing as mp\nimport cma\n\ncurrent_path = os.path.dirname(os.path.abspath(__file__))\n\n# MOTION_COLOR = (128,128,128)\n# CHARACTER_COLOR = (102,102,153)\nMOTION_COLOR = (213, 111, 162)\nCHARACTER_COLOR = (20, 166, 188)\n\nMAX_FRAME = 1500\n\nSEGMENT_FOOT = True\nSEGMENT_FOOT_MAG = .03\nSEGMENT_FOOT_RAD = SEGMENT_FOOT_MAG * .5\n\ndef buildMassMap():\n massMap = {}\n massMap = massMap.fromkeys(['Head', 'Head_Effector', 'Hips',\n 'LeftArm', 'LeftFoot', 'LeftForeArm', 'LeftHand', 'LeftHand_Effector',\n 'LeftLeg', 'LeftShoulder1', 'LeftUpLeg',\n 'RightArm', 'RightFoot', 'RightForeArm', 'RightHand', 'RightHand_Effector',\n 'RightLeg', 'RightShoulder', 'RightUpLeg',\n 'Spine', 'Spine1',\n 'RightFoot_foot_0_0', 'RightFoot_foot_0_1', 'RightFoot_foot_0_1_Effector',\n 'RightFoot_foot_1_0', 'RightFoot_foot_1_1', 'RightFoot_foot_1_1_Effector',\n 'RightFoot_foot_2_0', 'RightFoot_foot_2_1', 'RightFoot_foot_2_1_Effector',\n 'LeftFoot_foot_0_0', 'LeftFoot_foot_0_1', 'LeftFoot_foot_0_1_Effector',\n 'LeftFoot_foot_1_0', 'LeftFoot_foot_1_1', 'LeftFoot_foot_1_1_Effector',\n 'LeftFoot_foot_2_0', 'LeftFoot_foot_2_1', 'LeftFoot_foot_2_1_Effector',\n ], 0.)\n\n # torso : 10\n massMap['Hips'] += 2.\n massMap['Spine'] += 8.\n\n # head : 3\n massMap['Spine1'] += 3.\n\n # right upper arm : 2\n massMap['RightArm'] += 2.\n\n # left upper arm : 2\n massMap['LeftArm'] += 2.\n\n # right lower arm : 1\n massMap['RightForeArm'] = 1.\n # massMap['RightForeArm'] = 2.\n\n # left lower arm : 1\n massMap['LeftForeArm'] = 1.\n # massMap['LeftForeArm'] = 2.\n\n # right thigh : 7\n massMap['Hips'] += 2.\n massMap['RightUpLeg'] += 5.\n\n # left thigh : 7\n massMap['Hips'] += 2.\n massMap['LeftUpLeg'] += 5.\n\n # right shin : 5\n massMap['RightLeg'] += 5.\n\n # left shin : 5\n massMap['LeftLeg'] 
+= 5.\n\n # right foot : 4\n massMap['RightFoot'] += 2.\n # massMap['RightFoot'] += .4\n\n # left foot : 4\n massMap['LeftFoot'] += 2.\n # massMap['LeftFoot'] += .4\n '''\n massMap['RightFoot_foot_0_0'] = .3\n massMap['RightFoot_foot_0_1'] = .3\n massMap['RightFoot_foot_1_0'] = .3\n massMap['RightFoot_foot_1_1'] = .3\n massMap['RightFoot_foot_2_0'] = .3\n massMap['RightFoot_foot_2_1'] = .3\n massMap['LeftFoot_foot_0_0'] = .3\n massMap['LeftFoot_foot_0_1'] = .3\n massMap['LeftFoot_foot_1_0'] = .3\n massMap['LeftFoot_foot_1_1'] = .3\n massMap['LeftFoot_foot_2_0'] = .3\n massMap['LeftFoot_foot_2_1'] = .3\n #'''\n\n massMap['RightFoot_foot_0_0'] = .1\n massMap['RightFoot_foot_0_1'] = .1\n massMap['RightFoot_foot_0_0_0'] = .1\n massMap['RightFoot_foot_0_1_0'] = .1\n massMap['RightFoot_foot_1_0'] = .1\n massMap['RightFoot_foot_1_1'] = .1\n massMap['RightFoot_foot_1_2'] = .1\n massMap['LeftFoot_foot_0_0'] = .1\n massMap['LeftFoot_foot_0_1'] = .1\n massMap['LeftFoot_foot_0_0_0'] = .1\n massMap['LeftFoot_foot_0_1_0'] = .1\n massMap['LeftFoot_foot_1_0'] = .1\n massMap['LeftFoot_foot_1_1'] = .1\n massMap['LeftFoot_foot_1_2'] = .1\n\n return massMap\n\n\ndef buildMcfg():\n massMap = buildMassMap()\n mcfg = ypc.ModelConfig()\n mcfg.defaultDensity = 1000.\n mcfg.defaultBoneRatio = .9\n\n totalMass = 0.\n for name in massMap:\n node = mcfg.addNode(name)\n node.mass = massMap[name]\n # totalMass += node.mass\n\n # width : x axis on body frame\n # height: y axis on body frame\n # length: z axis on body frame\n node = mcfg.getNode('Hips')\n node.length = .2\n node.width = .25\n\n node = mcfg.getNode('Spine1')\n node.length = .2\n node.offset = (0,0,0.1)\n\n node = mcfg.getNode('Spine')\n node.width = .22\n\n node = mcfg.getNode('RightFoot')\n node.length = .25\n # node.length = .27\n # node.offset = (0,0,0.01)\n node.width = .1\n node.geom = 'MyFoot1'\n\n node = mcfg.getNode('LeftFoot')\n node.length = .25\n # node.length = .27\n # node.offset = (0,0,0.01)\n node.width = .1\n node.geom = 'MyFoot1'\n\n def capsulize(node_name):\n node_capsule = mcfg.getNode(node_name)\n node_capsule.geom = 'MyFoot4'\n node_capsule.width = 0.01\n node_capsule.density = 200.\n # node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0., math.pi/4., 0.])], ypc.CapsuleMaterial(1000., .02, .2))\n # node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0., math.pi/4., 0.])], ypc.CapsuleMaterial(1000., .02, .1))\n # node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0., 0., 0.])], ypc.CapsuleMaterial(1000., .01, -1))\n # node.addGeom('MyFoot4', None, ypc.CapsuleMaterial(1000., .02, .1))\n\n # capsulize('RightFoot')\n # capsulize('LeftFoot')\n\n if SEGMENT_FOOT:\n node = mcfg.getNode('RightFoot')\n node.density = 200.\n node.geom = 'MyFoot5'\n node.width = 0.01\n node.jointType = 'B'\n\n node = mcfg.getNode('LeftFoot')\n node.density = 200.\n node.geom = 'MyFoot5'\n node.width = 0.01\n node.jointType = 'B'\n\n # bird foot\n # capsulize('RightFoot_foot_0_0')\n # capsulize('RightFoot_foot_0_1')\n # capsulize('RightFoot_foot_1_0')\n # capsulize('RightFoot_foot_1_1')\n # capsulize('RightFoot_foot_2_0')\n # capsulize('RightFoot_foot_2_1')\n # capsulize('LeftFoot_foot_0_0')\n # capsulize('LeftFoot_foot_0_1')\n # capsulize('LeftFoot_foot_1_0')\n # capsulize('LeftFoot_foot_1_1')\n # capsulize('LeftFoot_foot_2_0')\n # capsulize('LeftFoot_foot_2_1')\n\n\n # human foot\n if SEGMENT_FOOT:\n footJointType = 'B'\n capsulDensity = 400.\n\n # RightFoot_foot_0_0 : outside metatarsals\n capsulize('RightFoot_foot_0_0')\n node = 
mcfg.getNode('RightFoot_foot_0_0')\n node.addGeom('MyFoot3', [SEGMENT_FOOT_MAG*np.array([-0.3, 0., 2.5*0.25]), mm.exp([0., -math.atan2(1.2, 2.5), 0.])],\n ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, SEGMENT_FOOT_MAG*2.5 + 2.*SEGMENT_FOOT_RAD))\n node.addGeom('MyFoot3', [SEGMENT_FOOT_MAG*np.array([-0.3-1.2, 0., 2.5*0.25]), mm.exp([0., -math.atan2(1.2, 2.5), 0.])],\n ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, SEGMENT_FOOT_MAG*2.5 + 2.*SEGMENT_FOOT_RAD))\n # node.addGeom('MyFoot4', [0.02*np.array([-1.2, 0., 0.]), mm.exp([0., 0., 0.])], ypc.CapsuleMaterial(1000., .01, -1))\n node.jointType = footJointType\n\n # RightFoot_foot_0_0_0 : outside phalanges\n capsulize('RightFoot_foot_0_0_0')\n node = mcfg.getNode('RightFoot_foot_0_0_0')\n node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.addGeom('MyFoot4', [SEGMENT_FOOT_MAG*np.array([-1.2, 0., 0.]), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.jointType = footJointType\n\n # RightFoot_foot_0_1 : inside metatarsals\n capsulize('RightFoot_foot_0_1')\n node = mcfg.getNode('RightFoot_foot_0_1')\n node.addGeom('MyFoot3', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.addGeom('MyFoot3', [SEGMENT_FOOT_MAG*np.array([1.2, 0., 0.]), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity,SEGMENT_FOOT_RAD, -1))\n node.jointType = footJointType\n\n # RightFoot_foot_0_1_0 : inside phalanges\n capsulize('RightFoot_foot_0_1_0')\n node = mcfg.getNode('RightFoot_foot_0_1_0')\n node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.addGeom('MyFoot4', [SEGMENT_FOOT_MAG*np.array([1.2, 0., 0.]), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.jointType = footJointType\n\n # RightFoot_foot_1_0 : center heel\n capsulize('RightFoot_foot_1_0')\n node = mcfg.getNode('RightFoot_foot_1_0')\n node.addGeom('MyFoot3', [SEGMENT_FOOT_MAG*np.array([0., 0., .7]), mm.exp([0.]*3)],\n ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, SEGMENT_FOOT_MAG*2. 
+ SEGMENT_FOOT_RAD * 2.))\n # node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(1000., .01, -1))\n node.jointType = footJointType\n\n # RightFoot_foot_1_1 : inside heel\n capsulize('RightFoot_foot_1_1')\n node = mcfg.getNode('RightFoot_foot_1_1')\n node.addGeom('MyFoot3', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.jointType = footJointType\n\n # RightFoot_foot_1_2 : outside heel\n capsulize('RightFoot_foot_1_2')\n node = mcfg.getNode('RightFoot_foot_1_2')\n node.addGeom('MyFoot3', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.jointType = footJointType\n\n\n capsulize('LeftFoot_foot_0_0')\n node = mcfg.getNode('LeftFoot_foot_0_0')\n node.addGeom('MyFoot3', [SEGMENT_FOOT_MAG*np.array([0.3, 0., 2.5*0.25]), mm.exp([0., math.atan2(1.2, 2.5), 0.])],\n ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, SEGMENT_FOOT_MAG*2.5+2.*SEGMENT_FOOT_RAD))\n node.addGeom('MyFoot3', [SEGMENT_FOOT_MAG*np.array([0.3+1.2, 0., 2.5*0.25]), mm.exp([0., math.atan2(1.2, 2.5), 0.])],\n ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, SEGMENT_FOOT_MAG*2.5+2.*SEGMENT_FOOT_RAD))\n # node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(1000., .01, -1))\n node.jointType = footJointType\n\n capsulize('LeftFoot_foot_0_0_0')\n node = mcfg.getNode('LeftFoot_foot_0_0_0')\n node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.addGeom('MyFoot4', [SEGMENT_FOOT_MAG*np.array([1.2, 0., 0.]), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.jointType = footJointType\n\n capsulize('LeftFoot_foot_0_1')\n node = mcfg.getNode('LeftFoot_foot_0_1')\n node.addGeom('MyFoot3', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.addGeom('MyFoot3', [SEGMENT_FOOT_MAG*np.array([-1.2, 0., 0.]), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.jointType = footJointType\n\n capsulize('LeftFoot_foot_0_1_0')\n node = mcfg.getNode('LeftFoot_foot_0_1_0')\n node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.addGeom('MyFoot4', [SEGMENT_FOOT_MAG*np.array([-1.2, 0., 0.]), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.jointType = footJointType\n\n capsulize('LeftFoot_foot_1_0')\n node = mcfg.getNode('LeftFoot_foot_1_0')\n node.addGeom('MyFoot3', [SEGMENT_FOOT_MAG*np.array([0., 0., .7]), mm.exp([0.]*3)],\n ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, SEGMENT_FOOT_MAG*2.0+2.*SEGMENT_FOOT_RAD))\n # node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(1000., .01, -1))\n node.jointType = footJointType\n\n capsulize('LeftFoot_foot_1_1')\n node = mcfg.getNode('LeftFoot_foot_1_1')\n node.addGeom('MyFoot3', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.jointType = footJointType\n\n capsulize('LeftFoot_foot_1_2')\n node = mcfg.getNode('LeftFoot_foot_1_2')\n node.addGeom('MyFoot3', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(capsulDensity, SEGMENT_FOOT_RAD, -1))\n node.jointType = footJointType\n\n\n return mcfg\n\n\ndef walkings(params, isCma=True):\n \"\"\"\n\n :type params: list[float]\n :return:\n \"\"\"\n class ForceInfo:\n def __init__(self, startFrame, duration, force):\n self.startFrame = 
startFrame # frame\n self.duration = duration # sec\n self.force = force # Newton\n self.targetBody = None\n\n #===============================================================================\n # load motion\n #===============================================================================\n MULTI_VIEWER = False\n CAMERA_TRACKING = False\n TORQUE_PLOT = False\n NO_FOOT_SLIDING = True\n\n # global parameters\n # Kt = 50.\n Kt = 300.\n Dt = 2.*(Kt**.5)\n # Dt = Kt/900.\n Ks = 1000.\n Ds = 2.*(Ks**.5)\n mu = 1.\n # Dt = 0.\n\n # constants\n c_min_contact_vel = 100.\n # c_min_contact_vel = 2.\n c_min_contact_time = .7\n c_landing_duration = .2\n c_taking_duration = .3\n # c_swf_mid_offset = .02\n c_swf_mid_offset = .0\n c_locking_vel = .05\n\n c_swf_offset = .0\n # c_swf_offset = .01\n # c_swf_offset = .005\n K_stp_pos = 0.\n\n # c5 = .5; c6 = .01\n c5 = .5; c6 = .02\n # c5 = .5; c6 = .05\n # c5 = 1.; c6 = .05\n # c5 = .0; c6 = .0\n\n K_stb_vel = .1\n K_stb_pos = .1\n\n OLD_SWING_HEIGHT = False\n # OLD_SWING_HEIGHT = True\n # HIGHER_OFFSET = True\n HIGHER_OFFSET = False\n\n motionDir = current_path+'/ppmotion/'\n # motionDir = './ppmotion/'\n #\n ## K_swp_vel_sag = .1; K_swp_vel_cor = .4; K_swp_pos_sag = .3; K_swp_pos_cor = 0.\n # K_swp_vel_sag = .05; K_swp_vel_cor = .2; K_swp_pos_sag = .2; K_swp_pos_cor = .2\n # K_swp_pos_sag_faster = .05\n # filename = 'wd2_WalkSameSame01.bvh'\n ## filename = 'wd2_WalkSameSame01_REPEATED.bvh'\n\n ## K_swp_vel_sag = .1; K_swp_vel_cor = .4; K_swp_pos_sag = .3; K_swp_pos_cor = 0.\n # K_swp_vel_sag = .05; K_swp_vel_cor = .25; K_swp_pos_sag = .5; K_swp_pos_cor = .2\n # K_swp_pos_sag_faster = .05\n # filename = 'wd2_WalkForwardSlow01.bvh'\n ## filename = 'wd2_WalkForwardSlow01_REPEATED.bvh' # 3 frame diff\n\n # K_swp_vel_sag = .1; K_swp_vel_cor = .4; K_swp_pos_sag = 1.; K_swp_pos_cor = 0.\n # K_stp_pos = .6\n K_swp_vel_sag = .0; K_swp_vel_cor = .3; K_swp_pos_sag = 1.2; K_swp_pos_cor = .2\n # K_swp_vel_sag = .0; K_swp_vel_cor = 1.3; K_swp_pos_sag = 1.2; K_swp_pos_cor = 1.\n K_swp_pos_sag_faster = .05\n # filename = 'wd2_WalkForwardNormal00.bvh'\n filename = 'wd2_WalkForwardNormal00_REPEATED.bvh'\n if SEGMENT_FOOT:\n filename = 'segfoot_'+filename\n\n ## K_swp_vel_sag = .1; K_swp_vel_cor = .4; K_swp_pos_sag = .3; K_swp_pos_cor = 0.\n ## K_stp_pos = 0.\n # K_swp_vel_sag = .05; K_swp_vel_cor = .2; K_swp_pos_sag = .3; K_swp_pos_cor = .2\n # K_swp_pos_sag_faster = .05\n ## filename = 'wd2_WalkHandWav00.bvh'\n # filename = 'wd2_WalkHandWav00_REPEATED.bvh'\n\n # mu = 2.\n ## K_swp_vel_sag = .1; K_swp_vel_cor = .4; K_swp_pos_sag = .3; K_swp_pos_cor = 0.\n ## K_stp_pos = 0.\n # K_swp_vel_sag = .0; K_swp_vel_cor = .3; K_swp_pos_sag = .2; K_swp_pos_cor = .2\n # K_swp_pos_sag_faster = .0\n ## filename = 'wd2_WalkAzuma01.bvh'\n # filename = 'wd2_WalkAzuma01_REPEATED.bvh' # 2 frame diff\n\n ## K_swp_vel_sag = .1; K_swp_vel_cor = .4; K_swp_pos_sag = 1.; K_swp_pos_cor = 0.\n ## K_stp_pos = 0.\n # K_swp_vel_sag = .0; K_swp_vel_cor = .3; K_swp_pos_sag = .2; K_swp_pos_cor = .2\n # K_swp_pos_sag_faster = .05\n ## filename = 'wd2_WalkSoldier00.bvh' # K_swp_pos_sag = .0\n # filename = 'wd2_WalkSoldier00_REPEATED.bvh'\n\n # mu = 2.\n # # K_swp_vel_sag = .2; K_swp_vel_cor = .4; K_swp_pos_sag = .5;K_swp_pos_cor = 0.\n # # K_stp_pos = 0.\n # K_swp_vel_sag = .05; K_swp_vel_cor = .3; K_swp_pos_sag = .5; K_swp_pos_cor = .2\n # K_swp_pos_sag_faster = .05\n # # filename = 'wd2_WalkForwardVFast00.bvh'\n # filename = 'wd2_WalkForwardVFast00_REPEATED.bvh'\n\n ## K_swp_vel_sag = .0; K_swp_vel_cor = 
.4; K_swp_pos_sag = .04; K_swp_pos_cor = .1\n ## K_swp_pos_sag_faster = .02\n ## K_stb_vel = .2\n # K_swp_vel_sag = .1; K_swp_vel_cor = .3; K_swp_pos_sag = 1.; K_swp_pos_cor = .3\n # K_swp_pos_sag_faster = .0\n # K_stb_vel = .3\n ## filename = 'wd2_WalkBackward00.bvh'\n # filename = 'wd2_WalkBackward00_REPEATED.bvh'\n\n\n # parameters\n if params is not None:\n _params = np.around(params, decimals=3)\n Ks = 1000.\n Ds = 2.*(Ks**.5)\n c_min_contact_vel = 100.\n # c_min_contact_vel = 2.\n c_min_contact_time = .7\n c_landing_duration = .2\n c_taking_duration = .3\n c_swf_mid_offset = .02\n # c_swf_mid_offset = .0\n c_locking_vel = .05\n\n c_swf_offset = .0\n # c_swf_offset = .01\n # c_swf_offset = .005\n K_stp_pos = _params[0]*_params[0]\n c5 = _params[1]*_params[1]\n c6 = _params[2]*_params[2]\n K_stb_vel = _params[3]*_params[3]\n K_stb_pos = _params[4]*_params[4]\n K_swp_vel_sag = _params[5]*_params[5]\n K_swp_vel_cor = _params[6]*_params[6]\n K_swp_pos_sag = _params[7]*_params[7]\n K_swp_pos_cor = _params[8]*_params[8]\n K_swp_pos_sag_faster = _params[9]*_params[9]\n\n # motion\n bvh = yf.readBvhFileAsBvh(motionDir+filename)\n\n if SEGMENT_FOOT:\n # partBvhFilePath = '../PyCommon/modules/samples/simpleJump_long_test2.bvh'\n partBvhFilePath = current_path+'/../PyCommon/modules/samples/simpleJump_long_test2.bvh'\n partBvh = yf.readBvhFileAsBvh(partBvhFilePath)\n bvh.replaceJointFromBvh('RightFoot', partBvh, SEGMENT_FOOT_MAG)\n partBvh = yf.readBvhFileAsBvh(partBvhFilePath)\n partBvh.mirror('YZ')\n bvh.replaceJointFromBvh('LeftFoot', partBvh, SEGMENT_FOOT_MAG)\n\n motion_ori = bvh.toJointMotion(1., False)\n\n # motion_ori = yf.readBvhFile(motionDir+filename)\n frameTime = 1/motion_ori.fps\n\n if 'REPEATED' in filename:\n REPEATED = True\n CAMERA_TRACKING = True\n else:\n REPEATED = False\n\n #===============================================================================\n # options\n #===============================================================================\n SEGMENT_EDITING = True\n STANCE_FOOT_STABILIZE = True\n MATCH_STANCE_LEG = True\n SWING_FOOT_PLACEMENT = True\n SWING_FOOT_HEIGHT = True\n\n SWING_FOOT_ORIENTATION = False\n\n STANCE_FOOT_PUSH = True\n STANCE_FOOT_BALANCING = True\n # STANCE_FOOT_BALANCING = False\n\n SWING_FOOT_CLEARANCE = True\n\n SEGMENT_GAIN_ADJUST = True\n\n stitch_func = lambda xx : 1. 
- yfg.hermite2nd(xx)\n stf_stabilize_func = yfg.concatenate([yfg.hermite2nd, yfg.one], [c_landing_duration])\n match_stl_func = yfg.hermite2nd\n swf_placement_func = yfg.hermite2nd\n # swf_placement_func = yfg.identity\n swf_height_func = yfg.hermite2nd\n swf_height_sine_func = yfg.sine\n # stf_balancing_func = yfg.concatenate([yfg.hermite2nd, yfg.one], [c_landing_duration])\n stf_balancing_func = yfg.hermite2nd\n # stf_balancing_func = yfg.hermite5th\n\n # forceInfos = [ForceInfo(70, .4, (100,0,0))]\n forceInfos = []\n\n #===============================================================================\n # initialize character\n #===============================================================================\n # mcfgfile = open(dir + 'mcfg', 'r')\n # mcfg = cPickle.load(mcfgfile)\n # mcfgfile.close()\n\n mcfg = buildMcfg()\n\n wcfg = ypc.WorldConfig()\n wcfg.planeHeight = 0.\n wcfg.useDefaultContactModel = False\n wcfg.lockingVel = c_locking_vel\n stepsPerFrame = 50\n wcfg.timeStep = frameTime/stepsPerFrame\n\n pydart.init()\n dartModel = cpm.DartModel(wcfg, motion_ori[0], mcfg, False)\n dartMotionModel = None # type: cpm.DartModel\n if not isCma:\n dartMotionModel = cpm.DartModel(wcfg, motion_ori[0], mcfg)\n # q = dartModel.skeleton.q\n # q[0:3] = mm.logSO3(motion_ori.getJointOrientationGlobal(0, 0))\n # q[3:6] = motion_ori.getJointPositionGlobal(0, 0)\n # dartModel.skeleton.set_positions(q)\n # q[3:6] = motion_ori.getJointPositionGlobal(0, 0)\n # pdController = PDController(dartModel.skeleton, wcfg.timeStep, Kt=1000., Dt=50.)\n pdController = PDController(dartModel.skeleton, wcfg.timeStep)\n # dartModel.skeleton.set_controller(pdController)\n # dartModel.world.set_gravity(np.array((0., 0., 0.)))\n dartModel.initializeHybridDynamics()\n dartModel.initializeForwardDynamics()\n\n # dartModel.skeleton.inv_mass_matrix()\n\n # print(dartModel.skeleton.coriolis_and_gravity_forces())\n\n # dartModel.getJoint('LeftFoot').set_actuator_type(pydart.Joint.FORCE)\n # dartModel.getJoint('RightFoot').set_actuator_type(pydart.Joint.FORCE)\n\n #===============================================================================\n # load segment info\n #===============================================================================\n skeleton = motion_ori[0].skeleton\n\n segname = os.path.splitext(filename)[0]+'.seg'\n segfile = open(motionDir+segname, 'r')\n seginfo = load(segfile)\n segfile.close()\n\n if not isCma:\n for seg in seginfo:\n print(seg)\n\n intervals = [info['interval'] for info in seginfo]\n states = [info['state'] for info in seginfo]\n temp_motion = copy.deepcopy(motion_ori)\n segments = yma.splitMotionIntoSegments(temp_motion, intervals)\n if not isCma:\n print(len(intervals), 'segments')\n for i in range(len(intervals)):\n print('%dth'%i, yba.GaitState.text[states[i]], intervals[i], ',',)\n print(\"\")\n\n motion_seg_orig = ym.JointMotion()\n motion_seg_orig += segments[0]\n motion_seg = ym.JointMotion()\n motion_seg += segments[0]\n motion_stitch = ym.JointMotion()\n motion_stitch += segments[0]\n\n motion_stf_stabilize = ym.JointMotion()\n motion_match_stl = ym.JointMotion()\n motion_swf_placement = ym.JointMotion()\n motion_swf_height = ym.JointMotion()\n motion_swf_orientation = ym.JointMotion()\n motion_stf_balancing = ym.JointMotion()\n motion_stf_push = ym.JointMotion()\n motion_control = ym.JointMotion()\n\n motion_debug1 = ym.JointMotion()\n motion_debug2 = ym.JointMotion()\n motion_debug3 = ym.JointMotion()\n\n P = ym.JointMotion()\n P_hat = ym.JointMotion()\n M_tc = 
ym.JointMotion()\n M_hat_tc_1 = ym.JointMotion()\n\n #===============================================================================\n # loop variable\n #===============================================================================\n seg_index = [0]\n acc_offset = [0]\n extended = [False]\n prev_R_swp = [None]\n stl_y_limit_num = [0]\n stl_xz_limit_num = [0]\n avg_dCM = [mm.O_Vec3()]\n # avg_stf_v = [mm.O_Vec3()]\n # avg_stf_av = [mm.O_Vec3()]\n\n # stf_push_func = [yfg.zero]\n step_length_cur = [0.]\n\n step_length_tar = [0.]\n step_axis = [mm.O_Vec3()]\n #===============================================================================\n # information\n #===============================================================================\n bodyIDsToCheck = range(dartModel.getBodyNum())\n # bodyIDsToCheck = [dartModel.getBody(\"LeftFoot\").index_in_skeleton(), dartModel.getBody(\"RightFoot\").index_in_skeleton()]\n mus = [mu]*len(bodyIDsToCheck)\n\n totalMass = dartModel.getTotalMass()\n # bodyMasses = controlModel.getBodyMasses()\n # totalMass = controlModel.getTotalMass()\n\n # hwangpil\n # extendedFootName = ['Foot_foot_0_0', 'Foot_foot_0_1', 'Foot_foot_1_0',\n # 'Foot_foot_1_1', 'Foot_foot_2_0', 'Foot_foot_2_1']\n\n extendedFootName = ['Foot_foot_0_0', 'Foot_foot_0_1', 'Foot_foot_0_0_0', 'Foot_foot_0_1_0', 'Foot_foot_1_0',\n 'Foot_foot_1_1', 'Foot_foot_1_2']\n\n # extendedFootName = ['Foot_foot_0_1', 'Foot_foot_1_1', 'Foot_foot_2_1']\n\n\n ToeName = ['Foot_foot_0_0_0', 'Foot_foot_0_1_0']\n HeelName = ['Foot_foot_1_0', 'Foot_foot_1_1', 'Foot_foot_1_2']\n\n lIDs = [skeleton.getJointIndex('Left'+name) for name in extendedFootName]\n rIDs = [skeleton.getJointIndex('Right'+name) for name in extendedFootName]\n\n lIDdic = {'Left'+name:skeleton.getJointIndex('Left'+name) for name in extendedFootName}\n rIDdic = {'Right'+name:skeleton.getJointIndex('Right'+name) for name in extendedFootName}\n footIdDic = lIDdic.copy()\n footIdDic.update(rIDdic)\n\n lToes = [skeleton.getJointIndex('Left'+name) for name in ToeName]\n rToes = [skeleton.getJointIndex('Right'+name) for name in ToeName]\n\n lHeels = [skeleton.getJointIndex('Left'+name) for name in HeelName]\n rHeels = [skeleton.getJointIndex('Right'+name) for name in HeelName]\n\n footDofNames = [] # type: list[str]\n footDofNames += sum(list(['j_Left'+name+'_x', 'j_Left'+name+'_y', 'j_Left'+name+'_z'] for name in extendedFootName), [])\n footDofNames += sum(list(['j_Right'+name+'_x', 'j_Right'+name+'_y', 'j_Right'+name+'_z'] for name in extendedFootName), [])\n\n footDofs = None\n if SEGMENT_FOOT:\n footDofs = dartModel.skeleton.dof_indices(footDofNames)\n LeftFootDofs = dartModel.skeleton.dof_indices(['j_LeftFoot_x','j_LeftFoot_y','j_LeftFoot_z'])\n RightFootDofs = dartModel.skeleton.dof_indices(['j_RightFoot_x','j_RightFoot_y','j_RightFoot_z'])\n\n # controlled foot joint dofs\n if SEGMENT_FOOT:\n variableDofIdx = dartModel.skeleton.dof_indices(footDofNames)\n # joint dofs except foot joint\n specifiedDofIdx = list(range(dartModel.getTotalDOF()))\n for dofidx in variableDofIdx:\n specifiedDofIdx.remove(dofidx)\n\n # for i in lIDs+rIDs:\n # controlModel.setHybridDynamics(i, \"DYNAMIC\")\n\n # each dof is whether KINEMATIC or not\n hdAccMask = [True]*dartModel.getTotalDOF()\n hdAccMask[:6] = [False]*6\n # for i in lIDs+rIDs:\n # hdAccMask[3+3*i : 6+3*i] = [False]*3\n\n # for i in range(1, len(dartModel.skeleton.joints)):\n # dartModel.skeleton.joints[i].set_actuator_type(pydart.Joint.ACCELERATION)\n\n\n lID = 
dartModel.skeleton.bodynode_index('LeftFoot')\n rID = dartModel.skeleton.bodynode_index('RightFoot')\n\n lUpLeg = skeleton.getJointIndex('LeftUpLeg');rUpLeg = skeleton.getJointIndex('RightUpLeg')\n lKnee = skeleton.getJointIndex('LeftLeg'); rKnee = skeleton.getJointIndex('RightLeg')\n lFoot = skeleton.getJointIndex('LeftFoot'); rFoot = skeleton.getJointIndex('RightFoot')\n spine = skeleton.getJointIndex('Spine')\n\n uppers = [skeleton.getJointIndex(name) for name in ['Hips', 'Spine', 'Spine1', 'LeftArm', 'LeftForeArm', 'RightArm', 'RightForeArm']]\n # upperMass = sum([bodyMasses[i] for i in uppers])\n lLegs = [skeleton.getJointIndex(name) for name in ['LeftUpLeg', 'LeftLeg', 'LeftFoot']]\n rLegs = [skeleton.getJointIndex(name) for name in ['RightUpLeg', 'RightLeg', 'RightFoot']]\n allJoints = set(range(skeleton.getJointNum()))\n\n\n '''\n footMass = sum([bodyMasses[i] for i in lIDs]) + bodyMasses[lID]\n HeelMass = sum([bodyMasses[i] for i in lHeels])\n ToeMass = sum([bodyMasses[i] for i in lToes])\n print('totalMass: ', totalMass)\n print('footMass: ', footMass)\n print('heelmass: ', HeelMass)\n print('ToeMass: ', ToeMass)\n #'''\n\n halfFootHeight = SEGMENT_FOOT_RAD\n if not SEGMENT_FOOT:\n halfFootHeight = dartModel.getBody(lFoot).shapenodes[0].shape.size()[1]/2.\n\n for fi in forceInfos:\n fi.targetBody = spine\n\n #hwangpil\n prev_contact_count = [0]\n\n #===========================================================================\n # data collection\n #===========================================================================\n rhip_torques = []\n rknee_torques = []\n rankle_torques = []\n\n #===============================================================================\n # rendering\n #===============================================================================\n rd_CM = [None]; rd_CP = [None]; rd_CMP = [None]\n rd_forces = [None]; rd_force_points = [None]\n rd_torques = []; rd_joint_positions = []\n\n rd_point1 = [None]\n rd_point2 = [None]\n rd_vec1 = [None]; rd_vecori1 = [None]\n rd_vec2 = [None]; rd_vecori2 = [None]\n rd_frame1 = [None]\n rd_frame2 = [None]\n\n rd_cForces = [None]\n rd_cPositions = [None]\n rd_cForcesControl = [None]\n rd_cPositionsControl = [None]\n\n viewer = None\n plot = None\n # plot = ymp.InteractivePlot()\n\n def getParamVal(paramname):\n return viewer.objectInfoWnd.getVal(paramname)\n\n # renderer settings\n if not isCma:\n if MULTI_VIEWER:\n viewer = ymv.MultiViewer(800, 655)\n # viewer = ymv.MultiViewer(800, 655, True)\n viewer.setRenderers1([yr.DartModelRenderer(dartMotionModel, MOTION_COLOR)])\n viewer.setRenderers2([yr.DartModelRenderer(dartModel, (200, 200, 0))])\n else:\n # viewer = ysv.SimpleViewer()\n # viewer = hsv.hpSimpleViewer(viewForceWnd=True)\n viewer = hsv.hpSimpleViewer(viewForceWnd=False)\n # viewer.record(False)\n if not isCma:\n viewer.doc.addRenderer('motionModel', yr.DartModelRenderer(dartMotionModel, (0,150,255), yr.POLYGON_LINE))\n viewer.doc.addRenderer('controlModel', yr.DartModelRenderer(dartModel, (50, 200, 200)))\n\n viewer.doc.addObject('motion_ori', motion_ori)\n viewer.doc.addObject('motion_stf_stabilize', motion_stf_stabilize)\n viewer.doc.addObject('motion_match_stl', motion_match_stl)\n viewer.doc.addObject('motion_swf_placement', motion_swf_placement)\n viewer.doc.addObject('motion_swf_height', motion_swf_height)\n viewer.doc.addObject('motion_swf_orientation', motion_swf_orientation)\n viewer.doc.addObject('motion_stf_push', motion_stf_push)\n viewer.doc.addObject('motion_stf_balancing', 
motion_stf_balancing)\n viewer.doc.addObject('motion_control', motion_control)\n\n viewer.doc.addRenderer('motion_ori', yr.JointMotionRenderer(motion_ori, (0,100,255), yr.LINK_BONE))\n motion_ori.resourceName = 'motion_ori'\n # viewer.doc.addRenderer('motion_seg_orig', yr.JointMotionRenderer(motion_seg_orig, (0,100,255), yr.LINK_BONE))\n # viewer.doc.addRenderer('motion_seg', yr.JointMotionRenderer(motion_seg, (0,150,255), yr.LINK_BONE))\n # viewer.doc.addRenderer('motion_stitch', yr.JointMotionRenderer(motion_stitch, (0,255,200), yr.LINK_BONE))\n\n viewer.doc.addRenderer('motion_match_stl', yr.JointMotionRenderer(motion_match_stl, (255,200,0), yr.LINK_BONE))\n viewer.doc.addRenderer('motion_swf_placement', yr.JointMotionRenderer(motion_swf_placement, (255,100,255), yr.LINK_BONE))\n viewer.doc.addRenderer('motion_swf_height', yr.JointMotionRenderer(motion_swf_height, (50,255,255), yr.LINK_BONE))\n viewer.doc.addRenderer('motion_stf_push', yr.JointMotionRenderer(motion_stf_push, (50,255,200), yr.LINK_BONE))\n viewer.doc.addRenderer('motion_stf_stabilize', yr.JointMotionRenderer(motion_stf_stabilize, (255,0,0), yr.LINK_BONE))\n viewer.doc.addRenderer('motion_stf_balancing', yr.JointMotionRenderer(motion_stf_balancing, (255,100,255), yr.LINK_BONE))\n viewer.doc.addRenderer('motion_control', yr.JointMotionRenderer(motion_control, (255,0,0), yr.LINK_BONE))\n # viewer.doc.addRenderer('motion_swf_orientation', yr.JointMotionRenderer(motion_swf_orientation, (255,100,0), yr.LINK_BONE))\n motion_stf_stabilize.resourceName = 'motion_stf_stabilize'\n motion_match_stl.resourceName = 'motion_match_stl'\n motion_swf_placement.resourceName = 'motion_swf_placement'\n motion_swf_height.resourceName = 'motion_swf_height'\n motion_swf_orientation.resourceName = 'motion_swf_orientation'\n motion_stf_push.resourceName = 'motion_stf_push'\n motion_stf_balancing.resourceName = 'motion_stf_balancing'\n motion_control.resourceName = 'motion_control'\n\n # viewer.doc.addRenderer('motion_debug1', yr.JointMotionRenderer(motion_debug1, (0,255,0), yr.LINK_BONE))\n # viewer.doc.addRenderer('motion_debug2', yr.JointMotionRenderer(motion_debug2, (255,0,255), yr.LINK_BONE))\n # viewer.doc.addRenderer('motion_debug3', yr.JointMotionRenderer(motion_debug3, (255,255,0), yr.LINK_BONE))\n\n # viewer.doc.addRenderer('M_tc', yr.JointMotionRenderer(M_tc, (255,255,0), yr.LINK_BONE))\n # viewer.doc.addRenderer('P_hat', yr.JointMotionRenderer(P_hat, (255,255,0), yr.LINK_BONE))\n # viewer.doc.addRenderer('P', yr.JointMotionRenderer(P, (255,255,0), yr.LINK_BONE))\n # viewer.doc.addRenderer('M_hat_tc_1', yr.JointMotionRenderer(M_hat_tc_1, (255,255,0), yr.LINK_BONE))\n\n # viewer.doc.addRenderer('rd_CM', yr.PointsRenderer(rd_CM, (255,255,0)))\n # viewer.doc.addRenderer('rd_CP', yr.PointsRenderer(rd_CP, (255,0,0)))\n # viewer.doc.addRenderer('rd_CMP', yr.PointsRenderer(rd_CMP, (0,255,0)))\n # viewer.doc.addRenderer('forces', yr.ForcesRenderer(rd_forces, rd_force_points, (255,0,0), ratio=.01, fromPoint=False))\n # viewer.doc.addRenderer('torques', yr.VectorsRenderer(rd_torques, rd_joint_positions, (255,0,0)))\n\n viewer.doc.addRenderer('rd_contactForcesControl', yr.VectorsRenderer(rd_cForcesControl, rd_cPositionsControl, (255, 0, 0), .1, 'rd_c1'))\n viewer.doc.addRenderer('rd_contactForces', yr.VectorsRenderer(rd_cForces, rd_cPositions, (0, 255, 0), .1, 'rd_c2'))\n\n viewer.doc.addRenderer('rd_point1', yr.PointsRenderer(rd_point1, (0,255,0)))\n viewer.doc.addRenderer('rd_point2', yr.PointsRenderer(rd_point2, (255,0,0)))\n # 
viewer.doc.addRenderer('rd_vec1', yr.VectorsRenderer(rd_vec1, rd_vecori1, (255,0,0)))\n viewer.doc.addRenderer('rd_vec2', yr.VectorsRenderer(rd_vec2, rd_vecori2, (0,255,0)))\n # viewer.doc.addRenderer('rd_frame1', yr.FramesRenderer(rd_frame1, (0,200,200)))\n viewer.doc.addRenderer('rd_frame2', yr.FramesRenderer(rd_frame2, (200,200,0)))\n # viewer.setMaxFrame(len(motion_ori)-1)\n\n viewer.objectInfoWnd.add1DSlider(\"penalty_grf_gain\", 0., 5000., 10., Ks)\n viewer.objectInfoWnd.add1DSlider(\"c_min_contact_vel\", 0., 200., .2, 100.)\n viewer.objectInfoWnd.add1DSlider(\"c_min_contact_time\", 0., 5., .01, .7)\n viewer.objectInfoWnd.add1DSlider(\"c_landing_duration\", 0., 5., .01, .2)\n viewer.objectInfoWnd.add1DSlider(\"c_taking_duration\", 0., 5., .01, .3)\n viewer.objectInfoWnd.add1DSlider(\"c_swf_mid_offset\", -1., 1., .001, c_swf_mid_offset)\n viewer.objectInfoWnd.add1DSlider(\"c_locking_vel\", 0., 1., .001, .05)\n\n viewer.objectInfoWnd.add1DSlider(\"c_swf_offset\", -1., 1., .001, .01)\n viewer.objectInfoWnd.add1DSlider(\"K_stp_pos\", 0., 1., .01, 0.)\n\n viewer.objectInfoWnd.add1DSlider(\"c5\", 0., 5., .01, c5)\n viewer.objectInfoWnd.add1DSlider(\"c6\", 0., 1., .01, c6)\n viewer.objectInfoWnd.add1DSlider(\"K_stb_vel\", 0., 1., .01, K_stb_vel)\n viewer.objectInfoWnd.add1DSlider(\"K_stb_pos\", 0., 1., .01, K_stb_pos)\n viewer.objectInfoWnd.add1DSlider(\"K_swp_vel_sag\", 0., 5., .01, K_swp_vel_sag)\n viewer.objectInfoWnd.add1DSlider(\"K_swp_vel_cor\", 0., 5., .01, K_swp_vel_cor)\n viewer.objectInfoWnd.add1DSlider(\"K_swp_pos_sag\", 0., 5., .01, K_swp_pos_sag)\n viewer.objectInfoWnd.add1DSlider(\"K_swp_pos_cor\", 0., 5., .01, K_swp_pos_cor)\n viewer.objectInfoWnd.add1DSlider(\"K_swp_pos_sag_faster\",0., 1., .01, K_swp_pos_sag_faster)\n\n viewer.objectInfoWnd.add1DSlider(\"LeftFootKp\", 0., 500., 10., 300.)\n viewer.objectInfoWnd.add1DSlider(\"LeftFootKd\", 0., 100., 1., 30.)\n viewer.objectInfoWnd.add1DSlider(\"RightFootKp\", 0., 500., 10., 300.)\n viewer.objectInfoWnd.add1DSlider(\"RightFootKd\", 0., 100., 1., 30.)\n\n if viewer.cForceWnd is not None:\n viewer.cForceWnd.addDataSet('expForce', FL_BLACK)\n viewer.cForceWnd.addDataSet('desForceMin', FL_RED)\n viewer.cForceWnd.addDataSet('desForceMax', FL_RED)\n viewer.cForceWnd.addDataSet('realForce', FL_GREEN)\n\n if not REPEATED:\n viewer.setMaxFrame(len(motion_ori)-1)\n else:\n viewer.setMaxFrame(MAX_FRAME)\n\n if CAMERA_TRACKING:\n if MULTI_VIEWER:\n cameraTargets1 = [None] * (viewer.getMaxFrame()+1)\n cameraTargets2 = [None] * (viewer.getMaxFrame()+1)\n else:\n cameraTargets = [None] * (viewer.getMaxFrame()+1)\n\n if TORQUE_PLOT:\n rhip_torques = [0.]*viewer.getMaxFrame()\n rknee_torques = [0.]*viewer.getMaxFrame()\n rankle_torques = [0.]*viewer.getMaxFrame()\n\n\n # ===============================================================================\n # viewer setting for parameter setting\n # ===============================================================================\n\n # pt = [0.]\n def postFrameCallback_Always(frame):\n # if frame==1: pt[0] = time.time()\n # if frame==31: print 'elapsed time for 30 frames:', time.time()-pt[0]\n if CAMERA_TRACKING:\n if MULTI_VIEWER:\n if cameraTargets1[frame] is None:\n # cameraTargets1[frame] = motionModel.getBodyPositionGlobal(0)\n cameraTargets1[frame] = dartMotionModel.getBodyPositionGlobal(0)\n # cameraTargets1[frame] = motion_ori[frame].getJointPositionGlobal(0)\n viewer.setCameraTarget1(cameraTargets1[frame])\n\n if cameraTargets2[frame] is None:\n # cameraTargets2[frame] = 
controlModel.getJointPositionGlobal(0)\n cameraTargets2[frame] = dartModel.getJointPositionGlobal(0)\n viewer.setCameraTarget2(cameraTargets2[frame])\n\n else:\n if cameraTargets[frame] is None:\n cameraTargets[frame] = dartModel.getJointPositionGlobal(0)\n # cameraTargets[frame] = controlModel.getJointPositionGlobal(0)\n viewer.setCameraTarget(cameraTargets[frame])\n if plot is not None:\n plot.updateVline(frame)\n\n\n if not isCma:\n viewer.setPostFrameCallback_Always(postFrameCallback_Always)\n\n if plot is not None:\n plot.setXlimit(0, len(motion_ori))\n plot.setYlimit(-0.05, .05)\n plot.addDataSet('zero')\n plot.addDataSet('diff')\n plot.addDataSet('debug1')\n plot.addDataSet('debug2')\n\n\n def viewer_onClose(data):\n if plot is not None:\n plot.close()\n viewer.onClose(data)\n viewer.callback(viewer_onClose)\n\n if not isCma:\n for bodynode in dartModel.skeleton.bodynodes:\n print(bodynode.name, bodynode.mass())\n\n feedback = hbf.HpBipedFeedback(dartModel, motion_ori, seginfo)\n\n def simulateCallback(frame):\n if not isCma:\n print('frame: ', frame)\n # c_min_contact_vel, c_min_contact_time, c_landing_duration, \\\n # c_taking_duration, c_swf_mid_offset, c_locking_vel, c_swf_offset, \\\n # K_stp_pos, c5, c6, K_stb_vel, K_stb_pos, K_swp_vel_sag, K_swp_vel_cor, \\\n # K_swp_pos_sag, K_swp_pos_cor, K_swp_pos_sag_faster = viewer.objectInfoWnd.getVals()\n if not isCma:\n # if not isCma and params is None:\n Ks = getParamVal(\"penalty_grf_gain\")\n Ds = 2.*(Ks**.5)\n c_min_contact_vel = getParamVal(\"c_min_contact_vel\")\n c_min_contact_time = getParamVal(\"c_min_contact_time\")\n c_landing_duration = getParamVal(\"c_landing_duration\")\n c_taking_duration = getParamVal(\"c_taking_duration\")\n c_swf_mid_offset = getParamVal(\"c_swf_mid_offset\")\n c_locking_vel = getParamVal(\"c_locking_vel\")\n c_swf_offset = getParamVal(\"c_swf_offset\")\n K_stp_pos = getParamVal(\"K_stp_pos\")\n c5 = getParamVal(\"c5\")\n c6 = getParamVal(\"c6\")\n K_stb_vel = getParamVal(\"K_stb_vel\")\n K_stb_pos = getParamVal(\"K_stb_pos\")\n K_swp_vel_sag = getParamVal(\"K_swp_vel_sag\")\n K_swp_vel_cor = getParamVal(\"K_swp_vel_cor\")\n K_swp_pos_sag = getParamVal(\"K_swp_pos_sag\")\n K_swp_pos_cor = getParamVal(\"K_swp_pos_cor\")\n K_swp_pos_sag_faster = getParamVal(\"K_swp_pos_sag_faster\")\n elif params is not None:\n _params = np.around(params, decimals=3)\n Ks = 1000.\n Ds = 2. 
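#===============================================================================
# explanatory sketch (added): on the CMA path the optimizer searches an
# unconstrained vector and the controller squares each entry (the
# _params[i] * _params[i] assignments just below), so every feedback gain is
# non-negative by construction, with no bound constraints needed:
#===============================================================================
import numpy as np

def params_to_gains(params):
    p = np.around(np.asarray(params, dtype=float), decimals=3)
    return p * p   # elementwise square

# a search point with negative entries still yields valid gains
print(params_to_gains([-0.156, 0.224, -0.651]))  # [0.024336 0.050176 0.423801]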
* (Ks ** .5)\n c_min_contact_vel = 100.\n # c_min_contact_vel = 2.\n c_min_contact_time = .7\n c_landing_duration = .2\n c_taking_duration = .3\n c_swf_mid_offset = .02\n c_locking_vel = .05\n\n # c_swf_offset = .0\n c_swf_offset = .01\n # c_swf_offset = .005\n K_stp_pos = _params[0] * _params[0]\n c5 = _params[1] * _params[1]\n c6 = _params[2] * _params[2]\n K_stb_vel = _params[3] * _params[3]\n K_stb_pos = _params[4] * _params[4]\n K_swp_vel_sag = _params[5] * _params[5]\n K_swp_vel_cor = _params[6] * _params[6]\n K_swp_pos_sag = _params[7] * _params[7]\n K_swp_pos_cor = _params[8] * _params[8]\n K_swp_pos_sag_faster = _params[9] * _params[9]\n\n # feedback.refresh_frame_dyn_information(motion_seg, frame, avg_dCM)\n\n # seginfo\n segIndex = seg_index[0]\n curState = seginfo[segIndex]['state']\n cur_interval = yma.offsetInterval(acc_offset[0], seginfo[segIndex]['interval'])\n stanceLegs = seginfo[segIndex]['stanceHips']\n swingLegs = seginfo[segIndex]['swingHips']\n stanceFoots = seginfo[segIndex]['stanceFoots']\n swingFoots = seginfo[segIndex]['swingFoots']\n swingKnees = seginfo[segIndex]['swingKnees']\n groundHeight = seginfo[segIndex]['ground_height']\n maxStfPushFrame = seginfo[segIndex]['max_stf_push_frame']\n\n # hwangpil\n # temporary change\n for legList in (stanceLegs, swingLegs):\n for legIdx in range(len(legList)):\n if legList[legIdx] == 10:\n legList[legIdx] = skeleton.getJointIndex('RightUpLeg')\n\n for footList in (stanceFoots, swingFoots):\n for footIdx in range(len(footList)):\n if footList[footIdx] == 12:\n footList[footIdx] = skeleton.getJointIndex('RightFoot')\n\n stanceToes = []\n if skeleton.getJointIndex('LeftFoot') in stanceFoots:\n stanceToes.extend(lToes)\n if skeleton.getJointIndex('RightFoot') in stanceFoots:\n stanceToes.extend(rToes)\n\n stanceHeels = []\n if skeleton.getJointIndex('LeftFoot') in stanceFoots:\n stanceHeels.extend(lHeels)\n if skeleton.getJointIndex('RightFoot') in stanceFoots:\n stanceHeels.extend(rHeels)\n\n swingToes = []\n if skeleton.getJointIndex('LeftFoot') in swingFoots:\n swingToes.extend(lToes)\n if skeleton.getJointIndex('RightFoot') in swingFoots:\n swingToes.extend(rToes)\n\n swingHeels = []\n if skeleton.getJointIndex('LeftFoot') in swingFoots:\n swingHeels.extend(lHeels)\n if skeleton.getJointIndex('RightFoot') in swingFoots:\n swingHeels.extend(rHeels)\n\n prev_frame = frame-1 if frame>0 else 0\n\n # information\n dCM_tar = motion_seg.getJointVelocityGlobal(0, prev_frame)\n CM_tar = motion_seg.getJointPositionGlobal(0, prev_frame)\n stf_tar = motion_seg.getJointPositionGlobal(stanceFoots[0], prev_frame)\n CMr_tar = CM_tar - stf_tar\n\n # dCM : average velocity of root of controlModel over 1 frame\n dCM = avg_dCM[0]\n CM = dartModel.getBody(\"Hips\").com()\n CMreal = dartModel.getCOM()\n stf = dartModel.getJointPositionGlobal(stanceFoots[0])\n CMr = CM - stf\n\n # diff_dCM : diff of velocity of COM between current and desired\n diff_dCM = mm.projectionOnPlane(dCM-dCM_tar, (1,0,0), (0,0,1))\n # diff_dCM_axis : perpendicular of diff_dCM\n diff_dCM_axis = np.cross((0,1,0), diff_dCM)\n rd_vec1[0] = diff_dCM\n rd_vecori1[0] = CM_tar\n\n diff_CMr = mm.projectionOnPlane(CMr-CMr_tar, (1,0,0), (0,0,1))\n diff_CMr_axis = np.cross((0,1,0), diff_CMr)\n\n direction = mm.normalize2(mm.projectionOnPlane(dCM_tar, (1,0,0), (0,0,1)))\n directionAxis = np.cross((0,1,0), direction)\n\n diff_dCM_sag, diff_dCM_cor = mm.projectionOnVector2(diff_dCM, direction)\n diff_dCM_sag_axis = np.cross((0,1,0), diff_dCM_sag)\n diff_dCM_cor_axis = 
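#===============================================================================
# explanatory sketch (added): the balance-feedback geometry above drops the
# vertical component of each error vector, splits the remainder into
# sagittal/coronal parts along the travel direction, and converts each part
# into a horizontal rotation axis with an up-vector cross product:
#===============================================================================
import numpy as np

UP = np.array((0., 1., 0.))

def project_on_ground(v):
    return np.array((v[0], 0., v[2]))   # keep the xz (ground-plane) part

def split_sag_cor(v, direction):
    d = direction / np.linalg.norm(direction)
    sag = np.dot(v, d) * d              # component along travel direction
    return sag, v - sag                 # sagittal, coronal

diff = project_on_ground(np.array((0.3, 0.5, -0.1)))  # made-up COM-vel error
sag, cor = split_sag_cor(diff, np.array((1., 0., 0.)))
sag_axis, cor_axis = np.cross(UP, sag), np.cross(UP, cor)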
np.cross((0,1,0), diff_dCM_cor)\n\n diff_CMr_sag, diff_CMr_cor = mm.projectionOnVector2(diff_CMr, direction)\n diff_CMr_sag_axis = np.cross((0,1,0), diff_CMr_sag)\n diff_CMr_cor_axis = np.cross((0,1,0), diff_CMr_cor)\n\n t = (frame-cur_interval[0])/float(cur_interval[1]-cur_interval[0])\n t_raw = t\n if t>1.: t=1.\n\n\n p_root = motion_stitch[frame].getJointPositionGlobal(0)\n R_root = motion_stitch[frame].getJointOrientationGlobal(0)\n\n motion_seg_orig.goToFrame(frame)\n motion_seg.goToFrame(frame)\n motion_stitch.goToFrame(frame)\n\n motion_debug1.append(motion_stitch[frame].copy())\n motion_debug1.goToFrame(frame)\n motion_debug2.append(motion_stitch[frame].copy())\n motion_debug2.goToFrame(frame)\n motion_debug3.append(motion_stitch[frame].copy())\n motion_debug3.goToFrame(frame)\n\n # paper implementation\n M_tc.append(motion_stitch[prev_frame])\n M_tc.goToFrame(frame)\n P_hat.append(M_tc[frame].copy())\n P_hat.goToFrame(frame)\n\n # p_temp = ym.JointPosture(skeleton)\n # p_temp.rootPos = controlModel.getJointPositionGlobal(0)\n # p_temp.setJointOrientationsLocal(controlModel.getJointOrientationsLocal())\n # P.append(p_temp)\n # P.goToFrame(frame)\n\n '''\n # Jacobian Transpose Balance Control\n balanceKp = 100.\n balanceKd = 100.\n balanceDiff = dartMotionModel.getCOM() - dartModel.getCOM()\n balanceDiff[1] = 0.\n balanceVelDiff = -dartModel.skeleton.com_velocity()\n balanceVelDiff[1] = 0.\n balanceTorque = np.dot(dartModel.getBody('RightFoot').world_jacobian()[3:6].T,\n balanceKp*balanceDiff + balanceKd*balanceVelDiff)\n balanceTorque[:6] = np.array([0.]*6)\n '''\n\n '''\n # stance foot stabilize\n motion_stf_stabilize.append(motion_stitch[frame].copy())\n motion_stf_stabilize.goToFrame(frame)\n if STANCE_FOOT_STABILIZE:\n for stanceFoot in stanceFoots:\n R_target_foot = motion_seg[frame].getJointOrientationGlobal(stanceFoot)\n R_current_foot = motion_stf_stabilize[frame].getJointOrientationGlobal(stanceFoot)\n motion_stf_stabilize[frame].setJointOrientationGlobal(stanceFoot, mm.slerp(R_current_foot, R_target_foot , stf_stabilize_func(t)))\n # motion_stf_stabilize[frame].setJointOrientationGlobal(stanceFoot, cm.slerp(R_current_foot, R_target_foot , stf_stabilize_func(t)))\n # R_target_foot = motion_seg[frame].getJointOrientationLocal(stanceFoot)\n # R_current_foot = motion_stf_stabilize[frame].getJointOrientationLocal(stanceFoot)\n # motion_stf_stabilize[frame].setJointOrientationLocal(stanceFoot, cm.slerp(R_current_foot, R_target_foot , stf_stabilize_func(t)))\n #'''\n\n # match stance leg\n # motion_match_stl.append(motion_stf_stabilize[frame].copy())\n motion_match_stl.append(motion_stitch[frame].copy())\n motion_match_stl.goToFrame(frame)\n if MATCH_STANCE_LEG:\n # hbf.match_stance_leg(t, dartModel, motion_match_stl, frame, curState, stanceLegs)\n if curState!=yba.GaitState.STOP:\n for stanceLegIdx in range(len(stanceLegs)):\n stanceLeg = stanceLegs[stanceLegIdx]\n # stanceFoot = stanceFoots[stanceLegIdx]\n\n # motion stance leg -> character stance leg as time goes\n R_motion = motion_match_stl[frame].getJointOrientationGlobal(stanceLeg)\n R_character = dartModel.getJointOrientationGlobal(stanceLeg)\n motion_match_stl[frame].setJointOrientationGlobal(stanceLeg, mm.slerp(R_motion, R_character, match_stl_func(t)))\n\n\n # swing foot placement\n # TODO:\n # in segment foot case, hip has noise slitly\n motion_swf_placement.append(motion_match_stl[frame].copy())\n motion_swf_placement.goToFrame(frame)\n if SWING_FOOT_PLACEMENT:\n t_swing_foot_placement = swf_placement_func(t)\n\n if 
extended[0]:\n R_swp_sag = prev_R_swp[0][0]\n R_swp_cor = prev_R_swp[0][1]\n else:\n clampAngle = math.pi/6.\n R_swp_sag = mm.I_SO3(); R_swp_cor = mm.I_SO3()\n # R_swp_sag = np.dot(R_swp_sag, mm.exp(diff_dCM_sag_axis * K_swp_vel_sag * -t_swing_foot_placement))\n # R_swp_cor = np.dot(R_swp_cor, mm.exp(diff_dCM_cor_axis * K_swp_vel_cor * -t_swing_foot_placement))\n # if np.dot(direction, diff_CMr_sag) < 0:\n # R_swp_sag = np.dot(R_swp_sag, mm.exp(diff_CMr_sag_axis * K_swp_pos_sag * -t_swing_foot_placement))\n # else:\n # R_swp_sag = np.dot(R_swp_sag, mm.exp(diff_CMr_sag_axis * K_swp_pos_sag_faster * -t_swing_foot_placement))\n # R_swp_cor = np.dot(R_swp_cor, mm.exp(diff_CMr_cor_axis * K_swp_pos_cor * -t_swing_foot_placement))\n R_swp_sag = np.dot(R_swp_sag, mm.clampExp(diff_dCM_sag_axis * K_swp_vel_sag * -t_swing_foot_placement, clampAngle))\n R_swp_cor = np.dot(R_swp_cor, mm.clampExp(diff_dCM_cor_axis * K_swp_vel_cor * -t_swing_foot_placement, clampAngle))\n if np.dot(direction, diff_CMr_sag) < 0:\n R_swp_sag = np.dot(R_swp_sag, mm.clampExp(diff_CMr_sag_axis * K_swp_pos_sag * -t_swing_foot_placement, clampAngle))\n else:\n R_swp_sag = np.dot(R_swp_sag, mm.clampExp(diff_CMr_sag_axis * K_swp_pos_sag_faster * -t_swing_foot_placement, clampAngle))\n R_swp_cor = np.dot(R_swp_cor, mm.clampExp(diff_CMr_cor_axis * K_swp_pos_cor * -t_swing_foot_placement, clampAngle))\n\n for i in range(len(swingLegs)):\n swingLeg = swingLegs[i]\n swingFoot = swingFoots[i]\n\n # save swing foot global orientation\n R_swf = motion_swf_placement[frame].getJointOrientationGlobal(swingFoot)\n\n # rotate swing leg\n motion_swf_placement[frame].mulJointOrientationGlobal(swingLeg, R_swp_sag)\n motion_swf_placement[frame].mulJointOrientationGlobal(swingLeg, R_swp_cor)\n\n # hwangpil\n # temporal code.... 
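#===============================================================================
# explanatory sketch (added): mm.clampExp above keeps any single feedback step
# from requesting a rotation larger than clampAngle (pi/6 here). A plausible
# standalone equivalent, assuming clampExp caps the magnitude of the rotation
# vector before taking the SO(3) exponential (Rodrigues' formula):
#===============================================================================
import numpy as np

def clamp_exp(w, max_angle):
    angle = np.linalg.norm(w)
    if angle < 1e-12:
        return np.eye(3)
    if angle > max_angle:
        w = w * (max_angle / angle)     # same axis, capped magnitude
        angle = max_angle
    axis = w / angle
    K = np.array([[0., -axis[2], axis[1]],
                  [axis[2], 0., -axis[0]],
                  [-axis[1], axis[0], 0.]])
    return np.eye(3) + np.sin(angle) * K + (1. - np.cos(angle)) * (K @ K)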
for heel strike and ankle pushup\n # motion_swf_placement[frame].mulJointOrientationGlobal(swingFoot, mm.exp([0., 0., -0.17*t_swing_foot_placement]))\n # motion_swf_placement[frame].mulJointOrientationGlobal(swingFoot, mm.exp([0.2*t_swing_foot_placement, 0., 0.]))\n\n # hwangpil\n # foot placement based on difference\n # CM = dartModel.getBody(\"Hips\").com()\n swf = dartModel.getJointPositionGlobal(swingFoot)\n CMr_swf = CM - swf\n\n # CM_tar = motion_seg.getJointPositionGlobal(0, prev_frame)\n swf_tar = motion_seg[frame].getJointPositionGlobal(swingFoot)\n CMr_swf_tar = CM_tar - swf_tar\n\n CMr_swf_proj = mm.projectionOnPlane(CMr_swf, mm.unitX(), mm.unitY())\n CMr_swf_tar_proj = mm.projectionOnPlane(CMr_swf_tar, mm.unitX(), mm.unitY())\n\n angle = mm.getAngleFromVectors(CMr_swf_proj, CMr_swf_tar_proj)\n\n motion_swf_placement[frame].mulJointOrientationGlobal(swingLeg, mm.exp(mm.unitZ(), -.2*angle))\n\n # diff_CMr_swf = mm.projectionOnPlane(CMr_swf-CMr_swf_tar, (1,0,0), (0,0,1))\n #\n # newPosition = motion_swf_placement[frame].getJointPositionGlobal(swingFoot)\n # # newPosition += (diff_CMr_swf + diff_dCM)*t_swing_foot_placement\n # newPosition += 0.1*diff_CMr_swf * t_swing_foot_placement\n # aik.ik_analytic(motion_swf_placement[frame], swingFoot, newPosition)\n\n # restore swing foot global orientation\n motion_swf_placement[frame].setJointOrientationGlobal(swingFoot, R_swf)\n\n prev_R_swp[0] = (R_swp_sag, R_swp_cor)\n\n # swing foot height\n # TODO:\n # in segment foot case, hip has noise largely\n toe_offset = 0.\n motion_swf_height.append(motion_swf_placement[frame].copy())\n motion_swf_height.goToFrame(frame)\n if SWING_FOOT_HEIGHT:\n for swingFoot in swingFoots:\n stanceFoot = stanceFoots[0]\n\n # save foot global orientation\n R_foot = motion_swf_height[frame].getJointOrientationGlobal(swingFoot)\n R_stance_foot = motion_swf_height[frame].getJointOrientationGlobal(stanceFoot)\n\n d_height_tar = 0\n if OLD_SWING_HEIGHT:\n height_tar = motion_swf_height[frame].getJointPositionGlobal(swingFoot)[1] \\\n - motion_swf_height[frame].getJointPositionGlobal(stanceFoot)[1]\n else:\n height_tar = motion_swf_height[prev_frame].getJointPositionGlobal(swingFoot)[1] - groundHeight\n d_height_tar = motion_swf_height.getJointVelocityGlobal(swingFoot, prev_frame)[1]\n\n # rotate\n motion_swf_height[frame].rotateByTarget(dartModel.getJointOrientationGlobal(0))\n\n d_height_cur = 0\n if OLD_SWING_HEIGHT:\n height_cur = motion_swf_height[frame].getJointPositionGlobal(swingFoot)[1] \\\n - motion_swf_height[frame].getJointPositionGlobal(stanceFoot)[1]\n else:\n height_cur = dartModel.getJointPositionGlobal(swingFoot)[1] - halfFootHeight - c_swf_offset\n # height_cur = dartModel.getJointPositionGlobal(swingFoot)[1] - halfFootHeight\n d_height_cur = dartModel.getJointVelocityGlobal(swingFoot)[1]\n\n if OLD_SWING_HEIGHT:\n offset_height = (height_tar - height_cur) * swf_height_func(t) * c5\n else:\n offset_height = ((height_tar - height_cur) * c5\n + (d_height_tar - d_height_cur) * c6) * swf_height_func(t)\n\n offset_sine = c_swf_mid_offset * swf_height_sine_func(t)\n\n offset = 0.\n offset += offset_height\n offset += offset_sine\n\n if offset > 0.:\n newPosition = motion_swf_height[frame].getJointPositionGlobal(swingFoot)\n newPosition[1] += offset\n aik.ik_analytic(motion_swf_height[frame], swingFoot, newPosition)\n else:\n if HIGHER_OFFSET:\n newPosition = motion_swf_height[frame].getJointPositionGlobal(stanceFoot)\n newPosition[1] -= offset\n aik.ik_analytic(motion_swf_height[frame], stanceFoot, 
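#===============================================================================
# explanatory sketch (added): the swing-height rule above is a PD term on the
# swing foot's height error plus a half-sine mid-swing clearance bump, both
# faded by an easing weight; a positive result raises the swing-foot IK
# target. Default gains taken from the hand-tuned values in this file:
#===============================================================================
import math

def swing_height_offset(h_tar, h_cur, dh_tar, dh_cur, t,
                        c5=0.7, c6=0.02, c_swf_mid_offset=0.02):
    t = min(max(t, 0.), 1.)
    ease = 3.*t*t - 2.*t*t*t                         # hermite2nd weight
    pd_term = ((h_tar - h_cur) * c5 + (dh_tar - dh_cur) * c6) * ease
    bump = c_swf_mid_offset * math.sin(math.pi * t)  # peaks at mid-swing
    return pd_term + bump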
newPosition)\n\n motion_swf_height[frame].rotateByTarget(R_root)\n\n # restore foot global orientation\n motion_swf_height[frame].setJointOrientationGlobal(swingFoot, R_foot)\n motion_swf_height[frame].setJointOrientationGlobal(stanceFoot, R_stance_foot)\n\n toe_offset = offset\n\n if plot is not None:\n plot.addDataPoint('debug1', frame, offset_height)\n # plot.addDataPoint('debug2', frame, height_cur)\n # plot.addDataPoint('diff', frame, diff)\n\n # stance foot push\n motion_stf_push.append(motion_swf_height[frame].copy())\n motion_stf_push.goToFrame(frame)\n if STANCE_FOOT_PUSH:\n # for swingFoot in swingFoots:\n for stanceFoot in stanceFoots:\n stf_push_func = yfg.concatenate([yfg.sine, yfg.zero], [c_taking_duration*2])\n\n R_swp_sag = mm.exp((step_length_tar[0] - step_length_cur[0])*step_axis[0] * K_stp_pos * -stf_push_func(t))\n\n motion_stf_push[frame].mulJointOrientationGlobal(stanceFoot, R_swp_sag)\n\n # '''\n # stance foot stabilize\n motion_stf_stabilize.append(motion_stf_push[frame].copy())\n motion_stf_stabilize.goToFrame(frame)\n if STANCE_FOOT_STABILIZE:\n for stanceFoot in stanceFoots:\n R_target_foot = motion_stf_push[frame].getJointOrientationGlobal(stanceFoot)\n R_current_foot = motion_stf_stabilize[frame].getJointOrientationGlobal(stanceFoot)\n motion_stf_stabilize[frame].setJointOrientationGlobal(stanceFoot,\n mm.slerp(R_current_foot, R_target_foot, stf_stabilize_func(t)))\n #'''\n\n # stance foot balancing\n # motion_stf_balancing.append(motion_stf_push[frame].copy())\n # TODO:\n # in segment foot case, stance foot unstable\n motion_stf_balancing.append(motion_stf_stabilize[frame].copy())\n motion_stf_balancing.goToFrame(frame)\n if STANCE_FOOT_BALANCING:\n R_stb = mm.exp(diff_dCM_axis * K_stb_vel * stf_balancing_func(t))\n R_stb = np.dot(R_stb, mm.exp(diff_CMr_axis * K_stb_pos * stf_balancing_func(t)))\n for stanceFoot in stanceFoots:\n if frame < 5: break\n if t > 0.5: break #hwangpil\n motion_stf_balancing[frame].mulJointOrientationGlobal(stanceFoot, R_stb)\n\n\n # hwangpil\n if SEGMENT_FOOT:\n if SWING_FOOT_CLEARANCE:\n print(t)\n if 0.5 < t < 0.8:\n for swingToe in swingToes:\n toeAngle = -math.pi/6.\n motion_stf_balancing[frame].mulJointOrientationGlobal(swingToe, mm.rotZ(toeAngle))\n elif t<0.2:\n for swingToe in swingToes:\n toeAngle = math.pi/6.\n motion_stf_balancing[frame].mulJointOrientationGlobal(swingToe, mm.rotZ(toeAngle))\n\n # hwangpil\n # stance foot parallelizing with ground when contact is made\n if 0.1 < t < 0.9:\n pos_toe = [dartModel.getJointPositionGlobal(stanceToe) for stanceToe in stanceToes]\n pos_heel = dartModel.getJointPositionGlobal(stanceHeels[0])\n up_vec = np.cross(pos_toe[1] - pos_heel, pos_toe[0] - pos_heel)\n R_foot_diff = mm.getSO3FromVectors(mm.unitY(), up_vec)\n # R_foot_diff = mm.getSO3FromVectors(up_vec, mm.unitY())\n R_foot = mm.slerp(mm.I_SO3(), R_foot_diff, 0.05)\n motion_stf_balancing[frame].mulJointOrientationGlobal(stanceFoots[0], R_foot)\n\n # hwangpil\n # swing foot height control\n if False:\n for swing_foot in swingFoots:\n new_position = motion_seg[frame].getJointPositionGlobal(swing_foot)\n aik.ik_analytic(motion_stf_balancing[frame], swing_foot, new_position)\n\n # hwangpil\n # hip adjustizing\n if True:\n # get hip orientation on coronal plane\n hip_ori_cur = dartModel.getJointOrientationGlobal(0)\n hip_ori_tar = motion_stf_balancing[frame].getJointOrientationGlobal(0)\n\n hip_ori_cur_x = np.dot(hip_ori_cur, mm.unitX())\n hip_ori_cur_y = np.dot(hip_ori_cur, mm.unitY())\n hip_ori_cur_z = np.dot(hip_ori_cur, 
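#===============================================================================
# explanatory sketch (added): the stance-foot flattening above estimates the
# sole normal from two toe points and a heel point, then blends in a small
# fraction (0.05 per step) of the rotation mapping the world up axis onto
# that normal. Standalone rotation-between-vectors helper in the same spirit:
#===============================================================================
import numpy as np

def so3_from_vectors(a, b):
    # rotation R with R @ a_hat = b_hat (a, b not antiparallel)
    a = a / np.linalg.norm(a)
    b = b / np.linalg.norm(b)
    v = np.cross(a, b)
    c = float(np.dot(a, b))
    if np.linalg.norm(v) < 1e-12:
        return np.eye(3)
    K = np.array([[0., -v[2], v[1]], [v[2], 0., -v[0]], [-v[1], v[0], 0.]])
    return np.eye(3) + K + K @ K * ((1. - c) / float(np.dot(v, v)))

toe0, toe1 = np.array((0.1, 0., 0.2)), np.array((-0.1, 0., 0.2))
heel = np.zeros(3)                                  # made-up contact points
up_vec = np.cross(toe1 - heel, toe0 - heel)
R_correction = so3_from_vectors(np.array((0., 1., 0.)), up_vec)  # identity here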
mm.unitZ())\n hip_ori_cur_xy_2 = (hip_ori_cur_x + hip_ori_cur_y) * .5\n hip_ori_cur_yz_2 = (hip_ori_cur_y + hip_ori_cur_z) * .5\n hip_ori_cur_xz_2 = (hip_ori_cur_x + hip_ori_cur_z) * .5\n\n hip_ori_tar_x = np.dot(hip_ori_tar, mm.unitX())\n hip_ori_tar_y = np.dot(hip_ori_tar, mm.unitY())\n hip_ori_tar_z = np.dot(hip_ori_tar, mm.unitZ())\n hip_ori_tar_xy_2 = (hip_ori_tar_x + hip_ori_tar_y) * .5\n hip_ori_tar_yz_2 = (hip_ori_tar_y + hip_ori_tar_z) * .5\n hip_ori_tar_xz_2 = (hip_ori_tar_x + hip_ori_tar_z) * .5\n\n # hip_ori_cur_xy_2_projected = mm.projectionOnPlane(hip_ori_cur_xy_2, hip_ori_tar_x, hip_ori_tar_y)\n # hip_ori_cur_yz_2_projected = mm.projectionOnPlane(hip_ori_cur_yz_2, hip_ori_tar_y, hip_ori_tar_z)\n hip_ori_cur_xy_2_projected = mm.projectionOnPlane(hip_ori_cur_xy_2, mm.unitZ(), mm.unitY())\n hip_ori_cur_yz_2_projected = mm.projectionOnPlane(hip_ori_cur_yz_2, hip_ori_tar_y, hip_ori_tar_z)\n\n cor_angle = mm.getAngleFromVectors(hip_ori_cur_xy_2_projected, hip_ori_tar_xy_2)\n sag_angle = mm.getAngleFromVectors(hip_ori_cur_yz_2_projected, hip_ori_tar_yz_2)\n\n for stance_leg in stanceLegs:\n if stance_leg == motion_ori[0].skeleton.getJointIndex('LeftUpLeg'):\n motion_stf_balancing[frame].mulJointOrientationGlobal(stance_leg, mm.exp(hip_ori_tar_z, 1.*cor_angle))\n # motion_stf_balancing[frame].mulJointOrientationGlobal(stance_leg, mm.exp(hip_ori_tar_z, 1.5*cor_angle))\n else:\n motion_stf_balancing[frame].mulJointOrientationGlobal(stance_leg, mm.exp(hip_ori_tar_z, -1.*cor_angle))\n # motion_stf_balancing[frame].mulJointOrientationGlobal(stance_leg, mm.exp(hip_ori_tar_z, -1.5*cor_angle))\n # motion_stf_balancing[frame].mulJointOrientationGlobal(stance_leg, mm.exp(hip_ori_tar_x, sag_angle))\n\n for swing_leg in swingLegs:\n if swing_leg == motion_ori[0].skeleton.getJointIndex('LeftUpLeg'):\n motion_stf_balancing[frame].mulJointOrientationGlobal(swing_leg, mm.exp(hip_ori_tar_z, 1.*cor_angle))\n else:\n motion_stf_balancing[frame].mulJointOrientationGlobal(swing_leg, mm.exp(hip_ori_tar_z, -1.*cor_angle))\n\n # ankle push\n if False:\n for swing_foot in swingFoots:\n if t < 0.2:\n if swing_foot == motion_ori[0].skeleton.getJointIndex('LeftFoot'):\n motion_stf_balancing[frame].mulJointOrientationGlobal(swing_foot, mm.rotZ((1.-t/.2) * math.pi/6.))\n # motion_stf_balancing[frame].mulJointOrientationGlobal(swing_foot, mm.rotZ(math.pi/2.))\n\n # hwangpil\n # ankle push\n if False:\n for swing_foot in swingFoots:\n if t < 0.2:\n motion_stf_balancing[frame].mulJointOrientationGlobal(swing_foot, mm.rotZ((1.-t/.2) * math.pi/6.))\n # motion_stf_balancing[frame].mulJointOrientationGlobal(swing_foot, mm.rotZ(math.pi/2.))\n\n # hwangpil\n # stance foot tilting\n if True:\n for stance_foot in stanceFoots:\n if t > 0.5:\n R_stf_cur = dartModel.getJointOrientationGlobal(stance_foot)\n R_stf_tar = motion_stf_balancing[frame].getJointOrientationGlobal(stance_foot)\n diff_stf = mm.logSO3(np.dot(R_stf_tar, R_stf_cur.T))\n print('diff_stf: ', diff_stf)\n diff_stf[0] = 0.\n diff_stf[1] = 0.\n R_diff_stf = mm.exp(diff_stf)\n # motion_stf_balancing[frame].mulJointOrientationGlobal(stance_foot, R_diff_stf)\n\n\n\n # hwangpil\n # swing foot parallelizing with ground\n def swf_par_func(_x):\n if _x<.5:\n return -.5*math.pow(1.-2.*_x, 1./3.) + .5\n else:\n return .5*math.pow(2.*_x-1., 1./3.) 
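#===============================================================================
# explanatory sketch (added): the hip adjustment above measures how far the
# simulated pelvis frame has rolled away from the reference in the coronal
# plane and counter-rotates the left and right hips by opposite signs.
# Minimal angle-between-vectors helper used in that spirit:
#===============================================================================
import numpy as np

def angle_between(u, v):
    u = u / np.linalg.norm(u)
    v = v / np.linalg.norm(v)
    return float(np.arccos(np.clip(np.dot(u, v), -1., 1.)))

# made-up averaged pelvis axes, current vs. reference
cur = np.array((0.6, 0.8, 0.))
tar = np.array((0.5, 0.866, 0.))
cor_angle = angle_between(cur, tar)
left_hip_delta, right_hip_delta = +cor_angle, -cor_angle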
+ .5\n\n if False:\n for swingFoot in swingFoots:\n swingBody = dartModel.getBody(swingFoot)\n for shapeNode in swingBody.shapenodes:\n if shapeNode.has_collision_aspect():\n geomType = shapeNode.shape.shape_type_name()\n geomT = np.dot(swingBody.world_transform(), shapeNode.relative_transform())\n if geomType == \"BOX\":\n shape = shapeNode.shape # type: pydart.BoxShape\n data = shape.size() * .5\n footVec = np.dot(geomT[:3, :3], np.array((0., 1., 0.)))\n R_swf_current = np.eye(3)\n R_swf_par = mm.getSO3FromVectors(footVec, np.array((0., 1., 0.)))\n motion_stf_balancing[frame].mulJointOrientationGlobal(swingFoot,\n mm.slerp(R_swf_current, R_swf_par, swf_par_func(t)))\n\n '''\n # swing foot heel strike adjustment\n # make heel as flat as possible to ground\n swf_heel_func = yfg.hermite2nd\n for swingHeel in swingHeels:\n joint_vec_cur = np.dot(dartModel.getJointOrientationGlobal(swingHeel), np.array((0., 0., 1.)))\n joint_vec_tar = copy.deepcopy(joint_vec_cur)\n joint_vec_tar[1] = 0.\n R_target_heel = mm.exp(swf_heel_func(t)*mm.logSO3(mm.getSO3FromVectors(joint_vec_cur, joint_vec_tar)))\n motion_stf_balancing[frame].mulJointOrientationGlobal(swingHeel, R_target_heel)\n # stance foot ankle pushup adjustment\n # stf_ankle_func = yfg.hermite2nd\n stf_ankle_func = lambda x: -2*(x**2)+3*(x**3)\n if len(stanceFoots) == 1:\n for stanceFoot in stanceFoots:\n R_target_ankle = mm.exp(stf_ankle_func(t)*mm.deg2Rad(30.)*np.array([1., 0., 0.]))\n motion_stf_balancing[frame].mulJointOrientationLocal(stanceFoot, R_target_ankle)\n # stance foot toe adjustment\n # stf_toe_func = yfg.hermite2nd\n stf_toe_func = lambda x: -2*(x**8)+3*(x**9)\n if len(stanceFoots) == 1:\n for stanceToe in stanceToes:\n # joint_vec_cur = np.dot(controlModel.getJointOrientationGlobal(stanceToe), np.array((0., 0., 1.)))\n ## joint_vec_cur = np.dot(motion_stf_balancing[frame].getJointOrientationGlobal(stanceToe), np.array((0., 0., 1.)))\n # joint_vec_tar = copy.deepcopy(joint_vec_cur)\n # joint_vec_tar[1] = 0.\n ## R_target_toe = mm.exp(stf_toe_func(t)*mm.logSO3(mm.getSO3FromVectors(joint_vec_cur, joint_vec_tar)))\n # R_target_toe = mm.getSO3FromVectors(joint_vec_cur, joint_vec_tar)\n # motion_stf_balancing[frame].mulJointOrientationGlobal(stanceToe, R_target_toe)\n R_target_toe = mm.exp(stf_toe_func(t)*mm.deg2Rad(-30.)*np.array([1., 0., 0.]))\n motion_stf_balancing[frame].mulJointOrientationLocal(stanceToe, R_target_toe)\n #'''\n\n # foot adjustment\n if SEGMENT_FOOT and False:\n # hfi.footAdjust(motion_stf_balancing[frame], footIdDic, SEGMENT_FOOT_MAG, SEGMENT_FOOT_RAD, .03)\n hfi.footAdjust(motion_stf_balancing[frame], footIdDic, SEGMENT_FOOT_MAG, SEGMENT_FOOT_RAD, toe_offset)\n\n\n # control trajectory\n # motion_control.append(motion_stitch[frame].copy())\n # motion_control.append(motion_swf_height[frame].copy())\n # motion_control.append(motion_match_stl[frame].copy())\n motion_control.append(motion_stf_balancing[frame].copy())\n motion_control.goToFrame(frame)\n\n #=======================================================================\n # tracking with inverse dynamics\n #=======================================================================\n\n weightMap = [1.] 
* (skeleton.getJointNum())\n\n if False:\n toeWeights = 0.001\n\n for jointIdx in lIDs:\n weightMap[jointIdx] = toeWeights\n\n for jointIdx in rIDs:\n weightMap[jointIdx] = toeWeights\n\n th_r = motion_control.getDOFPositions(frame)\n # th_r = motion_stitch.getDOFPositions(frame)\n # th_r = motion_ori.getDOFPositions(frame)\n th = dartModel.skeleton.q\n\n dth_r = motion_control.getDOFVelocities(frame)\n # dth_r = motion_ori.getDOFVelocities(frame)\n dth = dartModel.skeleton.dq\n\n ddth_r = motion_control.getDOFAccelerations(frame)\n # ddth_r = motion_ori.getDOFAccelerations(frame)\n # ddth_des = yct.getDesiredDOFAccelerations(th_r, th, dth_r, dth, ddth_r, Kt, Dt, weightMap)\n\n totalDOF = dartModel.getTotalDOF()\n ddth_des_flat = ype.makeFlatList(totalDOF)\n dth_r_flat = ype.makeFlatList(totalDOF)\n # ype.flatten(ddth_des, ddth_des_flat)\n # ype.flatten(dth_r, dth_r_flat)\n\n # print dartModel.skeleton.q[:6]\n # print dartModel.getBody(0).com(), dartModel.skeleton.joint(0).position_in_world_frame(), dartModel.skeleton.q[:6]\n\n\n del rd_frame2[:]\n rd_frame2.append(dartModel.getBody(0).world_transform())\n\n #=======================================================================\n # simulation\n #=======================================================================\n CP = mm.v3(0.,0.,0.)\n F = mm.v3(0.,0.,0.)\n avg_dCM[0] = mm.v3(0.,0.,0.)\n\n # external force rendering info\n if not isCma:\n del rd_forces[:]; del rd_force_points[:]\n for fi in forceInfos:\n if fi.startFrame <= frame and frame < fi.startFrame + fi.duration*(1/frameTime):\n rd_forces.append(fi.force)\n # rd_force_points.append(controlModel.getBodyPositionGlobal(fi.targetBody))\n rd_force_points.append(dartModel.getBodyPositionGlobal(fi.targetBody))\n\n contactPositions = None\n # dartModel.update(motion_ori[frame])\n pdController.setTartgetPose(th_r)\n\n # bodyIDs = [body.index_in_skeleton for body in dartModel.world.collision_result.contacted_bodies]\n\n if not isCma and not SEGMENT_GAIN_ADJUST:\n # change foot Kd and Kp\n if SEGMENT_FOOT:\n for dofs in footDofs:\n # pdController.setKpKd(dofs, 500., 20.)\n pdController.setKpKd(dofs, 50., 5.)\n\n for dofs in LeftFootDofs:\n pdController.setKpKd(dofs, getParamVal('LeftFootKp'), getParamVal('LeftFootKd'))\n\n for dofs in RightFootDofs:\n pdController.setKpKd(dofs, getParamVal('RightFootKp'), getParamVal('RightFootKd'))\n\n elif not isCma and SEGMENT_GAIN_ADJUST:\n # change foot Kd and Kp\n if SEGMENT_FOOT:\n if stanceFoots[0] == rID and t>0.2:\n for dof in lIDs:\n pdController.setKpKd(dof, 50., 5.)\n for dof in rIDs:\n pdController.setKpKd(dof, 500., 20.)\n elif stanceFoots[0] == lID and t > 0.2:\n for dof in rIDs:\n pdController.setKpKd(dof, 50., 5.)\n for dof in lIDs:\n pdController.setKpKd(dof, 500., 20.)\n else:\n for dof in footDofs:\n pdController.setKpKd(dof, 50., 5.)\n\n for dofs in LeftFootDofs:\n pdController.setKpKd(dofs, getParamVal('LeftFootKp'), getParamVal('LeftFootKd'))\n\n for dofs in RightFootDofs:\n pdController.setKpKd(dofs, getParamVal('RightFootKp'), getParamVal('RightFootKd'))\n\n elif True:\n # change foot Kd and Kp\n for dofs in LeftFootDofs:\n pdController.setKpKd(dofs, 300., 30.)\n\n for dofs in RightFootDofs:\n pdController.setKpKd(dofs, 300., 30.)\n\n for dofs in footDofs:\n pdController.setKpKd(dofs, 500., 20.)\n\n else:\n # change foot Kd and Kp\n for dofs in LeftFootDofs:\n pdController.setKpKd(dofs, 80., 10.)\n\n for dofs in RightFootDofs:\n pdController.setKpKd(dofs, 80., 10.)\n\n simulContactForces = np.zeros(3)\n cForcesControl = []\n 
cPointsControl = []\n\n if frame > 40:\n for i in range(stepsPerFrame):\n # bodyIDs, contactPositions, contactPositionLocals, contactForces = dartModel.calcPenaltyForce(bodyIDsToCheck, mus, Ks, Ds)\n bodyIDs = dartModel.skeleton.self_collision_check()\n\n # _tau = np.zeros(dartModel.skeleton.q.shape)\n _tau = pdController.compute()\n bodyIDs, contactPositions, contactPositionLocals, contactForces, timeStamp = \\\n hdls.calcLCPForces(motion_ori, dartModel.world, dartModel, bodyIDsToCheck, 1., _tau)\n # dartModel.applyPenaltyForce(bodyIDs, contactPositions, contactForces, localForce=False)\n # print('penalty force sum: ', sum(contactForce for contactForce in contactForces))\n\n _ddq = pdController.compute()\n controlTau = None\n if False and SEGMENT_FOOT:\n _ddq = pdController.compute()\n _ddq0 = _ddq[specifiedDofIdx]\n temp1, cPointsControl, temp3, cForcesControl, controlTau = hdls.calcLCPbasicControl(\n motion_ori, dartModel.world, dartModel, bodyIDsToCheck, mu, np.array([0., 300., 0.]), [1., 1., 1.],\n tau0=_ddq, variableDofIdx=footDofs)\n if not isCma:\n print('controlTau: ', controlTau)\n # dartModel.skeleton.set_accelerations(_ddq)\n\n dartModel.skeleton.set_forces(pdController.compute())\n # dartModel.skeleton.set_forces(pdController.compute()+balanceTorque)\n dartModel.step()\n sumForce = sum([(-contact.force if contact.bodynode1.name == 'ground' else contact.force)\n for contact in dartModel.world.collision_result.contacts])\n simulContactForces += sumForce\n '''\n if False and i % 5 == 0:\n # bodyIDs, contactPositions, contactPositionLocals, contactForces = vpWorld.calcPenaltyForce(bodyIDsToCheck, mus, Ks, Ds)\n bodyIDs, contactPositions, contactPositionLocals, contactForces, timeStamp \\\n = hls.calcLCPForcesHD(motion_ori, vpWorld, dartModel, bodyIDsToCheck, 1., ddth_des_flat, ddth_des_flat, solver='qp', hdAccMask=hdAccMask)\n\n if contactForces is not None:\n lContactNum = sum([sum([j==i for j in bodyIDs]) for i in lIDs])\n rContactNum = sum([sum([j==i for j in bodyIDs]) for i in rIDs])\n if 1 <= lContactNum <= 2:\n lbodyIDbs = [any([j==i for i in lIDs])for j in bodyIDs]\n lbodyIDs = [i for i, x in enumerate(lbodyIDbs) if x]\n for i in reversed(lbodyIDs):\n bodyIDs.pop(i)\n contactPositions.pop(i)\n contactPositionLocals.pop(i)\n contactForces.pop(i)\n\n if 1 <= rContactNum <= 2:\n rbodyIDbs = [any([j==i for i in rIDs])for j in bodyIDs]\n rbodyIDs = [i for i, x in enumerate(rbodyIDbs) if x]\n for i in reversed(rbodyIDs):\n bodyIDs.pop(i)\n contactPositions.pop(i)\n contactPositionLocals.pop(i)\n contactForces.pop(i)\n\n if contactForces is not None:\n vpWorld.applyPenaltyForce(bodyIDs, contactPositionLocals, contactForces)\n\n # print contactForces\n\n # apply external force\n for fi in forceInfos:\n if fi.startFrame <= frame and frame < fi.startFrame + fi.duration*(1/frameTime):\n # controlModel.applyBodyForceGlobal(fi.targetBody, fi.force)\n dartModel.getBody(fi.targetBody).add_ext_force(fi.force)\n\n # for i in rIDs+lIDs:\n # controlModel.setJointTorqueLocal(i, ddth_des[i])\n # controlModel.setDOFAccelerations(ddth_des)\n # controlModel.solveHybridDynamics()\n\n # if TORQUE_PLOT:\n # rhip_torques[frame] += mm.length(controlModel.getJointTorqueLocal(rUpLeg))\n # rknee_torques[frame] += mm.length(controlModel.getJointTorqueLocal(rKnee))\n # rankle_torques[frame] += mm.length(controlModel.getJointTorqueLocal(rFoot))\n\n # rd_torques[:] = [controlModel.getJointTorqueLocal(j)/100. 
for j in range(1, skeleton.getJointNum())]\n # rd_joint_positions[:] = controlModel.getJointPositionsGlobal()\n\n # vpWorld.step()\n # yvu.align2D(controlModel)\n '''\n\n\n '''\n if contactForces is not None and len(contactForces) > 0:\n CP += yrp.getCP(contactPositions, contactForces)\n F += sum(contactForces)\n '''\n avg_dCM[0] += dartModel.getJointVelocityGlobal(0)\n # avg_dCM[0] += yrp.getCM(controlModel.getJointVelocitiesGlobal(), bodyMasses, upperMass, uppers)\n # avg_dCM[0] += yrp.getCM(controlModel.getJointVelocitiesGlobal(), bodyMasses, totalMass)\n\n # if len(stanceFoots)>0:\n # avg_stf_v[0] += controlModel.getJointVelocityGlobal(stanceFoots[0])\n # avg_stf_av[0] += controlModel.getJointAngVelocityGlobal(stanceFoots[0])\n\n\n bodyIDs, contactPositions, contactPositionLocals, velocities = dartModel.getContactPoints(bodyIDsToCheck)\n\n contactPoints = [contact.point for contact in dartModel.world.collision_result.contacts]\n contactForces = [(-contact.force if contact.bodynode1.name == 'ground' else contact.force)\n for contact in dartModel.world.collision_result.contacts]\n\n sumForce = sum(contactForces)\n\n if not isCma and viewer.cForceWnd is not None:\n # graph calculated force\n viewer.cForceWnd.insertData('realForce', frame, simulContactForces[1]/stepsPerFrame)\n\n if not isCma:\n del rd_cForces[:]\n del rd_cPositions[:]\n for idx in range(len(contactPoints)):\n rd_cForces.append(contactForces[idx] / 50.)\n rd_cPositions.append(contactPoints[idx])\n\n del rd_cForcesControl[:]\n del rd_cPositionsControl[:]\n for idx in range(len(cForcesControl)):\n rd_cForces.append(cForcesControl[idx] / 50.)\n rd_cPositions.append(cPointsControl[idx])\n\n # bodyIDs = [body.index_in_skeleton() for body in contacted_bodies]\n # contacted_bodies = dartModel.world.collision_result.contacted_bodies # type: list[pydart.BodyNode]\n # bodyIDs = []\n # for body in contacted_bodies:\n # ground_skeleton = body.skeleton # type: pydart.Skeleton\n # if ground_skeleton.name == \"grount skeleton\":\n # print(\"hehe\")\n\n if not isCma:\n del rd_point2[:]\n if contactPositions is not None:\n rd_point2.extend(contactPositions)\n\n if not isCma:\n del rd_point1[:]\n rd_point1.append(dartModel.getCOM())\n\n if not isCma:\n del rd_point2[:]\n rd_point2.append(dartMotionModel.getCOM())\n\n\n CP /= stepsPerFrame\n F /= stepsPerFrame\n avg_dCM[0] /= stepsPerFrame\n\n # if len(stanceFoots)>0:\n # avg_stf_v[0] /= stepsPerFrame\n # avg_stf_av[0] /= stepsPerFrame\n # rd_vec1[0] = avg_stf_av[0]; rd_vec1[0][0] = 0.; rd_vec1[0][2] = 0.\n # rd_vecori1[0]= controlModel.getJointPositionGlobal(stanceFoots[0])\n\n #=======================================================================\n # segment editing\n #=======================================================================\n lastFrame = False\n\n # print curState\n # print bodyIDs\n\n if SEGMENT_EDITING:\n if curState==yba.GaitState.STOP:\n if frame == len(motion_seg)-1:\n lastFrame = True\n\n elif (curState==yba.GaitState.LSWING or curState==yba.GaitState.RSWING) and t > c_min_contact_time:\n contact = False\n\n if not SEGMENT_FOOT:\n # original box foot\n swingID = lID if curState==yba.GaitState.LSWING else rID\n\n if swingID in bodyIDs:\n minContactVel = 1000.\n for i in range(len(bodyIDs)):\n if bodyIDs[i]==swingID:\n vel = dartModel.getBodyVelocityGlobal(swingID, contactPositionLocals[i])\n vel[1] = 0\n contactVel = mm.length(vel)\n if contactVel < minContactVel: minContactVel = contactVel\n if minContactVel < c_min_contact_vel: contact = True\n\n else:\n # 
segmented foot\n swingIDs = copy.deepcopy(lIDs) if curState==yba.GaitState.LSWING else copy.deepcopy(rIDs)\n\n contact = False\n contact_count = 0\n\n for swingID in swingIDs:\n if swingID in bodyIDs:\n minContactVel = 1000.\n for idx in range(len(bodyIDs)):\n if bodyIDs[idx] == swingID:\n vel = dartModel.getBodyVelocityGlobal(swingID, contactPositionLocals[idx])\n vel[1] = 0\n contactVel = mm.length(vel)\n contact_count += 1\n if contactVel < minContactVel:\n minContactVel = contactVel\n if minContactVel < c_min_contact_vel and contact_count > 2:\n contact = True\n elif minContactVel < c_min_contact_vel and contact_count > 1 and prev_contact_count[0] > 1 :\n contact = True\n\n prev_contact_count[0] = contact_count\n\n extended[0] = False\n\n if contact:\n if not isCma:\n print(frame, 'foot touch')\n lastFrame = True\n acc_offset[0] += frame - cur_interval[1]\n\n elif frame == len(motion_seg)-1:\n if not isCma:\n print(frame, 'extend frame', frame+1)\n\n preserveJoints = []\n # preserveJoints = [lFoot, rFoot]\n # preserveJoints = [lFoot, rFoot, lKnee, rKnee]\n # preserveJoints = [lFoot, rFoot, lKnee, rKnee, lUpLeg, rUpLeg]\n stanceKnees = [rKnee] if curState==yba.GaitState.LSWING else [lKnee]\n preserveJoints = [stanceFoots[0], stanceKnees[0], stanceLegs[0]]\n\n diff = 3\n motion_seg_orig.extend([motion_seg_orig[-1]])\n motion_seg.extend(ymt.extendByIntegration_root(motion_seg, 1, diff))\n\n motion_stitch.extend(ymt.extendByIntegration_constant(motion_stitch, 1, preserveJoints, diff))\n\n extended[0] = True\n else:\n if frame == len(motion_seg)-1: lastFrame = True\n\n if lastFrame:\n if segIndex < len(segments)-1:\n if not isCma:\n print('%d (%d): end of %dth seg (%s, %s)'%(frame, frame-cur_interval[1],segIndex, yba.GaitState.text[curState], cur_interval))\n if plot is not None:\n plot.addDataPoint('diff', frame, (frame-cur_interval[1])*.01)\n\n if len(stanceFoots)>0 and len(swingFoots)>0:\n step_cur = dartModel.getJointPositionGlobal(0) - dartModel.getJointPositionGlobal(stanceFoots[0])\n step_tar = motion_seg[cur_interval[1]].getJointPositionGlobal(0) - motion_seg[cur_interval[1]].getJointPositionGlobal(stanceFoots[0])\n\n step_cur = mm.projectionOnPlane(step_cur, (1,0,0), (0,0,1))\n step_tar = mm.projectionOnPlane(step_tar, (1,0,0), (0,0,1))\n\n step_cur_sag, step_cur_cor = mm.projectionOnVector2(step_cur, direction)\n step_tar_sag, step_tar_cor = mm.projectionOnVector2(step_tar, direction)\n\n step_length_tar[0] = mm.length(step_tar_sag)\n if np.inner(step_tar_sag, step_cur_sag) > 0:\n step_length_cur[0] = mm.length(step_cur_sag)\n else:\n step_length_cur[0] = -mm.length(step_cur_sag)\n\n step_axis[0] = directionAxis\n\n seg_index[0] += 1\n curSeg = segments[seg_index[0]]\n stl_y_limit_num[0] = 0\n stl_xz_limit_num[0] = 0\n\n del motion_seg_orig[frame+1:]\n motion_seg_orig.extend(ymb.getAttachedNextMotion(curSeg, motion_seg_orig[-1], False, False))\n\n del motion_seg[frame+1:]\n del motion_stitch[frame+1:]\n transitionLength = len(curSeg)-1\n\n d = motion_seg[-1] - curSeg[0]\n d.rootPos[1] = 0.\n motion_seg.extend(ymb.getAttachedNextMotion(curSeg, d, True, False))\n\n if NO_FOOT_SLIDING:\n if segIndex == len(segments)-2:\n Rl = motion_control[-1].getJointOrientationLocal(lUpLeg)\n Rr = motion_control[-1].getJointOrientationLocal(rUpLeg)\n Rlk = motion_control[-1].getJointOrientationLocal(lKnee)\n Rrk = motion_control[-1].getJointOrientationLocal(rKnee)\n Rlf = motion_control[-1].getJointOrientationLocal(lFoot)\n Rrf = motion_control[-1].getJointOrientationLocal(rFoot)\n for p in 
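#===============================================================================
# explanatory sketch (added): with the segmented foot, one slow contact point
# is not trusted as touchdown; the test above requires low contact-point
# speed plus either three contacts now, or two contacts in two consecutive
# frames. Same predicate as a pure function:
#===============================================================================
def touchdown(min_contact_vel, contact_count, prev_contact_count,
              c_min_contact_vel=100.):
    if min_contact_vel >= c_min_contact_vel:
        return False
    return contact_count > 2 or (contact_count > 1 and prev_contact_count > 1)

assert not touchdown(5., 2, 1)   # two contacts, no history: keep swinging
assert touchdown(5., 2, 2)       # two contacts twice in a row: foot touch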
curSeg:\n p.setJointOrientationLocal(lUpLeg, Rl, False)\n p.setJointOrientationLocal(rUpLeg, Rr, False)\n p.setJointOrientationLocal(lKnee, Rlk, False)\n p.setJointOrientationLocal(rKnee, Rrk, False)\n p.setJointOrientationLocal(lFoot, Rlf, False)\n p.setJointOrientationLocal(rFoot, Rrf, False)\n p.updateGlobalT()\n\n d = motion_control[-1] - curSeg[0]\n d.rootPos[1] = 0.\n motion_stitch.extend(ymb.getStitchedNextMotion(curSeg, d, transitionLength, stitch_func, True, False))\n\n else:\n motion_seg_orig.append(motion_seg_orig[-1])\n motion_seg.append(motion_seg[-1])\n motion_stitch.append(motion_control[-1])\n\n\n # rendering\n # motionModel.update(motion_ori[frame])\n if not isCma:\n # dartMotionModel.update(motion_stitch[frame])\n # dartMotionModel.update(motion_stf_balancing[frame])\n dartMotionModel.update(motion_seg[frame])\n # dartMotionModel.update(motion_ori[frame])\n # motionModel.update(motion_seg[frame])\n\n rd_CP[0] = CP\n # rd_CMP[0] = (CMreal[0] - (F[0]/F[1])*CMreal[1], 0, CMreal[2] - (F[2]/F[1])*CMreal[1])\n\n if plot is not None:\n plot.addDataPoint('zero', frame, 0)\n plot.updatePoints()\n\n if not isCma:\n viewer.setSimulateCallback(simulateCallback)\n\n if MULTI_VIEWER:\n viewer.startTimer(frameTime / 1.4)\n else:\n viewer.startTimer(frameTime * .1)\n viewer.show()\n\n Fl.run()\n else:\n objectiveSum = 0\n successSum = 0\n comSum = 0\n velSum = 0\n dirSum = 0\n\n for i in range(MAX_FRAME):\n simulateCallback(i)\n\n _com = dartModel.getCOM()\n\n if i > 50:\n successSum -= 1\n\n comSum += _com[2] * _com[2]\n\n _com_vel = dartModel.skeleton.com_velocity()\n _com_vel[1] = 0.\n velSum += (np.linalg.norm(_com_vel) - 0.7)*(np.linalg.norm(_com_vel)-0.7)\n\n dirDiff = mm.normalize(_com_vel) - np.array((-1., 0., 0.))\n dirSum += np.dot(dirDiff, dirDiff)\n\n if _com[1] < 0.65 or _com[1] > 1.0:\n break\n if i % 50 == 0 and (np.isnan(velSum) or np.isnan(dirSum)):\n break\n\n # objectiveSum = successSum + .3*comSum + velSum\n objectiveSum = successSum + velSum + .3*dirSum\n # print(objectiveSum, successSum, velSum, .3*dirSum, params)\n del motion_stitch[:]\n del motion_debug1[:]\n del motion_debug2[:]\n del motion_debug3[:]\n del motion_control[:]\n del motion_stf_balancing[:]\n del motion_match_stl[:]\n del motion_ori[:]\n del motion_seg[:]\n del motion_seg_orig[:]\n del motion_stf_push[:]\n del motion_stf_stabilize[:]\n del motion_swf_height[:]\n del motion_swf_placement[:]\n del motion_swf_orientation[:]\n return float(objectiveSum), float(successSum), float(velSum), float(.3*dirSum)\n # return float(objectiveSum)\n\n\nif __name__ == '__main__':\n # c_min_contact_vel = 100.\n # c_min_contact_time = .7\n # c_landing_duration = .2\n # c_taking_duration = .3\n # c_swf_mid_offset = .0\n # c_locking_vel = .05\n # c_swf_offset = .01\n\n # K_stp_pos = 0.\n # c5 = .7\n # c6 = .02\n # K_stb_vel = .1\n # K_stb_pos = .1\n # K_swp_vel_sag = .0\n # K_swp_vel_cor = 1.3\n # K_swp_pos_sag = 1.2\n # K_swp_pos_cor = 1.\n # K_swp_pos_sag_faster = .05\n\n # viewer.objectInfoWnd.add1DSlider(\"c_min_contact_vel\", 0., 200., .2, 100.)\n # viewer.objectInfoWnd.add1DSlider(\"c_min_contact_time\", 0., 5., .01, .7)\n # viewer.objectInfoWnd.add1DSlider(\"c_landing_duration\", 0., 5., .01, .2)\n # viewer.objectInfoWnd.add1DSlider(\"c_taking_duration\", 0., 5., .01, .3)\n # viewer.objectInfoWnd.add1DSlider(\"c_swf_mid_offset\", -1., 1., .001, 0.)\n # viewer.objectInfoWnd.add1DSlider(\"c_locking_vel\", 0., 1., .001, .05)\n # viewer.objectInfoWnd.add1DSlider(\"c_swf_offset\", -1., 1., .001, .01)\n\n # 
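#===============================================================================
# explanatory sketch (added): the CMA fitness assembled above rewards
# surviving frames (successSum goes negative after a 50-frame grace period),
# penalizes horizontal COM speed deviating from 0.7 m/s, penalizes heading
# away from -x, and aborts once COM height leaves [0.65, 1.0]. As a pure
# function over a recorded rollout:
#===============================================================================
import numpy as np

def rollout_score(com_heights, com_vels, grace=50, v_ref=0.7):
    success = vel = heading = 0.
    for i, (h, v) in enumerate(zip(com_heights, com_vels)):
        if i > grace:
            success -= 1
        v = np.array((v[0], 0., v[2]))            # horizontal part only
        vel += (np.linalg.norm(v) - v_ref) ** 2
        d = v / (np.linalg.norm(v) + 1e-12) - np.array((-1., 0., 0.))
        heading += float(np.dot(d, d))
        if not 0.65 <= h <= 1.0:
            break
    return success + vel + 0.3 * heading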
viewer.objectInfoWnd.add1DSlider(\"K_stp_pos\", 0., 1., .01, 0.)\n # viewer.objectInfoWnd.add1DSlider(\"c5\", 0., 5., .01, .7)\n # viewer.objectInfoWnd.add1DSlider(\"c6\", 0., 1., .01, .02)\n # viewer.objectInfoWnd.add1DSlider(\"K_stb_vel\", 0., 1., .01, .1)\n # viewer.objectInfoWnd.add1DSlider(\"K_stb_pos\", 0., 1., .01, .1)\n # viewer.objectInfoWnd.add1DSlider(\"K_swp_vel_sag\", 0., 5., .01, 0.)\n # viewer.objectInfoWnd.add1DSlider(\"K_swp_vel_cor\", 0., 5., .01, 1.3)\n # viewer.objectInfoWnd.add1DSlider(\"K_swp_pos_sag\", 0., 5., .01, 1.2)\n # viewer.objectInfoWnd.add1DSlider(\"K_swp_pos_cor\", 0., 5., .01, 1.)\n # viewer.objectInfoWnd.add1DSlider(\"K_swp_pos_sag_faster\",0., 1., .01, .05)\n\n\n # walkings(None, False)\n\n\n # hand tuning\n # params = [0., .7, .02, .1, .1, .0, 1.3, 1.2, 1., .05]\n # 325 frames success, Ks = 600.\n params = [ 0.01918975, 0.86622863, 0.15111008, 0.50972221, 0.09746768, -0.09129272, 1.12736657, 1.2873114 , 0.84409227, 0.38928674]\n\n # 347 frames success, Ks = 600. ????????\n # params = [-0.0096717475861028673, 0.51455174209881782, 0.1414213562373095, 0.31622776601683794, 0.19555994814530026, 0.0, 1.1401754250991381, 1.457290633087426, 0.78654212710618387, 0.61027611069961429]\n\n # 287 frames success, Ks = 1000.\n # params = [-0.15744347, 0.67592998, 0.14142136, 0.31622777, 0.35696289, 0., 1.14017543, 1.27637941, 0.95735647, 0.23835687]\n\n\n\n # 400 frames success, box foot, LCP, Kp = 200, Kd = 20\n # params = [-0.11523854, 0.56103475, 0.14142136, 0.31622777, 0.13175649, 0. , 1.14017543, 1.18703622, 0.77193057, 0.20490717]\n\n # infinite frames success, box foot, LCP, Kp = 200, Kd = 20, foot Kp = 80, foot Kd = 10\n params = [-0.13880733, 0.3439617, 0.14142136, 0.31622777, -0.18792631, 0., 1.14017543, 1.53473264, 1.07681499, 0.22992996]\n\n\n # 1220 frames success, parameter rounding, box foot, LCP, Kp = 200, Kd = 20, foot Kp = 80, foot Kd = 10,\n params = [-0.11608721, 0.42672724, 0.14142136, 0.31622777, -0.12770363, 0., 1.14017543, 1.63989139, 1.01964141, 0.18439344]\n\n # 1850 frames success, parameter rounding, box foot, LCP, Kp = 200, Kd = 20, foot Kp = 80, foot Kd = 10,\n params = [-0.10540525, 0.40167391, 0.14142136, 0.31622777, -0.06906434, 0., 1.14017543, 1.57445634, 1.01106981, 0.23834485]\n\n # infinite frames success, parameter rounding, box foot, LCP, Kp = 200, Kd = 20, foot Kp = 80, foot Kd = 10,\n # params = [-0.03424024, 0.32955692, 0.0850351 , 0.28576747, -0.10735104, 0.00185764, 1.36932697, 1.27616424, 0.97477866, 0.29608671]\n\n params = [ 0.23265769, 1.04283873, -0.29465862, 0.3544647, 0.2997252, -0.17338881, 2.08012922, 1.09571025, 0.6792339, -0.35920458]\n\n # DartTrackingFoot0 result, c_swf_mid_offset = 0.02\n params = [ 0.00745384, -0.56053261, 0.00921962, 0.42575388, 1.03165526, 0.69931117, 1.42782163, 1.65119398, 1.1237301 , 0.5327249 ]\n\n params = [0., .7, .02, .1, .1, .0, 1.3, 1.2, 1., .05]\n params = [ 0.52572998, 0.15153905, -0.59859175, 0.93952107, 0.49886098, -0.1271257, 0.7328913, 0.87975694, 1.73943837, -0.97777014]\n\n # 120 frames success\n params = [-0.03373822, 0.21621505, -0.46121163, 0.97844009, 1.26921316, 0.07107696, 1.43362972, 0.10045292, 1.40123327, -0.67596869]\n\n # 195 frames success\n params = [-0.156885745146, 0.224351871531, -0.651388957459, 0.803834992348, 1.05714177435, 0.00542880291931, 1.56462249867, -0.111631227361, 1.37037255808, -1.00517210154]\n isCma = False\n\n params = [-0.156885745146, 0.224351871531, 0., 0.803834992348, 1.05714177435, 0.00542880291931, 1.56462249867, -0.111631227361, 
1.37037255808, -1.00517210154]\n if len(sys.argv) == 1 and not isCma:\n walkings(params, False)\n elif len(sys.argv) == 2 and sys.argv[1] == '-view' and not isCma:\n walkings(params, False)\n elif (len(sys.argv) == 2 and sys.argv[1] == '-cma') or isCma:\n # from PyCommon.modules.Math.Nomalizer import Normalizer\n # normalizer = Normalizer([0.]*10., [1., 5., .2, 1., 1., 3., 3., 3., 3., .5], [1.]*10, [-1.]*10)\n # c6, K_stb_vel, K_swp_vel_sag, K_swp_vel_cor is velocity gain\n # cmaOption = cma.CMAOptions('fixed_variables')\n # cmaOption.set('fixed_variables', {2:math.sqrt(.02), 3:math.sqrt(.1), 5:math.sqrt(0.), 6:math.sqrt(1.3)})\n # cma.fmin(walkings, np.sqrt([0., .5, .02, .1, .1, .0, 0.3, 1.2, .5, .05]).tolist(), .1, args=(True,), options=cmaOption)\n # cma.fmin(walkings, params, .1, args=(True,), options=cmaOption)\n # cma.fmin(walkings, params, .1, args=(True,))\n\n from datetime import datetime\n filename = datetime.now().strftime('%Y%m%d%H%M')+\".opt\"\n fout = open(filename, \"w\")\n fout.write(os.path.basename(__file__)+'\\n')\n es = cma.CMAEvolutionStrategy(params, .1,\n {'maxiter':100})\n fout.close()\n # {'maxiter':2, 'fixed_variables':{2:math.sqrt(.02), 3:math.sqrt(.1), 5:math.sqrt(0.), 6:math.sqrt(1.3)}})\n pool = mp.Pool(es.popsize)\n cmaCount = 0\n while not es.stop():\n fout = open(filename, \"a\")\n X = es.ask()\n f_values = pool.map_async(walkings, X).get()\n obj_values = [f_value[0] for f_value in f_values]\n es.tell(X, obj_values)\n es.disp()\n es.logger.add()\n\n print(cmaCount, min(f_values), X[np.argmin(obj_values)])\n fout.write(str(cmaCount)+' '+str(min(f_values)))\n for x in X[np.argmin(obj_values)]:\n fout.write(' '+str(x)+',')\n fout.write('\\n')\n cmaCount += 1\n fout.close()\n\n print(\"------------best-----------\")\n print(\"eval: \", es.best.evals)\n print(\"f: \", es.best.f)\n print(\"x: \", es.best.x)\n", "import numpy as np\nimport numpy.linalg as npl\n\nimport sys\nif '../PyCommon/modules' not in sys.path:\n sys.path.append('../PyCommon/modules')\nimport PyCommon.modules.Math.mmMath as mm\nimport PyCommon.modules.Util.ysPythonEx as ype\n\ndef getTrackingWeight(DOFs, skeleton, weightMap, rootPositionWeight=0.):\n weights = [1.]*skeleton.getJointNum()\n for name, weight in weightMap.items():\n index = skeleton.getJointIndex(name)\n if index is not None:\n weights[index] = weight\n\n totalDOF = 0\n for dof in DOFs:\n totalDOF += dof\n\n weights_ext = [None]*totalDOF\n ype.repeatListElements(weights, weights_ext, DOFs)\n weights_ext[0:3] = [rootPositionWeight, rootPositionWeight, rootPositionWeight]\n\n return weights_ext\n\n\ndef addTrackingTerms(problem, totalDOF, weight, jointWeights, ddth_des_flat):\n # minimize | Wt(ddth_des - ddth) |^2\n problem.addObjective_matrix(np.diag( [jointWeights[i] for i in range(len(jointWeights))] ), np.array([jointWeights[i]*ddth_des_flat[i] for i in range(len(jointWeights))]), weight )\n\n\ndef addLinearTerms(problem, totalDOF, weight, dL_des, R, r_bias):\n # minimize | dL_des - (R*ddth + r_bias) |^2\n problem.addObjective_matrix(R, dL_des - r_bias, weight)\n\n\ndef addAngularTerms(problem, totalDOF, weight, dH_des, S, s_bias):\n # minimize | dH_des - (S*ddth + s_bias) |^2\n problem.addObjective_matrix(S, dH_des - s_bias, weight)\n\n\ndef addEndEffectorTerms(problem, totalDOF, weight, J, dJ, dth, ddP_des):\n # minimize | ddP_des - (J*ddth + dJ)|^2\n problem.addObjective_matrix(J, ddP_des - dJ, weight)\n\n\ndef addSoftPointConstraintTerms(problem, totalDOF, weight, ddP_des, Q, q_bias):\n # minimize | ddP_des - (Q*ddth + 
q_bias) |^2\n problem.addObjective_matrix(Q, ddP_des - q_bias, weight)\n\n\ndef setConstraint(problem, totalDOF, J, dJ, dth_flat, a_sup):\n # subject to J_sup*ddth + dJ_sup*dth_flat = a_sup\n problem.setConstraint_matrix(J, a_sup - np.dot(dJ, dth_flat))\n\n\ndef addConstraint(problem, totalDOF, J, dJ, dth_flat, a_sup):\n # subject to J_sup*ddth + dJ_sup*dth_flat = a_sup\n problem.addConstraint_matrix(J, a_sup - np.dot(dJ, dth_flat))\n\n\ndef addConstraint2(problem, totalDOF, J, dJdq, dth_flat, a_sup):\n # subject to J_sup*ddth + dJ_sup*dth_flat = a_sup\n problem.addConstraint_matrix(J, a_sup - dJdq)\n\n\n# Quadratic Programming \n# x = [ddotq tau lambda]\n\ndef addQPTrackingTerms(qp, totalProblemSize, si, totalDOF, weight, jointWeights, ddth_des_flat):\n # minimize | Wt(ddth_des - ddth) |^2\n #jointWeights[0] = .5\n #jointWeights[1] = 1.1\n #jointWeights[2] = .5\n #jointWeights[3] = 1.1\n #jointWeights[4] = 1.1\n #jointWeights[5] = 1.1\n\n jointWeights[0] = .5\n jointWeights[1] = .1\n jointWeights[2] = .5\n jointWeights[3] = .0001\n jointWeights[4] = .0001\n jointWeights[5] = .0001\n\n aaa = .5\n #A = np.diag( np.append([jointWeights[i] for i in range(len(jointWeights))], np.zeros(totalProblemSize-totalDOF)) )\n A = np.diag( np.append([aaa for i in range(len(jointWeights))], np.zeros(totalProblemSize-totalDOF)) )\n #b = np.append(np.array([jointWeights[i]*ddth_des_flat[i] for i in range(len(jointWeights))]), np.zeros(totalProblemSize-totalDOF))\n b = np.append(np.array([aaa*ddth_des_flat[i] for i in range(len(jointWeights))]), np.zeros(totalProblemSize-totalDOF))\n qp.addObjective(A,b,weight)\n\ndef addQPTorqueTerms(qp, totalProblemSize, si, totalActuator, weight, jointTorqueWeights):\n # minimize |tau|^2\n A = np.diag(np.append(np.append(np.zeros((si)), 1.*np.ones((totalActuator))), np.zeros(totalProblemSize-si-totalActuator)))\n b = np.zeros(totalProblemSize)\n qp.addObjective(A,b,weight)\n\ndef addQPContactForceTerms(qp, totalProblemSize, si, totalContact, weight):\n # minimize |lambda|^2\n A = np.diag(np.append(np.zeros(si), 1.*np.ones(totalContact)))\n b = np.zeros(totalProblemSize)\n qp.addObjective(A,b,weight)\n\ndef addQPEqualityEomConstraint(qp, totalProblemSize, totalDOF, totalActuator, totalContact, M, c, JcTVc_append):\n # subject to Mq'' -tau - JcTVclambda = -b\n # tau[0:6) = 0\n # [M -I -JcTVc]\n S = np.diag(np.append(np.zeros(6), 1.*np.ones(totalActuator-6)))\n A = np.vstack(( np.zeros((6,totalProblemSize)), np.hstack(( M,-S,-JcTVc_append )) ))\n for i in range(0, 6):\n A[i, totalDOF+i] = 1.\n b = np.append(np.zeros(6), -c)\n qp.addEqualityConstraint(A, b)\n\ndef addQPEqualityInverseEomConstraint(qp, totalProblemSize, totalDOF, totalActuator, totalContact, invM, invMc, JcTVc_append):\n # subject to Mq'' -tau - JcTVclambda = -b\n # tau[0:6) = 0\n # [I -M^-1 -M^-1*JcTVc]\n S = np.diag(np.append(np.zeros(6), 1.*np.ones(totalActuator-6)))\n #A = np.vstack(( np.zeros((6,totalProblemSize)), np.hstack(( np.eye(totalDOF),-np.dot(invM, S),np.dot(invM, -JcTVc_append) )) ))\n A = np.vstack(( np.zeros((6,totalProblemSize)), np.hstack(( np.eye(totalDOF),-invM,-np.dot(invM, JcTVc_append) )) ))\n for i in range(0, 6):\n A[i, totalDOF+i] = 1.\n b = np.append(np.zeros(6), -invMc)\n #print(A[totalDOF:totalDOF+6])\n qp.addEqualityConstraint(A, b)\n\ndef addQPEqualityContactConstraint(qp, totalProblemSize, totalDOF, totalActuator, totalContact, Jc, dJc, dth, a_c):\n # subject to Jc q'' = -dJc q' + a_sup\n A = np.hstack( (Jc , np.zeros((6, totalActuator+totalContact))) )\n b = -np.dot(dJc, dth) 
+ a_c\n qp.addEqualityConstraint(A, b)\n\ndef addQPInequalityTorqueConstraint(qp, totalProblemSize, totalDOF, totalActuator, totalContact, torqueMax, torqueMin):\n # subject to tau <= max and -tau <= min\n G_max = np.hstack((np.zeros((totalActuator-6, totalDOF+6)), np.diag(1.*np.ones(totalActuator-6)), np.zeros((totalActuator-6, totalContact)) ))\n G_min = -G_max\n G = np.vstack((G_max, G_min))\n h = np.append( torqueMax, -torqueMin)\n if G.shape[0] != h.shape[0]:\n print('Inequality Torque : G and h shapes are not matched')\n\n qp.addInequalityConstraint(G, h)\n\ndef addQPInequalityContactForceConstraint(qp, totalProblemSize, totalDOF, totalActuator, totalContact):\n # subject to -lambda <= 0\n G = -np.hstack((np.zeros((totalContact, totalDOF+totalActuator)), np.diag(1.*np.ones(totalContact)) ))\n h = np.zeros(totalContact)\n if G.shape[0] != h.shape[0]:\n print('Inequality Contact : G and h shapes are not matched')\n qp.addInequalityConstraint(G, h)\n\ndef addQPInequalityVelocityConstraint(qp, totalProblemSize, totalDOF, totalActuator, totalContact, VcTJc_list, VcTdJc_list, dVcTJc_list, dq, ac_offset_list, invdt):\n # subject to -(VcTJcq'' + VcTJc'q' + VcT'Jcq') <= 0\n #TODO:\n # assume that Vc' = 0 <- is it right? check it!\n G = None\n h = None\n for i in range(len(VcTJc_list)):\n G_temp = np.hstack( (-VcTJc_list[i], np.zeros((4, totalActuator+totalContact))) )\n #h_temp = np.dot(VcTdJc_list[i], dq) + (-.05)*np.ones(4)\n #h_temp = (-1/30.)*np.dot(VcTJc_list[i], dq)+ np.dot(VcTdJc_list[i], dq) + (-1.)*ac_offset_list[i]\n #h_temp = (-1/30.)*np.dot(VcTJc_list[i], dq)+ np.dot(VcTdJc_list[i], dq)\n h_temp = np.dot(dVcTJc_list[i], dq) + np.dot(VcTdJc_list[i], dq) + np.dot(VcTJc_list[i], dq) * invdt + (-1.)*ac_offset_list[i]\n if G == None:\n G = G_temp.copy()\n h = h_temp.copy()\n else:\n G = np.vstack( (G, G_temp) )\n h = np.append( h, h_temp )\n\n if G.shape[0] != h.shape[0]:\n print('Inequality Velocity : G and h shapes are not matched')\n qp.addInequalityConstraint(G, h)\n", "from math import exp, sin, cos, asin, pi, sqrt\nfrom scipy.optimize import minimize_scalar\n\n# muscle specific parameters\nf_m_o, l_m_o, l_t_sl, alpha_opt = 0, 0, 0, 0\n\n# all muslces share these parameters\neps_t_o = 0.033\neps_m_o = 0.6\nk_pe = 4.0\ngamma = 0.5\ndot_l_m_max_tilde = 10.\nf_m_len_tilde = 1.8\nA_f = 0.3\n\n# Force-Length Relationship of Tendon\nf_t_toe_tilde = 0.33\nk_toe = 3.0\nk_lin = 1.712 / eps_t_o\neps_t_toe = 0.609 * eps_t_o\n\ndef g_t_tilde(l_t):\n eps_t = l_t / l_t_sl - 1.\n if eps_t <= eps_t_toe:\n return f_t_toe_tilde * (exp(k_toe * eps_t / eps_t_toe - 1.) - 1.) \\\n / \\\n (exp(k_toe) - 1.)\n else:\n return k_lin * (eps_t - eps_t_toe) + f_t_toe_tilde\n\n\n# Passive Force-Length Relationship of Muscle\ndef g_pl_tilde(l_m):\n l_m_tilde = l_m / l_m_o\n if l_m_tilde <= 1:\n return 0\n else:\n return (exp(k_pe * (l_m_tilde - 1)/eps_m_o) - 1) \\\n / \\\n (exp(k_pe) - 1)\n\n\n# Active Force-Length Relationship of Muscle\ndef g_al_tilde(l_m):\n l_m_tilde = l_m / l_m_o\n return exp(-(l_m_tilde-1)*(l_m_tilde-1)/gamma)\n\n\n# Force-Velocity Relationship of Muscle\ndef g_vl_tilde(dot_l_m):\n dot_l_m_tilde = dot_l_m / l_m_o\n if dot_l_m_tilde <= 0:\n return (dot_l_m_tilde + dot_l_m_max_tilde) \\\n / \\\n (dot_l_m_max_tilde - dot_l_m_tilde/A_f)\n else:\n _a = dot_l_m_tilde * (2. 
+ 2./A_f)\n _b = dot_l_m_max_tilde * (f_m_len_tilde - 1.)\n return (f_m_len_tilde * _a + _b) / (_a + _b)\n\n\ndef compute_activation_deriv_scalar(u, a, tau_act, tau_deact):\n tau_total = 0.\n if u < a:\n tau_total = tau_deact / (0.5 + 1.5*a)\n else:\n tau_total = tau_act * (0.5 + 1.5*a)\n dadt = (u-a) / tau_total\n return dadt\n\n\ndef compute_cos_pennation_scalar(l_m, l_m_opt, pa_opt):\n pa = 0.\n\n if l_m < 0.:\n l_m = 0.\n\n if l_m < 1e-6:\n pa = asin(1.)\n else:\n pa = asin( l_m_opt * sin(pa_opt) / l_m )\n\n if pa > pi/4.:\n pa = pi/4.\n\n return cos(pa)\n\n\ndef compute_norm_tendon_force_scalar(eps_t, eps_t_o):\n f_t_norm = 0.\n if eps_t > eps_t_toe:\n f_t_norm = k_lin * (eps_t - eps_t_toe) + f_t_toe_tilde\n elif eps_t > 0.:\n f_t_norm = (f_t_toe_tilde / (exp(k_toe)-1.)) * (exp(k_toe * eps_t / eps_t_toe) - 1.)\n else:\n f_t_norm = 0.\n\n return f_t_norm\n\ndef compute_norm_passive_fiber_force_by_length_scalar(l_m_norm, eps_m_o, k_pe):\n f_p_norm = 0.\n if l_m_norm > 1.:\n f_p_norm = (exp(k_pe * (l_m_norm - 1.) / eps_m_o) - 1.) / (exp(k_pe) - 1.)\n else:\n f_p_norm = 0\n return f_p_norm\n\n\ndef compute_norm_active_fiber_force_by_length_scalar(l_m_norm, gamma):\n return exp(-(l_m_norm-1.)*(l_m_norm-1.) / gamma)\n\n\ndef compute_norm_active_fiber_force_by_velocity_scalar(dl_mdt_norm, a_f, f_m_len, v_m_max):\n gv_norm = 0.\n if dl_mdt_norm <= 0.:\n gv_norm = (dl_mdt_norm + v_m_max) / (v_m_max - dl_mdt_norm/a_f)\n else:\n lm_term = dl_mdt_norm*(2.+2./a_f)\n lmax_term = v_m_max*(f_m_len-1.)\n gv_norm = (f_m_len*lm_term + lmax_term) / (lm_term + lmax_term)\n return gv_norm\n\n\ndef compute_norm_fiber_length_deriv_scalar(f_m_norm, a, f_l, a_f, f_m_len, damping, v_m_max, option=None):\n a_f_l = a * f_l\n if damping > 0.:\n d = damping\n k = 1.\n\n if f_m_norm <= a_f_l:\n _a = d/a_f\n _b = -(a_f_l + f_m_norm/a_f + k*d)\n _c = k*(f_m_norm - a_f_l)\n else:\n _a = -(2.+2./a_f) * d / f_m_len\n _b = -((2.+2./a_f) * (a_f_l*f_m_len - f_m_norm)/(f_m_len-1.) + k*d)\n _c = k*(f_m_norm - a_f_l)\n\n det = _b*_b - 4*_a*_c\n dl_mdt_unit = (-_b-sqrt(det))/(2.*_a)\n else:\n if f_m_norm <= a_f_l:\n _b = a_f_l + (f_m_norm / a_f)\n else:\n _b = ( (2. + 2. 
/a_f) * (a_f_l * f_m_len - f_m_norm ) ) / (f_m_len - 1.)\n\n if _b > 0.:\n dl_mdt_unit = (f_m_norm - a_f_l) / _b\n else:\n dl_mdt_unit = 0.\n\n return v_m_max * dl_mdt_unit\n\n\ndef get_fiber_length_deriv_scalar(a, l_m, l_mt, l_m_opt, pa_opt, l_t_sl, eps_t_o, eps_m_o,\n k_pe, gamma, a_f, f_m_len, damping, v_m_max, option=None):\n cos_pa = compute_cos_pennation_scalar(l_m, l_m_opt, pa_opt)\n\n l_t = l_mt - l_m * cos_pa\n eps_t = (l_t - l_t_sl) / l_t_sl\n f_t_norm = compute_norm_tendon_force_scalar(eps_t, eps_t_o)\n\n l_m_norm = l_m / l_m_opt\n f_p_norm = compute_norm_passive_fiber_force_by_length_scalar(l_m_norm, eps_m_o, k_pe)\n f_l = compute_norm_active_fiber_force_by_length_scalar(l_m_norm, gamma)\n\n f_m_norm = f_t_norm / cos_pa - f_p_norm\n dl_mdt_norm = compute_norm_fiber_length_deriv_scalar(f_m_norm, a, f_l, a_f, f_m_len, damping, v_m_max, option)\n\n dl_mdt = l_m_opt * dl_mdt_norm\n\n return dl_mdt\n\n\ndef get_isometric_fiber_length(a, l_m, l_mt, l_m_opt, pa_opt, l_t_sl, eps_t_o, eps_m_o,\n k_pe, gamma, a_f, f_m_len, damping, v_m_max, option=None):\n def obj_dl_m(_l_m):\n dl_mdt = get_fiber_length_deriv_scalar(a, _l_m, l_mt, l_m_opt, pa_opt, l_t_sl, eps_t_o, eps_m_o,\n k_pe, gamma, a_f, f_m_len, damping, v_m_max, 'modified_damping')\n return dl_mdt * dl_mdt\n\n ub = max(0., l_mt - l_t_sl)\n\n result = minimize_scalar(obj_dl_m, bounds=(0., ub), method='bounded')\n return result.x\n\n\nclass Muscle(object):\n def __init__(self):\n pass", "from fltk import *\nimport copy\nimport numpy as np\nimport numpy.linalg as npl\n\nimport sys\nif '..' not in sys.path:\n sys.path.append('..')\nimport Math.mmMath as mm\nimport Resource.ysMotionLoader as yf\nimport Simulator.csVpWorld as cvw\nimport Simulator.csVpModel as cvm\nimport GUI.ysSimpleViewer as ysv\nimport Optimization.ysAnalyticConstrainedOpt as yac\nimport ArticulatedBody.ysJacobian as yjc\nimport Util.ysPythonEx as ype\nimport ArticulatedBody.ysReferencePoints as yrp\nimport ArticulatedBody.ysMomentum as ymt\nimport ArticulatedBody.ysControl as yct\n\ndef getPartJacobian(_Jsys, _jIdx):\n # warning : only Jsys works.\n return _Jsys[6*_jIdx : 6*_jIdx+6].copy()\n\nclass QPSimulator:\n def __init__(self):\n self.qp = yac.QP()\n self.qp.clear()\n\n # parameter\n self.Kt = 200.\n self.Dt = 2*(self.Kt**.5)\n self.Kl = 1000.\n self.Dl = 2*(self.Kl**.5)\n self.Kh = 1000.\n self.Dh = 2*(self.Kh**.5)\n #self.Ke = 100\n #self.De = 2*(self.Ke**.5)\n self.Ke = 10\n self.De = 2*(self.Ke**.5)\n\n self.Bt = 10.\n self.Btau = 1.\n self.Bcon = 1.\n self.Bl = 100.\n self.Bh = 100.\n self.Be = 100.\n\n # constants\n self.mu = 1.\n self.contactPerSide = 4 # vertices of boxes always checked\n\n # flat data structure\n self.ddth_des_flat = None\n self.dth_flat = None\n self.dth_r_flat = None\n self.ddth_r_flat = None\n self.ddth_sol = None\n self.totalDOF = 0\n\n self.extForce = None\n self.extForceBody = None\n self.extDuration = 0\n\n self.Vc_tmp = np.zeros((4,3))\n self.Vc_tmp[0] = mm.normalize2(( self.mu, 1, 0))\n self.Vc_tmp[1] = mm.normalize2((-self.mu, 1, 0))\n self.Vc_tmp[2] = mm.normalize2(( 0, 1, self.mu))\n self.Vc_tmp[3] = mm.normalize2(( 0, 1, -self.mu))\n\n self.CP_old = None\n\n #self.dH_des = None\n #self.dL_des_plane = None\n\n # jacobian\n self.Jsys = None\n self.dJsys = None\n\n def setupWeight(self, Kt, Kl, Kh, Ke, Bt, Btau, Bcon, Bl=0., Bh=0., Be=0.):\n self.Kt = Kt\n self.Dt = 2*(self.Kt**.5)\n self.Kl = Kl\n self.Dl = 2*(self.Kl**.5)\n self.Kh = Kh\n self.Dh = 2*(self.Kh**.5)\n self.Ke = Ke\n self.De = 2*(self.Ke**.5)\n\n self.Bt = 
Bt\n        self.Btau = Btau\n        self.Bcon = Bcon\n        self.Bl = Bl\n        self.Bh = Bh\n        self.Be = Be\n\n    def addExternalForces(self, force, forceBody, duration):\n        self.extForce = np.hstack( (force, np.zeros(3)) )\n        self.extForceBody = forceBody\n        self.extDuration = duration\n\n    def setupQP(self, frame, motion, mcfg, model, world, config, timestep):\n        motionModel = cvm.VpMotionModel(world, motion[frame], mcfg)\n\n        # constants\n        invdt = 1./timestep\n\n        # dofs and flat data structure\n        totalDOF = model.getTotalDOF()\n        DOFs = model.getDOFs()\n        self.totalDOF = totalDOF\n        self.ddth_des_flat = ype.makeFlatList(totalDOF)\n        self.ddth_r_flat = ype.makeFlatList(totalDOF)\n        self.dth_flat = ype.makeFlatList(totalDOF)\n        self.dth_r_flat = ype.makeFlatList(totalDOF)\n        self.ddth_sol = ype.makeNestedList(DOFs)\n\n        # momentum matrix\n        linkMasses = model.getBodyMasses()\n        totalMass = model.getTotalMass()\n        TO = ymt.make_TO(linkMasses)\n        dTO = ymt.make_dTO(len(linkMasses))\n\n        # optimization\n        self.qp.clear()\n\n\n        Vc_tmp = self.Vc_tmp\n\n\n        # tracking\n        w = self.getTrackingWeight(DOFs, motion[0].skeleton, config['weightMap'])\n\n        th_r = motion.getDOFPositionsLocal(frame)\n        th = model.getDOFPositionsLocal()\n        dth_r = motion.getDOFVelocitiesLocal(frame)\n        dth = model.getDOFVelocitiesLocal()\n        ddth_r = motion.getDOFAccelerationsLocal(frame)\n        ddth_des = yct.getDesiredDOFAccelerations(th_r, th, dth_r, dth, ddth_r, self.Kt, self.Dt)\n\n        linkPositions = model.getBodyPositionsGlobal()\n        linkVelocities = model.getBodyVelocitiesGlobal()\n        linkAngVelocities = model.getBodyAngVelocitiesGlobal()\n        linkInertias = model.getBodyInertiasGlobal()\n\n        jointPositions = model.getJointPositionsGlobal()\n        jointAxeses = model.getDOFAxesesLocal()\n\n        #linkPositions_ref = motionModel.getBodyPositionsGlobal()\n        #linkVelocities_ref = motionModel.getBodyVelocitiesGlobal()\n        #linkAngVelocities_ref = motionModel.getBodyAngVelocitiesGlobal()\n        #linkInertias_ref = motionModel.getBodyInertiasGlobal()\n\n        #jointPositions_ref = motionModel.getJointPositionsGlobal()\n        #jointAxeses_ref = motionModel.getDOFAxesesLocal()\n\n        ype.flatten(ddth_des, self.ddth_des_flat)\n        ype.flatten(dth, self.dth_flat)\n        ype.flatten(dth_r, self.dth_r_flat)\n        ype.flatten(ddth_r, self.ddth_r_flat)\n\n        # get CoM\n        CM = yrp.getCM(linkPositions, linkMasses, totalMass)\n        dCM = yrp.getCM(linkVelocities, linkMasses, totalMass)\n        CM_plane = copy.copy(CM); CM_plane[1]=0.\n        dCM_plane = copy.copy(dCM); dCM_plane[1]=0.\n\n        P = ymt.getPureInertiaMatrix(TO, linkMasses, linkPositions, CM, linkInertias)\n        dP = ymt.getPureInertiaMatrixDerivative(dTO, linkMasses, linkVelocities, dCM, linkAngVelocities, linkInertias)\n\n\n        # jacobian\n        Jsup = yjc.makeEmptyJacobian(DOFs, 1)\n        dJsup = Jsup.copy()\n        Jsys_old = None\n\n        if self.Jsys is not None:\n            Jsys_old = self.Jsys.copy()\n\n        if self.Jsys is None:\n            self.Jsys = yjc.makeEmptyJacobian(DOFs, model.getBodyNum())\n            self.dJsys = self.Jsys.copy()\n\n        allLinkJointMasks = yjc.getAllLinkJointMasks(motion[0].skeleton)\n\n        yjc.computeJacobian2(self.Jsys, DOFs, jointPositions, jointAxeses, linkPositions, allLinkJointMasks)\n        if Jsys_old is None:\n            self.dJsys = self.Jsys-self.Jsys\n        else:\n            self.dJsys = (self.Jsys - Jsys_old)*invdt\n        #yjc.computeJacobianDerivative2(self.dJsys, DOFs, jointPositions, jointAxeses, linkAngVelocities, linkPositions, allLinkJointMasks)\n\n        #CM_ref = yrp.getCM(linkPositions_ref, linkMasses, totalMass)\n        #dCM_ref = yrp.getCM(linkVelocities_ref, linkMasses, totalMass)\n        #CM_ref_plane = copy.copy(CM_ref); CM_ref_plane[1]=0.\n        #dCM_ref_plane = 
copy.copy(dCM_ref); dCM_ref_plane[1]=0.\n\n        #P_ref = ymt.getPureInertiaMatrix(TO, linkMasses, linkPositions_ref, CM_ref, linkInertias_ref)\n        #dP_ref = ymt.getPureInertiaMatrixDerivative(dTO, linkMasses, linkVelocities_ref, dCM_ref, linkAngVelocities_ref, linkInertias_ref)\n\n        # get EoM\n        totalActuator = totalDOF\n\n        invM = np.zeros((totalActuator,totalDOF))\n        invMc = np.zeros(totalDOF)\n        model.getInverseEquationOfMotion(invM, invMc)\n        #print invMc\n\n        # contact detection\n        Ks = 1\n        Ds = 1\n        supsupR = motion[0].skeleton.getJointIndex('RightLeg')\n        supsupL = motion[0].skeleton.getJointIndex('LeftLeg')\n        supR = motion[0].skeleton.getJointIndex('RightFoot')\n        supL = motion[0].skeleton.getJointIndex('LeftFoot')\n        bodyIDsToCheck = range(world.getBodyNum())\n        #print bodyIDsToCheck\n        #bodyIDsToCheck = [supsupR, supsupL]\n        #bodyIDsToCheck = [supR, supL]\n        mus = [.5]*len(bodyIDsToCheck)\n        bodyIDs, contactPositions, contactPositionLocals, contactForces, contactVelocities = world.calcPenaltyForce(bodyIDsToCheck, mus, Ks, Ds)\n        #bodyIDs, contactPositions, contactPositionLocals, contactForces, contactVelocities = world.calcManyPenaltyForce(self.contactPerSide, bodyIDsToCheck, mus, Ks, Ds)\n        #bodyIDs, contactPositions, contactPositionLocals, contactForces, contactVelocities = world.calcOnePenaltyForce(bodyIDsToCheck, mus, Ks, Ds)\n\n        #print bodyIDs, contactPositions\n\n        footCenterL = model.getBodyPositionGlobal(supL)\n        footCenterR = model.getBodyPositionGlobal(supR)\n        footCenter = footCenterL.copy()\n\n        footRefCenterL = motionModel.getBodyPositionGlobal(supL)\n        footRefCenterR = motionModel.getBodyPositionGlobal(supR)\n        #if supL in bodyIDs:\n            #if supR in bodyIDs:\n                #footCenter = footCenterL + (footCenterR-footCenterL)/2.\n            #else:\n                #footCenter = footCenterL.copy()\n        #else:\n            #if supR in bodyIDs:\n                #footCenter = footCenterR.copy()\n            #else:\n                #footCenter = np.array((0,0,0))\n\n        contactL = 1\n        contactR = 1\n\n        if footRefCenterL[1] < 0.2:\n            if footRefCenterR[1] < 0.2:\n                footCenter = footCenterL + (footCenterR-footCenterL)/2.\n            else:\n                footCenter = footCenterL.copy()\n                contactR = 0\n        else:\n            contactL = 0\n            if footRefCenterR[1] < 0.2:\n                footCenter = footCenterR.copy()\n            else:\n                footCenter = np.array((0,0,0))\n                contactR = 0\n        #print(contactR, contactL)\n        footCenter[1] = 0.\n\n        # linear momentum\n        CM_ref = footCenter\n        #CM_ref =\n        #dL_des_plane = self.Kl*totalMass*(CM_ref - CM) + self.Dl*totalMass*(dCM_ref - dCM)\n        dL_des_plane = self.Kl*totalMass*(CM_ref - CM) + self.Dl*totalMass*(-dCM)\n        dL_des_plane[1] = 0.\n\n        # angular momentum\n        CP_ref = footCenter\n        #bodyIDs, contactPositions, contactPositionLocals, contactForces = world.calcManyPenaltyForce(bodyIDsToCheck, mus, Ks, Ds)\n        #CP = yrp.getCP(contactPositions, contactForces)\n        CP = yrp.getSimpleCP(contactPositions)\n        if self.CP_old is None or CP is None:\n            dCP = None\n        else:\n            dCP = (CP - self.CP_old)/(1/30.)\n        self.CP_old = CP\n\n        if CP is not None and dCP is not None:\n            ddCP_des = self.Kh*(CP_ref - CP) - self.Dh*(dCP)\n            CP_des = CP + dCP*(1/30.) 
+ .5*ddCP_des*((1/30.)**2)\n dH_des = np.cross((CP_des - CM), (dL_des_plane + totalMass*np.array((0,-9.8,0))))\n #if contactChangeCount >0: # and contactChangeType == 'DtoS':\n ##dH_des *= (maxContactChangeCount - contactChangeCount)/(maxContactChangeCount*10)\n #dH_des *= (self.maxContactChangeCount - self.contactChangeCount)/(self.maxContactChangeCount)\n ##dH_des *= (contactChangeCount)/(maxContactChangeCount)*.9+.1\n else:\n dH_des = None\n H = np.dot(P, np.dot(self.Jsys, self.dth_flat))\n dH_des = -self.Kh*H[3:]\n\n\n\n # equality constraints\n JcTVc_append = np.zeros((totalDOF, 0))\n VcTJc_list = []\n VcTdJc_list = []\n dVcTJc_list = []\n ac_offset_list = []\n totalContact = 4*len(bodyIDs)\n totalProblem = totalDOF+totalActuator+totalContact\n\n preSup = -1\n for i in range(len(contactPositions)):\n sup = bodyIDs[i]\n supJointMasks = [yjc.getLinkJointMask(motion[0].skeleton, sup)]\n\n if preSup != sup:\n bodyPos = linkPositions[sup]\n bodyVel = linkVelocities[sup]\n #yjc.computeJacobian2(Jsup, DOFs, jointPositions, jointAxeses, [bodyPos], supJointMasks)\n #yjc.computeJacobianDerivative2(dJsup, DOFs, jointPositions, jointAxeses, linkAngVelocities, [bodyPos], supJointMasks)\n Jsup = getPartJacobian(self.Jsys, sup)\n dJsup = getPartJacobian(self.dJsys, sup)\n\n R_dAd = np.hstack( (np.vstack( (np.eye(3), mm.getCrossMatrixForm(-bodyPos)) ), np.vstack( (np.zeros((3,3)), np.eye(3)) ) ) )\n dR_dAd = np.hstack( (np.vstack( (np.eye(3), mm.getCrossMatrixForm(-bodyVel)) ), np.vstack( (np.zeros((3,3)), np.eye(3)) ) ) )\n #R_dAd = np.hstack( (np.vstack( (np.eye(3), mm.getCrossMatrixForm(-contactPositions[i])) ), np.vstack( (np.zeros((3,3)), np.eye(3)) ) ) )\n #dR_dAd = np.hstack( (np.vstack( (np.eye(3), mm.getCrossMatrixForm(-contactVelocities[i])) ), np.vstack( (np.zeros((3,3)), np.eye(3)) ) ) )\n\n p = contactPositions[i]\n dotp = contactVelocities[i]\n VcT_tmp = np.zeros((4,6))\n dVcT_tmp = VcT_tmp.copy()\n for ii in range(4):\n n = Vc_tmp[ii]\n pcn = np.cross(contactPositions[i], Vc_tmp[ii])\n VcT_tmp[ii][:3] =n\n VcT_tmp[ii][3:] =pcn\n dotpcn = np.cross(contactVelocities[i], Vc_tmp[ii])\n dVcT_tmp[ii][3:] = dotpcn\n\n Vc = np.dot(R_dAd, VcT_tmp.T)\n dVc = np.dot(R_dAd, dVcT_tmp.T) + np.dot(dR_dAd, VcT_tmp.T)\n\n JcTVc = np.dot( Jsup.T, Vc)\n JcTVc_append = np.hstack((JcTVc_append, JcTVc))\n VcTJc_list.append( JcTVc.T )\n VcTdJc_list.append( np.dot(Vc.T, dJsup) )\n dVcTJc_list.append( np.dot(dVc.T, Jsup) )\n\n #TODO:\n # when friction cones and velocity cones differ?\n #JcTVc = np.dot( Jsup.T, VcT.T)\n #JcTVc_append = np.hstack((JcTVc_append, JcTVc))\n #VcTJc_list.append( JcTVc.T )\n #VcTdJc_list.append( np.dot(VcT, dJsup) )\n #dVcTJc_list.append( np.dot(dVcT, Jsup) )\n\n penDepth = -0.05-contactPositions[i][1]\n if penDepth < 0.:\n penDepth = 0.\n #penDepth = 0.\n ac_offset = 1000.*penDepth*np.ones(4)\n ac_offset_list.append(ac_offset)\n preSup = sup\n\n extForce = np.zeros(totalActuator)\n if self.extDuration > 0:\n Jext = yjc.makeEmptyJacobian(DOFs, 1)\n extForcePos = model.getBodyPositionGlobal(self.extForceBody)\n extJointMasks = [yjc.getLinkJointMask(motion[0].skeleton, self.extForceBody)]\n yjc.computeJacobian2(Jext, DOFs, jointPositions, jointAxeses, [extForcePos], extJointMasks)\n extForce = np.dot(Jext.T, self.extForce)\n\n self.extDuration -= timestep\n if self.extDuration < 0:\n self.extDuration = 0\n\n self.addQPEqualityInverseEomConstraint(totalProblem, totalDOF, totalActuator, totalContact, invM, invMc, JcTVc_append, extForce)\n\n # inequality constraints\n\n if totalContact> 
0:\n self.addQPInequalityContactForceConstraint(totalProblem, totalDOF, totalActuator, totalContact)\n self.addQPInequalityVelocityConstraint(totalProblem, totalDOF, totalActuator, totalContact, VcTJc_list, VcTdJc_list,dVcTJc_list, self.dth_flat, ac_offset_list, invdt)\n #self.addQPInequalityVelocityConstraint(totalProblem, totalDOF, totalActuator, totalContact, VcTJc_vel_list, VcTdJc_vel_list,dVcTJc_vel_list, self.dth_flat, ac_offset_list, 30.)\n torqueMax = 1000.*np.ones(totalActuator-6)\n torqueMin = -torqueMax\n self.addQPInequalityTorqueConstraint(totalProblem, totalDOF, totalActuator, totalContact, torqueMax, torqueMin)\n\n # objectives\n self.addQPTrackingTerms(totalProblem, 0, totalDOF, self.Bt, w, self.ddth_des_flat)\n self.addQPTorqueTerms(totalProblem, totalDOF, totalActuator, self.Btau, w)\n if totalContact > 0:\n self.addQPContactForceTerms(totalProblem, totalDOF+totalActuator, totalContact, self.Bcon)\n #if dH_des !=None:\n #\tallLinkJointMasks = yjc.getAllLinkJointMasks(motion[0].skeleton)\n #\tyjc.computeJacobian2(Jsys, DOFs, jointPositions, jointAxeses, linkPositions, allLinkJointMasks)\n #\tyjc.computeJacobianDerivative2(dJsys, DOFs, jointPositions, jointAxeses, linkAngVelocities, linkPositions, allLinkJointMasks)\n #\tself.addLinearAndAngularBalancigTerms(totalProblem, 0, totalDOF, self.Bl, self.Bh, P, self.Jsys, self.dth_flat, dP, self.dJsys, dL_des_plane, dH_des)\n\n # end effector\n #TODO:\n eeList = [supR, supL]\n #eeList = []\n\n #if totalContact > 0:\n for ee in eeList:\n eeCenter = model.getBodyPositionGlobal(ee)\n eeJointMasks = [yjc.getLinkJointMask(motion[0].skeleton, ee)]\n yjc.computeJacobian2(Jsup, DOFs, jointPositions, jointAxeses, [eeCenter], eeJointMasks)\n yjc.computeJacobianDerivative2(dJsup, DOFs, jointPositions, jointAxeses, linkAngVelocities, [eeCenter], eeJointMasks, False)\n ee_genvel_ref = np.dot(Jsup, self.dth_r_flat)\n ee_genacc_ref = np.dot(Jsup, self.ddth_r_flat) + np.dot(dJsup, self.dth_r_flat)\n\n ee_pos_ref = motionModel.getBodyPositionGlobal(ee)\n ee_pos = model.getBodyPositionGlobal(ee)\n ee_vel_ref = ee_genvel_ref[:3]\n ee_vel = model.getBodyVelocityGlobal(ee)\n ee_acc_ref = ee_genacc_ref[:3]\n ddp_des_pos = self.Ke*( (ee_pos_ref-th_r[0][0]) - (ee_pos-th[0][0]) )\n ddp_des_pos += self.De*(ee_vel_ref - ee_vel)\n ddp_des_pos += ee_acc_ref\n\n eeOri = model.getBodyOrientationGlobal(ee)\n eeAngVel = model.getBodyAngVelocityGlobal(ee)\n ee_angvel_ref = ee_genvel_ref[3:]\n ee_angacc_ref = ee_genacc_ref[3:]\n a_ori_diff = mm.logSO3(mm.getSO3FromVectors(np.dot(eeOri, np.array([0,1,0])), np.array([0,1,0])))\n ddp_des_ang = self.Ke*a_ori_diff + self.De*(-eeAngVel)\n #ddp_des_ang = self.Ke*a_ori_diff + self.De*(ee_angvel_ref-eeAngVel)\n #ddp_des_ang += ee_angacc_ref\n\n ddp_des = np.hstack( (ddp_des_pos, ddp_des_ang) )\n\n #self.addEndEffectorTerms(totalProblem, 0, totalDOF, Jsup, dJsup, self.dth_flat, ddp_des, self.Be)\n self.addEqualityEndEffectorTerms(totalProblem, 0, totalDOF, Jsup, dJsup, self.dth_flat, ddp_des, self.Be)\n\n return contactPositions, CP, CM, footCenter, dL_des_plane, CM_ref\n\n\n def stepQP(self, model, timestep):\n totalDOF = self.totalDOF\n # optimization\n x = self.qp.solve()\n ype.nested(np.array(x[:totalDOF].T).flatten(), self.ddth_sol)\n test_ddq = np.array(x[:totalDOF].T).flatten()\n test_tau = np.array(x[totalDOF:2*totalDOF].T).flatten()\n test_lambda = np.array(x[2*totalDOF:].T).flatten()\n\n # integrate\n if self.ddth_sol != None:\n model.stepKinematics(timestep, self.ddth_sol)\n self.ddth_des_flat = None\n 
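# setupQP populates these cached buffers each frame; clearing them here\n            # guarantees a stale solution is never integrated twice -- calling stepQP\n            # again without a fresh setupQP falls through to the 'setup QP first!'\n            # branch below.\n            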
self.dth_flat = None\n            self.ddth_sol = None\n            #np.dot(Vc_tmp.T, np.array(test_lambda[4*i:4*i+4]))\n            if test_lambda.size > 0:\n                contactForces = []\n                for i in range(len(test_lambda)//4):\n                    contactForces.append( np.dot(self.Vc_tmp.T, test_lambda[4*i:4*i+4]))\n\n                return x, contactForces\n            else:\n                return x, None\n        else:\n            print(\"setup QP first!\")\n            return None\n\n    # objectives\n    def getTrackingWeight(self, DOFs, skeleton, weightMap, rootPositionWeight=0.):\n        weights = [1.]*skeleton.getJointNum()\n        for name, weight in weightMap.items():\n            index = skeleton.getJointIndex(name)\n            if index is not None:\n                weights[index] = weight\n\n        totalDOF = 0\n        for dof in DOFs:\n            totalDOF += dof\n\n        weights_ext = [None]*totalDOF\n        ype.repeatListElements(weights, weights_ext, DOFs)\n        weights_ext[0:3] = [rootPositionWeight, rootPositionWeight, rootPositionWeight]\n\n        return weights_ext\n\n    def addQPTrackingTerms(self, totalProblemSize, si, totalDOF, weight, jointWeights, ddth_des_flat):\n        # minimize | Wt(ddth_des - ddth) |^2\n        #jointWeights[0] = 1.\n        #jointWeights[1] = 1.\n        #jointWeights[2] = 1.\n        A = np.diag( np.append([jointWeights[i] for i in range(len(jointWeights))], np.zeros(totalProblemSize-totalDOF)) )\n        b = np.append(np.array([jointWeights[i]*ddth_des_flat[i] for i in range(len(jointWeights))]), np.zeros(totalProblemSize-totalDOF))\n        self.qp.addObjective(A,b,weight)\n\n    def addQPTorqueTerms(self, totalProblemSize, si, totalActuator, weight, jointTorqueWeights):\n        # minimize |tau|^2\n        A = np.diag(np.append(np.append(np.zeros((si)), 1.*np.ones((totalActuator))), np.zeros(totalProblemSize-si-totalActuator)))\n        b = np.zeros(totalProblemSize)\n        self.qp.addObjective(A,b,weight)\n\n    def addQPContactForceTerms(self, totalProblemSize, si, totalContact, weight):\n        # minimize |lambda|^2\n        A = np.diag(np.append(np.zeros(si), 1.*np.ones(totalContact)))\n        b = np.zeros(totalProblemSize)\n        self.qp.addObjective(A,b,weight)\n\n    def addLinearAndAngularBalancigTerms(self, totalProblemSize, si, totalDOF, linearWeight, angularWeight, P, Jsys, dth, dotP, dotJsys, dotLdes, dotHdes):\n        #minimize |Bl(dotL - dotLdes)|^2 and |Bh(dotH - dotHdes)|^2\n        Wl = linearWeight ** .5\n        Wh = angularWeight ** .5\n        W = np.diag( np.append( Wl*np.ones(3), Wh*np.ones(3) ) )\n        A = np.hstack( (np.dot(W, np.dot(P, Jsys)), np.zeros( (6, totalProblemSize - totalDOF))))\n        b = np.hstack( (dotLdes, dotHdes) ) - np.dot(dotP, np.dot(Jsys, dth)) - np.dot(P, np.dot(dotJsys, dth))\n        self.qp.addObjective(A, np.dot(W,b))\n\n    def addEndEffectorTerms(self, totalProblemSize, si, totalDOF, Jee, dJee, dth, ddp_des, weight):\n        #minimize |ddp - ddp_des|^2 = |J * ddth - (ddp_des-dJ * dth)|^2\n        #foot should be parallel to ground\n        A = np.hstack( ( Jee.copy(), np.zeros((6, totalProblemSize-totalDOF)) ) )\n        b = ddp_des - np.dot(dJee, dth)\n        #self.qp.addEqualityConstraint(A[3:], b[3:])\n        self.qp.addObjective(A, b, weight)\n        #self.qp.addObjective(A[:0], b[:0], weight)\n        #self.qp.addObjective(A[2:], b[2:], weight)\n\n    # constraints\n    def addQPEqualityEomConstraint(self, totalProblemSize, totalDOF, totalActuator, totalContact, M, c, JcTVc_append):\n        # subject to Mq'' -tau - JcTVclambda = -b\n        # tau[0:6) = 0\n        # [M -I -JcTVc]\n        S = np.diag(np.append(np.zeros(6), 1.*np.ones(totalActuator-6)))\n        A = np.vstack(( np.zeros((6,totalProblemSize)), np.hstack(( M,-S,-JcTVc_append )) ))\n        for i in range(0, 6):\n            A[i, totalDOF+i] = 1.\n        b = np.append(np.zeros(6), -c)\n        self.qp.addEqualityConstraint(A, b)\n\n    def addEqualityEndEffectorTerms(self, totalProblemSize, si, totalDOF, Jee, dJee, dth, ddp_des, weight):\n        
#minimize J * ddth = ddp_des - dJ * dth\n #foot should be parallel to ground\n A = np.hstack( ( Jee.copy(), np.zeros((6, totalProblemSize-totalDOF)) ) )\n b = ddp_des - np.dot(dJee, dth)\n self.qp.addEqualityConstraint(A[3:], b[3:])\n #self.qp.addEqualityConstraint(A, b)\n\n def addQPEqualityInverseEomConstraint(self, totalProblemSize, totalDOF, totalActuator, totalContact, invM, invMc, JcTVc_append, extForce):\n # subject to Mq'' -tau - JcTVclambda = -b\n # tau[0:6) = 0\n # [I -M^-1 -M^-1*JcTVc]\n S = np.diag(np.append(np.zeros(6), 1.*np.ones(totalActuator-6)))\n #A = np.vstack(( np.zeros((6,totalProblemSize)), np.hstack(( np.eye(totalDOF),-np.dot(invM, S),np.dot(invM, -JcTVc_append) )) ))\n A = np.vstack(( np.zeros((6,totalProblemSize)), np.hstack(( np.eye(totalDOF),-invM,-np.dot(invM, JcTVc_append) )) ))\n for i in range(0, 6):\n A[i, totalDOF+i] = 1.\n b = np.append(np.zeros(6), -invMc)\n b += np.append(np.zeros(6), np.dot(invM, extForce))\n #print(A[totalDOF:totalDOF+6])\n self.qp.addEqualityConstraint(A, b)\n\n def addQPEqualityContactConstraint(self, totalProblemSize, totalDOF, totalActuator, totalContact, Jc, dJc, dth, a_c):\n # subject to Jc q'' = -dJc q' + a_sup\n A = np.hstack( (Jc , np.zeros((6, totalActuator+totalContact))) )\n b = -np.dot(dJc, dth) + a_c\n self.qp.addEqualityConstraint(A, b)\n\n def addQPInequalityTorqueConstraint(self, totalProblemSize, totalDOF, totalActuator, totalContact, torqueMax, torqueMin):\n # subject to tau <= max and -tau <= min\n G_max = np.hstack((np.zeros((totalActuator-6, totalDOF+6)), np.diag(1.*np.ones(totalActuator-6)), np.zeros((totalActuator-6, totalContact)) ))\n G_min = -G_max\n G = np.vstack((G_max, G_min))\n h = np.append( torqueMax, -torqueMin)\n if G.shape[0] != h.shape[0]:\n print('Inequality Torque : G and h shapes are not matched')\n\n self.qp.addInequalityConstraint(G, h)\n\n def addQPInequalityContactForceConstraint(self, totalProblemSize, totalDOF, totalActuator, totalContact):\n # subject to -lambda <= 0\n G = -np.hstack((np.zeros((totalContact, totalDOF+totalActuator)), np.diag(1.*np.ones(totalContact)) ))\n h = np.zeros(totalContact)\n if G.shape[0] != h.shape[0]:\n print('Inequality Contact : G and h shapes are not matched')\n self.qp.addInequalityConstraint(G, h)\n\n def addQPInequalityVelocityConstraint(self, totalProblemSize, totalDOF, totalActuator, totalContact, VcTJc_list, VcTdJc_list, dVcTJc_list, dq, ac_offset_list, invdt):\n # subject to -(VcTJcq'' + VcTJc'q' + VcT'Jcq') <= 0\n #TODO:\n # assume that Vc' = 0 <- is it right? 
check it!\n G = None\n h = None\n for i in range(len(VcTJc_list)):\n G_temp = np.hstack( (-VcTJc_list[i], np.zeros((4, totalActuator+totalContact))) )\n #h_temp = np.dot(VcTdJc_list[i], dq) + (-.05)*np.ones(4)\n #h_temp = (-1/30.)*np.dot(VcTJc_list[i], dq)+ np.dot(VcTdJc_list[i], dq) + (-1.)*ac_offset_list[i]\n #h_temp = (-1/30.)*np.dot(VcTJc_list[i], dq)+ np.dot(VcTdJc_list[i], dq)\n h_temp = np.dot(dVcTJc_list[i], dq) + np.dot(VcTdJc_list[i], dq) + np.dot(VcTJc_list[i], dq) * invdt + (-1.)*ac_offset_list[i]\n if G == None:\n G = G_temp.copy()\n h = h_temp.copy()\n else:\n G = np.vstack( (G, G_temp) )\n h = np.append( h, h_temp )\n\n if G.shape[0] != h.shape[0]:\n print('Inequality Velocity : G and h shapes are not matched')\n self.qp.addInequalityConstraint(G, h)\n", "from fltk import *\nimport copy\nimport os.path\ntry:\n import pickle\nexcept ImportError:\n import cPickle as pickle\nimport time\nimport numpy as np\n\nimport PyCommon.modules.Math.mmMath as mm\nfrom PyCommon.modules.Math import mmMath as mm\nfrom PyCommon.modules.Math import csMath as cm\nfrom PyCommon.modules.Math import ysFunctionGraph as yfg\nfrom PyCommon.modules.Renderer import ysRenderer as yr\nfrom PyCommon.modules.Simulator import ysVpUtil as yvu\nfrom PyCommon.modules.GUI import ysSimpleViewer as ysv\nfrom PyCommon.modules.GUI import ysMultiViewer as ymv\nfrom PyCommon.modules.ArticulatedBody import ysControl as yct\nfrom PyCommon.modules.ArticulatedBody import ysReferencePoints as yrp\nfrom PyCommon.modules.Motion import ysMotionAnalysis as yma\nfrom PyCommon.modules.Motion import ysBipedAnalysis as yba\nfrom PyCommon.modules.Motion import ysMotion as ym\nfrom PyCommon.modules.Motion import ysMotionBlend as ymb\nfrom PyCommon.modules.Motion import ysMotionExtend as ymt\nfrom PyCommon.modules.Motion import ysSkeletonEdit as yhe\nfrom PyCommon.modules.Motion import mmAnalyticIK as aik\n# from PyCommon.modules.Util import ysMatplotEx as ymp\nfrom PyCommon.modules.Resource import ysMotionLoader as yf\nfrom PyCommon.modules.Simulator import ysPhysConfig as ypc\n\nfrom PyCommon.modules.Simulator import hpLCPSimul2 as hls\nfrom PyCommon.modules.GUI import hpSimpleViewer as hsv\nfrom PyCommon.modules.Util import ysPythonEx as ype\n\nfrom PyCommon.modules.Simulator import csVpModel_py as pcvm\nfrom PyCommon.modules.Simulator import csVpWorld_py as pcvw\n\n\nimport math\n# from matplotlib import pyplot as plt\n# from matplotlib import collections\n\ncurrent_path = os.path.dirname(os.path.abspath(__file__))\n\n#MOTION_COLOR = (128,128,128)\n#CHARACTER_COLOR = (102,102,153)\nMOTION_COLOR = (213,111,162)\nCHARACTER_COLOR = (20,166,188)\n\ndef buildMassMap():\n massMap = {}\n massMap = massMap.fromkeys(['Head', 'Head_Effector', 'Hips',\n 'LeftArm', 'LeftFoot', 'LeftForeArm', 'LeftHand', 'LeftHand_Effector',\n 'LeftLeg', 'LeftShoulder1', 'LeftUpLeg',\n 'RightArm', 'RightFoot', 'RightForeArm', 'RightHand', 'RightHand_Effector',\n 'RightLeg', 'RightShoulder', 'RightUpLeg',\n 'Spine', 'Spine1',\n 'RightFoot_foot_0_0', 'RightFoot_foot_0_1', 'RightFoot_foot_0_1_Effector',\n 'RightFoot_foot_1_0', 'RightFoot_foot_1_1', 'RightFoot_foot_1_1_Effector',\n 'RightFoot_foot_2_0', 'RightFoot_foot_2_1', 'RightFoot_foot_2_1_Effector',\n 'LeftFoot_foot_0_0', 'LeftFoot_foot_0_1', 'LeftFoot_foot_0_1_Effector',\n 'LeftFoot_foot_1_0', 'LeftFoot_foot_1_1', 'LeftFoot_foot_1_1_Effector',\n 'LeftFoot_foot_2_0', 'LeftFoot_foot_2_1', 'LeftFoot_foot_2_1_Effector',\n ], 0.)\n\n # torso : 10\n massMap['Hips'] += 2.\n massMap['Spine'] += 8.\n\n # head : 3\n 
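# (mass bookkeeping: each commented total above is distributed over one or\n    #  more massMap nodes below; a node can be topped up by several sections,\n    #  e.g. 'Hips' collects torso mass plus part of each thigh)\n    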
massMap['Spine1'] += 3.\n\n # right upper arm : 2\n massMap['RightArm'] += 2.\n\n # left upper arm : 2\n massMap['LeftArm'] += 2.\n\n # right lower arm : 1\n massMap['RightForeArm'] = 1.\n # massMap['RightForeArm'] = 2.\n\n # left lower arm : 1\n massMap['LeftForeArm'] = 1.\n # massMap['LeftForeArm'] = 2.\n\n # right thigh : 7\n massMap['Hips'] += 2.\n massMap['RightUpLeg'] += 5.\n\n # left thigh : 7\n massMap['Hips'] += 2.\n massMap['LeftUpLeg'] += 5.\n\n # right shin : 5\n massMap['RightLeg'] += 5.\n\n # left shin : 5\n massMap['LeftLeg'] += 5.\n\n # right foot : 4\n # massMap['RightFoot'] += 2.\n massMap['RightFoot'] += .4\n\n # left foot : 4\n # massMap['LeftFoot'] += 2.\n massMap['LeftFoot'] += .4\n '''\n massMap['RightFoot_foot_0_0'] = .3\n massMap['RightFoot_foot_0_1'] = .3\n massMap['RightFoot_foot_1_0'] = .3\n massMap['RightFoot_foot_1_1'] = .3\n massMap['RightFoot_foot_2_0'] = .3\n massMap['RightFoot_foot_2_1'] = .3\n massMap['LeftFoot_foot_0_0'] = .3\n massMap['LeftFoot_foot_0_1'] = .3\n massMap['LeftFoot_foot_1_0'] = .3\n massMap['LeftFoot_foot_1_1'] = .3\n massMap['LeftFoot_foot_2_0'] = .3\n massMap['LeftFoot_foot_2_1'] = .3\n #'''\n\n massMap['RightFoot_foot_0_0'] = .3\n massMap['RightFoot_foot_0_1'] = .3\n massMap['RightFoot_foot_0_0_0'] = .3\n massMap['RightFoot_foot_0_1_0'] = .3\n massMap['RightFoot_foot_1_0'] = .3\n massMap['RightFoot_foot_1_1'] = .3\n massMap['RightFoot_foot_1_2'] = .3\n massMap['LeftFoot_foot_0_0'] = .3\n massMap['LeftFoot_foot_0_1'] = .3\n massMap['LeftFoot_foot_0_0_0'] = .3\n massMap['LeftFoot_foot_0_1_0'] = .3\n massMap['LeftFoot_foot_1_0'] = .3\n massMap['LeftFoot_foot_1_1'] = .3\n massMap['LeftFoot_foot_1_2'] = .3\n\n return massMap\n\ndef buildMcfg():\n massMap = buildMassMap()\n mcfg = ypc.ModelConfig()\n mcfg.defaultDensity = 1000.\n mcfg.defaultBoneRatio = .9\n\n totalMass = 0.\n for name in massMap:\n node = mcfg.addNode(name)\n node.mass = massMap[name]\n # totalMass += node.mass\n\n node = mcfg.getNode('Hips')\n node.length = .2\n node.width = .25\n\n node = mcfg.getNode('Spine1')\n node.length = .2\n node.offset = (0,0,0.1)\n\n node = mcfg.getNode('Spine')\n node.width = .22\n\n node = mcfg.getNode('RightFoot')\n node.length = .25\n # node.length = .27\n # node.offset = (0,0,0.01)\n node.width = .1\n node.geom = 'MyFoot1'\n\n node = mcfg.getNode('LeftFoot')\n node.length = .25\n # node.length = .27\n # node.offset = (0,0,0.01)\n node.width = .1\n node.geom = 'MyFoot1'\n\n def capsulize(node_name):\n node = mcfg.getNode(node_name)\n node.geom = 'MyFoot4'\n node.width = 0.01\n # node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0., math.pi/4., 0.])], ypc.CapsuleMaterial(1000., .02, .2))\n # node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0., math.pi/4., 0.])], ypc.CapsuleMaterial(1000., .02, .1))\n # node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0., 0., 0.])], ypc.CapsuleMaterial(1000., .01, -1))\n # node.addGeom('MyFoot4', None, ypc.CapsuleMaterial(1000., .02, .1))\n\n # capsulize('RightFoot')\n # capsulize('LeftFoot')\n\n node = mcfg.getNode('RightFoot')\n node.density = 200.\n node.geom = 'MyFoot5'\n node.width = 0.01\n # node.jointType = 'U'\n\n node = mcfg.getNode('LeftFoot')\n node.density = 200.\n node.geom = 'MyFoot5'\n node.width = 0.01\n # node.jointType = 'U'\n\n # bird foot\n # capsulize('RightFoot_foot_0_0')\n # capsulize('RightFoot_foot_0_1')\n # capsulize('RightFoot_foot_1_0')\n # capsulize('RightFoot_foot_1_1')\n # capsulize('RightFoot_foot_2_0')\n # capsulize('RightFoot_foot_2_1')\n # capsulize('LeftFoot_foot_0_0')\n 
# capsulize('LeftFoot_foot_0_1')\n # capsulize('LeftFoot_foot_1_0')\n # capsulize('LeftFoot_foot_1_1')\n # capsulize('LeftFoot_foot_2_0')\n # capsulize('LeftFoot_foot_2_1')\n\n\n # human foot\n capsulize('RightFoot_foot_0_0')\n node = mcfg.getNode('RightFoot_foot_0_0')\n node.addGeom('MyFoot3', [0.02*np.array([-0.3, 0., 2.5*0.25]), mm.exp([0., -math.atan2(1.2, 2.5), 0.])], ypc.CapsuleMaterial(400., .01, 0.02*2.5+0.02))\n node.addGeom('MyFoot3', [0.02*np.array([-0.3-1.2, 0., 2.5*0.25]), mm.exp([0., -math.atan2(1.2, 2.5), 0.])], ypc.CapsuleMaterial(400., .01, 0.02*2.5+0.02))\n # node.addGeom('MyFoot4', [0.02*np.array([-1.2, 0., 0.]), mm.exp([0., 0., 0.])], ypc.CapsuleMaterial(1000., .01, -1))\n node.jointType = 'R'\n\n capsulize('RightFoot_foot_0_0_0')\n node = mcfg.getNode('RightFoot_foot_0_0_0')\n node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(400., .01, -1))\n node.addGeom('MyFoot4', [0.02*np.array([-1.2, 0., 0.]), mm.exp([0.]*3)], ypc.CapsuleMaterial(400., .01, -1))\n node.jointType = 'R'\n\n capsulize('RightFoot_foot_0_1')\n node = mcfg.getNode('RightFoot_foot_0_1')\n node.addGeom('MyFoot3', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(400., .01, -1))\n node.addGeom('MyFoot3', [0.02*np.array([1.2, 0., 0.]), mm.exp([0.]*3)], ypc.CapsuleMaterial(400.,.01, -1))\n node.jointType = 'R'\n\n capsulize('RightFoot_foot_0_1_0')\n node = mcfg.getNode('RightFoot_foot_0_1_0')\n node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(400., .01, -1))\n node.addGeom('MyFoot4', [0.02*np.array([1.2, 0., 0.]), mm.exp([0.]*3)], ypc.CapsuleMaterial(400., .01, -1))\n node.jointType = 'R'\n\n capsulize('RightFoot_foot_1_0')\n node = mcfg.getNode('RightFoot_foot_1_0')\n node.addGeom('MyFoot3', [0.02*np.array([0., 0., .7]), mm.exp([0.]*3)], ypc.CapsuleMaterial(400., .01, 0.02*2.0+0.02))\n # node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(1000., .01, -1))\n node.jointType = 'R'\n\n capsulize('RightFoot_foot_1_1')\n node = mcfg.getNode('RightFoot_foot_1_1')\n node.addGeom('MyFoot3', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(400., .01, -1))\n node.jointType = 'R'\n\n capsulize('RightFoot_foot_1_2')\n node = mcfg.getNode('RightFoot_foot_1_2')\n node.addGeom('MyFoot3', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(400., .01, -1))\n node.jointType = 'R'\n\n\n capsulize('LeftFoot_foot_0_0')\n node = mcfg.getNode('LeftFoot_foot_0_0')\n node.addGeom('MyFoot3', [0.02*np.array([0.3, 0., 2.5*0.25]), mm.exp([0., math.atan2(1.2, 2.5), 0.])], ypc.CapsuleMaterial(400., .01, 0.02*2.5+0.02))\n node.addGeom('MyFoot3', [0.02*np.array([0.3+1.2, 0., 2.5*0.25]), mm.exp([0., math.atan2(1.2, 2.5), 0.])], ypc.CapsuleMaterial(400., .01, 0.02*2.5+0.02))\n # node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(1000., .01, -1))\n node.jointType = 'R'\n\n capsulize('LeftFoot_foot_0_0_0')\n node = mcfg.getNode('LeftFoot_foot_0_0_0')\n node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(400., .01, -1))\n node.addGeom('MyFoot4', [0.02*np.array([1.2, 0., 0.]), mm.exp([0.]*3)], ypc.CapsuleMaterial(400., .01, -1))\n node.jointType = 'R'\n\n capsulize('LeftFoot_foot_0_1')\n node = mcfg.getNode('LeftFoot_foot_0_1')\n node.addGeom('MyFoot3', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(400., .01, -1))\n node.addGeom('MyFoot3', [0.02*np.array([-1.2, 0., 0.]), mm.exp([0.]*3)], ypc.CapsuleMaterial(400., .01, -1))\n node.jointType = 'R'\n\n 
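# note: the RightFoot_* and LeftFoot_* blocks in this function are YZ mirrors\n    # of each other -- only the sign of the 1.2 x-offset flips. A hedged sketch\n    # (hypothetical helper, not called anywhere in this file) of that repeated\n    # pattern for the *_foot_0_1_0 segments:\n    def capsulize_toe_segment(name, x_sign):\n        # identical to the hand-written *_foot_0_1_0 blocks above and below\n        capsulize(name)\n        node = mcfg.getNode(name)\n        node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(400., .01, -1))\n        node.addGeom('MyFoot4', [0.02*np.array([x_sign*1.2, 0., 0.]), mm.exp([0.]*3)], ypc.CapsuleMaterial(400., .01, -1))\n        node.jointType = 'R'\n\n    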
capsulize('LeftFoot_foot_0_1_0')\n node = mcfg.getNode('LeftFoot_foot_0_1_0')\n node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(400., .01, -1))\n node.addGeom('MyFoot4', [0.02*np.array([-1.2, 0., 0.]), mm.exp([0.]*3)], ypc.CapsuleMaterial(400., .01, -1))\n node.jointType = 'R'\n\n capsulize('LeftFoot_foot_1_0')\n node = mcfg.getNode('LeftFoot_foot_1_0')\n node.addGeom('MyFoot3', [0.02*np.array([0., 0., .7]), mm.exp([0.]*3)], ypc.CapsuleMaterial(400., .01, 0.02*2.0+0.02))\n # node.addGeom('MyFoot4', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(1000., .01, -1))\n node.jointType = 'R'\n\n capsulize('LeftFoot_foot_1_1')\n node = mcfg.getNode('LeftFoot_foot_1_1')\n node.addGeom('MyFoot3', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(400., .01, -1))\n node.jointType = 'R'\n\n capsulize('LeftFoot_foot_1_2')\n node = mcfg.getNode('LeftFoot_foot_1_2')\n node.addGeom('MyFoot3', [np.array([0.]*3), mm.exp([0.]*3)], ypc.CapsuleMaterial(400., .01, -1))\n node.jointType = 'R'\n\n\n return mcfg\n\n\ndef walkings():\n class ForceInfo:\n def __init__(self, startFrame, duration, force):\n self.startFrame = startFrame # frame\n self.duration = duration # sec\n self.force = force # Newton\n self.targetBody = None\n\n #===============================================================================\n # load motion\n #===============================================================================\n MULTI_VIEWER = False\n CAMERA_TRACKING = False\n TORQUE_PLOT = False\n NO_FOOT_SLIDING = True\n\n # global parameters\n Kt = 20.\n Dt = 2.*(Kt**.5)\n # Dt = Kt/900.\n Ks = 2000.\n Ds = 2.*(Ks**.5)\n mu = 1.\n # Dt = 0.\n\n # constaants\n c_min_contact_vel = 100.\n # c_min_contact_vel = 2.\n c_min_contact_time = .7\n c_landing_duration = .2\n c_taking_duration = .3\n c_swf_mid_offset = .0\n c_locking_vel = .05\n\n # c_swf_offset = .0\n c_swf_offset = .01\n # c_swf_offset = .005\n K_stp_pos = 0.\n\n # c5 = .5; c6 = .01\n c5 = .5; c6 = .02\n # c5 = .5; c6 = .05\n # c5 = 1.; c6 = .05\n # c5 = .0; c6 = .0\n\n K_stb_vel = .1\n K_stb_pos = .1\n\n OLD_SWING_HEIGHT = False\n # OLD_SWING_HEIGHT = True\n HIGHER_OFFSET = True\n # HIGHER_OFFSET = False\n\n dir = current_path+'/ppmotion/'\n # dir = './ppmotion/'\n #\n ## K_swp_vel_sag = .1; K_swp_vel_cor = .4; K_swp_pos_sag = .3; K_swp_pos_cor = 0.\n # K_swp_vel_sag = .05; K_swp_vel_cor = .2; K_swp_pos_sag = .2; K_swp_pos_cor = .2\n # K_swp_pos_sag_faster = .05\n # filename = 'wd2_WalkSameSame01.bvh'\n ## filename = 'wd2_WalkSameSame01_REPEATED.bvh'\n\n ## K_swp_vel_sag = .1; K_swp_vel_cor = .4; K_swp_pos_sag = .3; K_swp_pos_cor = 0.\n # K_swp_vel_sag = .05; K_swp_vel_cor = .25; K_swp_pos_sag = .5; K_swp_pos_cor = .2\n # K_swp_pos_sag_faster = .05\n # filename = 'wd2_WalkForwardSlow01.bvh'\n ## filename = 'wd2_WalkForwardSlow01_REPEATED.bvh' # 3 frame diff\n\n # K_swp_vel_sag = .1; K_swp_vel_cor = .4; K_swp_pos_sag = 1.; K_swp_pos_cor = 0.\n # K_stp_pos = .6\n K_swp_vel_sag = .0; K_swp_vel_cor = .3; K_swp_pos_sag = 1.2; K_swp_pos_cor = .2\n K_swp_pos_sag_faster = .05\n filename = 'wd2_WalkForwardNormal00.bvh'\n # filename = 'wd2_WalkForwardNormal00_REPEATED.bvh'\n\n ## K_swp_vel_sag = .1; K_swp_vel_cor = .4; K_swp_pos_sag = .3; K_swp_pos_cor = 0.\n ## K_stp_pos = 0.\n # K_swp_vel_sag = .05; K_swp_vel_cor = .2; K_swp_pos_sag = .3; K_swp_pos_cor = .2\n # K_swp_pos_sag_faster = .05\n ## filename = 'wd2_WalkHandWav00.bvh'\n # filename = 'wd2_WalkHandWav00_REPEATED.bvh'\n\n # mu = 2.\n ## K_swp_vel_sag = .1; K_swp_vel_cor = .4; 
K_swp_pos_sag = .3; K_swp_pos_cor = 0.\n ## K_stp_pos = 0.\n # K_swp_vel_sag = .0; K_swp_vel_cor = .3; K_swp_pos_sag = .2; K_swp_pos_cor = .2\n # K_swp_pos_sag_faster = .0\n ## filename = 'wd2_WalkAzuma01.bvh'\n # filename = 'wd2_WalkAzuma01_REPEATED.bvh' # 2 frame diff\n\n ## K_swp_vel_sag = .1; K_swp_vel_cor = .4; K_swp_pos_sag = 1.; K_swp_pos_cor = 0.\n ## K_stp_pos = 0.\n # K_swp_vel_sag = .0; K_swp_vel_cor = .3; K_swp_pos_sag = .2; K_swp_pos_cor = .2\n # K_swp_pos_sag_faster = .05\n ## filename = 'wd2_WalkSoldier00.bvh' # K_swp_pos_sag = .0\n # filename = 'wd2_WalkSoldier00_REPEATED.bvh'\n\n # mu = 2.\n ## K_swp_vel_sag = .2; K_swp_vel_cor = .4; K_swp_pos_sag = .5;K_swp_pos_cor = 0.\n ## K_stp_pos = 0.\n # K_swp_vel_sag = .05; K_swp_vel_cor = .3; K_swp_pos_sag = .5; K_swp_pos_cor = .2\n # K_swp_pos_sag_faster = .05\n ## filename = 'wd2_WalkForwardVFast00.bvh'\n # filename = 'wd2_WalkForwardVFast00_REPEATED.bvh'\n\n ## K_swp_vel_sag = .0; K_swp_vel_cor = .4; K_swp_pos_sag = .04; K_swp_pos_cor = .1\n ## K_swp_pos_sag_faster = .02\n ## K_stb_vel = .2\n # K_swp_vel_sag = .1; K_swp_vel_cor = .3; K_swp_pos_sag = 1.; K_swp_pos_cor = .3\n # K_swp_pos_sag_faster = .0\n # K_stb_vel = .3\n ## filename = 'wd2_WalkBackward00.bvh'\n # filename = 'wd2_WalkBackward00_REPEATED.bvh'\n\n # motion\n #TODO:\n bvh = yf.readBvhFileAsBvh(dir+filename)\n # motion_ori = bvh.toJointMotion(1.0, False)\n\n # partBvhFilePath = '../PyCommon/modules/samples/simpleJump_long_test2.bvh'\n partBvhFilePath = current_path+'/../PyCommon/modules/samples/simpleJump_long_test2.bvh'\n partBvh = yf.readBvhFileAsBvh(partBvhFilePath)\n bvh.replaceJointFromBvh('RightFoot', partBvh, .02)\n partBvh = yf.readBvhFileAsBvh(partBvhFilePath)\n partBvh.mirror('YZ')\n bvh.replaceJointFromBvh('LeftFoot', partBvh, .02)\n\n motion_ori = bvh.toJointMotion(1., False)\n\n # motion_ori = yf.readBvhFile(dir+filename)\n frameTime = 1/motion_ori.fps\n\n if 'REPEATED' in filename:\n REPEATED = True\n CAMERA_TRACKING = True\n else:\n REPEATED = False\n\n\n #===============================================================================\n # options\n #===============================================================================\n SEGMENT_EDITING = True\n STANCE_FOOT_STABILIZE = True\n MATCH_STANCE_LEG = True\n SWING_FOOT_PLACEMENT = True\n SWING_FOOT_HEIGHT = True\n\n SWING_FOOT_ORIENTATION = False\n\n STANCE_FOOT_PUSH = True\n STANCE_FOOT_BALANCING = True\n\n stitch_func = lambda x : 1. 
- yfg.hermite2nd(x)\n    stf_stabilize_func = yfg.concatenate([yfg.hermite2nd, yfg.one], [c_landing_duration])\n    match_stl_func = yfg.hermite2nd\n    swf_placement_func = yfg.hermite2nd\n    swf_height_func = yfg.hermite2nd\n    swf_height_sine_func = yfg.sine\n    # stf_balancing_func = yfg.concatenate([yfg.hermite2nd, yfg.one], [c_landing_duration])\n    stf_balancing_func = yfg.hermite2nd\n\n    # forceInfos = [ForceInfo(70, .4, (100,0,0))]\n    forceInfos = []\n\n    #===============================================================================\n    # initialize character\n    #===============================================================================\n    # mcfgfile = open(dir + 'mcfg', 'r')\n    # mcfg = cPickle.load(mcfgfile)\n    # mcfgfile.close()\n\n    mcfg = buildMcfg()\n\n    wcfg = ypc.WorldConfig()\n    wcfg.planeHeight = 0.\n    wcfg.useDefaultContactModel = False\n    wcfg.lockingVel = c_locking_vel\n    stepsPerFrame = 30\n    wcfg.timeStep = (frameTime)/stepsPerFrame\n\n    vpWorld = pcvw.VpWorld(wcfg)\n    motionModel = pcvm.VpMotionModel(vpWorld, motion_ori[0], mcfg)\n    # ModelOffset = np.array([0., 0., 0.])\n    # motionModel.translateByOffset(ModelOffset)\n    controlModel = pcvm.VpControlModel(vpWorld, motion_ori[0], mcfg)\n    vpWorld.SetIntegrator(\"IMPLICIT_EULER_FAST\")\n    vpWorld.initialize()\n    print(controlModel)\n    # controlModel = None\n\n\n    # motionModel.recordVelByFiniteDiff()\n    controlModel.initializeHybridDynamics()\n    # controlModel.initializeForwardDynamics()\n\n    #===============================================================================\n    # load segment info\n    #===============================================================================\n    skeleton = motion_ori[0].skeleton\n\n    segname = os.path.splitext(filename)[0]+'.seg'\n    segfile = open(dir+segname, 'rb')\n    seginfo = pickle.load(segfile)\n    segfile.close()\n\n    for i in seginfo:\n        print(i)\n\n    intervals = [info['interval'] for info in seginfo]\n    states = [info['state'] for info in seginfo]\n    temp_motion = copy.deepcopy(motion_ori)\n    segments = yma.splitMotionIntoSegments(temp_motion, intervals)\n    print(len(intervals), 'segments')\n    for i in range(len(intervals)):\n        print('%dth'%i, yba.GaitState.text[states[i]], intervals[i], ',',)\n    print()\n\n    motion_seg_orig = ym.JointMotion()\n    motion_seg_orig += segments[0]\n    motion_seg = ym.JointMotion()\n    motion_seg += segments[0]\n    motion_stitch = ym.JointMotion()\n    motion_stitch += segments[0]\n\n    motion_stf_stabilize = ym.JointMotion()\n    motion_match_stl = ym.JointMotion()\n    motion_swf_placement = ym.JointMotion()\n    motion_swf_height = ym.JointMotion()\n    motion_swf_orientation = ym.JointMotion()\n    motion_stf_balancing = ym.JointMotion()\n    motion_stf_push = ym.JointMotion()\n    motion_control = ym.JointMotion()\n\n    motion_debug1 = ym.JointMotion()\n    motion_debug2 = ym.JointMotion()\n    motion_debug3 = ym.JointMotion()\n\n    P = ym.JointMotion()\n    P_hat = ym.JointMotion()\n    M_tc = ym.JointMotion()\n    M_hat_tc_1 = ym.JointMotion()\n\n    #===============================================================================\n    # loop variable\n    #===============================================================================\n    seg_index = [0]\n    acc_offset = [0]\n    extended = [False]\n    prev_R_swp = [None]\n    stl_y_limit_num = [0]\n    stl_xz_limit_num = [0]\n    avg_dCM = [mm.O_Vec3()]\n    # avg_stf_v = [mm.O_Vec3()]\n    # avg_stf_av = [mm.O_Vec3()]\n\n    # stf_push_func = [yfg.zero]\n    step_length_cur = [0.]\n\n    step_length_tar = [0.]\n    step_axis = [mm.O_Vec3()]\n\n    #===============================================================================\n    # 
information\n #===============================================================================\n bodyIDsToCheck = range(vpWorld.getBodyNum())\n mus = [mu]*len(bodyIDsToCheck)\n\n bodyMasses = controlModel.getBodyMasses()\n totalMass = controlModel.getTotalMass()\n\n # hwangpil\n #extendedFootName = ['Foot_foot_0_0', 'Foot_foot_0_1', 'Foot_foot_1_0',\n # 'Foot_foot_1_1', 'Foot_foot_2_0', 'Foot_foot_2_1']\n\n extendedFootName = ['Foot_foot_0_0', 'Foot_foot_0_1', 'Foot_foot_0_0_0', 'Foot_foot_0_1_0', 'Foot_foot_1_0',\n 'Foot_foot_1_1', 'Foot_foot_1_2']\n\n # extendedFootName = ['Foot_foot_0_1', 'Foot_foot_1_1', 'Foot_foot_2_1']\n\n\n ToeName = ['Foot_foot_0_0_0', 'Foot_foot_0_1_0']\n HeelName = ['Foot_foot_1_0', 'Foot_foot_1_1', 'Foot_foot_1_2']\n\n lIDs = [skeleton.getJointIndex('Left'+name) for name in extendedFootName]\n rIDs = [skeleton.getJointIndex('Right'+name) for name in extendedFootName]\n\n lToes = [skeleton.getJointIndex('Left'+name) for name in ToeName]\n rToes = [skeleton.getJointIndex('Right'+name) for name in ToeName]\n\n lHeels = [skeleton.getJointIndex('Left'+name) for name in HeelName]\n rHeels = [skeleton.getJointIndex('Right'+name) for name in HeelName]\n\n # for i in lIDs+rIDs:\n # controlModel.setHybridDynamics(i, \"DYNAMIC\")\n\n # each dof is whether KINEMATIC or not\n hdAccMask = [True]*controlModel.getTotalDOF()\n hdAccMask[:6] = [False]*6\n # for i in lIDs+rIDs:\n # hdAccMask[3+3*i : 6+3*i] = [False]*3\n\n\n lID = controlModel.name2id('LeftFoot'); rID = controlModel.name2id('RightFoot')\n lUpLeg = skeleton.getJointIndex('LeftUpLeg');rUpLeg = skeleton.getJointIndex('RightUpLeg')\n lKnee = skeleton.getJointIndex('LeftLeg'); rKnee = skeleton.getJointIndex('RightLeg')\n lFoot = skeleton.getJointIndex('LeftFoot'); rFoot = skeleton.getJointIndex('RightFoot')\n spine = skeleton.getJointIndex('Spine')\n\n uppers = [skeleton.getJointIndex(name) for name in ['Hips', 'Spine', 'Spine1', 'LeftArm', 'LeftForeArm', 'RightArm', 'RightForeArm']]\n upperMass = sum([bodyMasses[i] for i in uppers])\n lLegs = [skeleton.getJointIndex(name) for name in ['LeftUpLeg', 'LeftLeg', 'LeftFoot']]\n rLegs = [skeleton.getJointIndex(name) for name in ['RightUpLeg', 'RightLeg', 'RightFoot']]\n allJoints = set(range(skeleton.getJointNum()))\n\n\n footMass = sum([bodyMasses[i] for i in lIDs]) + bodyMasses[lID]\n HeelMass = sum([bodyMasses[i] for i in lHeels])\n ToeMass = sum([bodyMasses[i] for i in lToes])\n print('totalMass: ', totalMass)\n print('footMass: ', footMass)\n print('heelmass: ', HeelMass)\n print('ToeMass: ', ToeMass)\n\n halfFootHeight = controlModel.getBodyShape(lFoot)[1] / 2.\n # halfFootHeight = 0.05\n\n for fi in forceInfos:\n fi.targetBody = spine\n\n #===========================================================================\n # data collection\n #===========================================================================\n rhip_torques = []\n rknee_torques = []\n rankle_torques = []\n rankle_torques = []\n\n #===============================================================================\n # rendering\n #===============================================================================\n rd_CM = [None]; rd_CP = [None]; rd_CMP = [None]\n rd_forces = [None]; rd_force_points = [None]\n rd_torques = []; rd_joint_positions = []\n\n rd_point1 = [None]\n rd_point2 = [None]\n rd_vec1 = [None]; rd_vecori1 = [None]\n rd_vec2 = [None]; rd_vecori2 = [None]\n rd_frame1 = [None]\n rd_frame2 = [None]\n\n if MULTI_VIEWER:\n viewer = ymv.MultiViewer(800, 655)\n # viewer = ymv.MultiViewer(800, 655, 
True)\n viewer.setRenderers1([yr.VpModelRenderer(motionModel, MOTION_COLOR, yr.POLYGON_FILL)])\n viewer.setRenderers2([yr.VpModelRenderer(controlModel, CHARACTER_COLOR, yr.POLYGON_FILL)])\n else:\n viewer = ysv.SimpleViewer()\n # viewer = hsv.hpSimpleViewer()\n # viewer.record(False)\n\n viewer.doc.addRenderer('motionModel', yr.VpModelRenderer(motionModel, (0,150,255), yr.POLYGON_LINE))\n viewer.doc.addRenderer('controlModel', yr.VpModelRenderer(controlModel, (50,200,200), yr.POLYGON_FILL))\n\n # viewer.doc.addObject('motion_ori', motion_ori)\n # viewer.doc.addRenderer('motion_ori', yr.JointMotionRenderer(motion_ori, (0,100,255), yr.LINK_BONE))\n # viewer.doc.addRenderer('motion_seg_orig', yr.JointMotionRenderer(motion_seg_orig, (0,100,255), yr.LINK_BONE))\n # viewer.doc.addRenderer('motion_seg', yr.JointMotionRenderer(motion_seg, (0,150,255), yr.LINK_BONE))\n # viewer.doc.addRenderer('motion_stitch', yr.JointMotionRenderer(motion_stitch, (0,255,200), yr.LINK_BONE))\n\n viewer.doc.addRenderer('motion_stf_stabilize', yr.JointMotionRenderer(motion_stf_stabilize, (255,0,0), yr.LINK_BONE))\n viewer.doc.addRenderer('motion_match_stl', yr.JointMotionRenderer(motion_match_stl, (255,200,0), yr.LINK_BONE))\n # viewer.doc.addRenderer('motion_swf_placement', yr.JointMotionRenderer(motion_swf_placement, (255,100,255), yr.LINK_BONE))\n # viewer.doc.addRenderer('motion_swf_height', yr.JointMotionRenderer(motion_swf_height, (50,255,255), yr.LINK_BONE))\n # viewer.doc.addRenderer('motion_swf_orientation', yr.JointMotionRenderer(motion_swf_orientation, (255,100,0), yr.LINK_BONE))\n # viewer.doc.addRenderer('motion_stf_push', yr.JointMotionRenderer(motion_stf_push, (50,255,200), yr.LINK_BONE))\n # viewer.doc.addRenderer('motion_stf_balancing', yr.JointMotionRenderer(motion_stf_balancing, (255,100,255), yr.LINK_BONE))\n # viewer.doc.addRenderer('motion_control', yr.JointMotionRenderer(motion_control, (255,0,0), yr.LINK_BONE))\n\n # viewer.doc.addRenderer('motion_debug1', yr.JointMotionRenderer(motion_debug1, (0,255,0), yr.LINK_BONE))\n # viewer.doc.addRenderer('motion_debug2', yr.JointMotionRenderer(motion_debug2, (255,0,255), yr.LINK_BONE))\n # viewer.doc.addRenderer('motion_debug3', yr.JointMotionRenderer(motion_debug3, (255,255,0), yr.LINK_BONE))\n\n # viewer.doc.addRenderer('M_tc', yr.JointMotionRenderer(M_tc, (255,255,0), yr.LINK_BONE))\n # viewer.doc.addRenderer('P_hat', yr.JointMotionRenderer(P_hat, (255,255,0), yr.LINK_BONE))\n # viewer.doc.addRenderer('P', yr.JointMotionRenderer(P, (255,255,0), yr.LINK_BONE))\n # viewer.doc.addRenderer('M_hat_tc_1', yr.JointMotionRenderer(M_hat_tc_1, (255,255,0), yr.LINK_BONE))\n\n # viewer.doc.addRenderer('rd_CM', yr.PointsRenderer(rd_CM, (255,255,0)))\n # viewer.doc.addRenderer('rd_CP', yr.PointsRenderer(rd_CP, (255,0,0)))\n # viewer.doc.addRenderer('rd_CMP', yr.PointsRenderer(rd_CMP, (0,255,0)))\n # viewer.doc.addRenderer('forces', yr.ForcesRenderer(rd_forces, rd_force_points, (255,0,0), ratio=.01, fromPoint=False))\n # viewer.doc.addRenderer('torques', yr.VectorsRenderer(rd_torques, rd_joint_positions, (255,0,0)))\n\n # viewer.doc.addRenderer('rd_point1', yr.PointsRenderer(rd_point1, (0,255,0)))\n viewer.doc.addRenderer('rd_point2', yr.PointsRenderer(rd_point2, (255,0,0)))\n # viewer.doc.addRenderer('rd_vec1', yr.VectorsRenderer(rd_vec1, rd_vecori1, (255,0,0)))\n # viewer.doc.addRenderer('rd_vec2', yr.VectorsRenderer(rd_vec2, rd_vecori2, (0,255,0)))\n # viewer.doc.addRenderer('rd_frame1', yr.FramesRenderer(rd_frame1, (0,200,200)))\n # 
viewer.doc.addRenderer('rd_frame2', yr.FramesRenderer(rd_frame2, (200,200,0)))\n # viewer.setMaxFrame(len(motion_ori)-1)\n\n if not REPEATED:\n viewer.setMaxFrame(len(motion_ori)-1)\n else:\n viewer.setMaxFrame(1000)\n\n if CAMERA_TRACKING:\n if MULTI_VIEWER:\n cameraTargets1 = [None] * (viewer.getMaxFrame()+1)\n cameraTargets2 = [None] * (viewer.getMaxFrame()+1)\n else:\n cameraTargets = [None] * (viewer.getMaxFrame()+1)\n\n if TORQUE_PLOT:\n rhip_torques = [0.]*viewer.getMaxFrame()\n rknee_torques = [0.]*viewer.getMaxFrame()\n rankle_torques = [0.]*viewer.getMaxFrame()\n\n # pt = [0.]\n def postFrameCallback_Always(frame):\n # if frame==1: pt[0] = time.time()\n # if frame==31: print 'elapsed time for 30 frames:', time.time()-pt[0]\n if CAMERA_TRACKING:\n if MULTI_VIEWER:\n if cameraTargets1[frame] is None:\n cameraTargets1[frame] = motionModel.getBodyPositionGlobal(0)\n # cameraTargets1[frame] = motion_ori[frame].getJointPositionGlobal(0)\n viewer.setCameraTarget1(cameraTargets1[frame])\n\n if cameraTargets2[frame] is None:\n cameraTargets2[frame] = controlModel.getJointPositionGlobal(0)\n viewer.setCameraTarget2(cameraTargets2[frame])\n\n else:\n if cameraTargets[frame] is None:\n cameraTargets[frame] = controlModel.getJointPositionGlobal(0)\n viewer.setCameraTarget(cameraTargets[frame])\n if plot is not None:\n plot.updateVline(frame)\n viewer.setPostFrameCallback_Always(postFrameCallback_Always)\n\n plot = None\n # plot = ymp.InteractivePlot()\n if plot is not None:\n plot.setXlimit(0, len(motion_ori))\n plot.setYlimit(-0.05, .05)\n plot.addDataSet('zero')\n plot.addDataSet('diff')\n plot.addDataSet('debug1')\n plot.addDataSet('debug2')\n\n\n def viewer_onClose(data):\n if plot!=None:\n plot.close()\n viewer.onClose(data)\n viewer.callback(viewer_onClose)\n\n def simulateCallback(frame):\n # seginfo\n segIndex = seg_index[0]\n curState = seginfo[segIndex]['state']\n curInterval = yma.offsetInterval(acc_offset[0], seginfo[segIndex]['interval'])\n stanceLegs = seginfo[segIndex]['stanceHips']\n swingLegs = seginfo[segIndex]['swingHips']\n stanceFoots = seginfo[segIndex]['stanceFoots']\n swingFoots = seginfo[segIndex]['swingFoots']\n swingKnees = seginfo[segIndex]['swingKnees']\n groundHeight = seginfo[segIndex]['ground_height']\n maxStfPushFrame = seginfo[segIndex]['max_stf_push_frame']\n\n # hwangpil\n # temporary change\n for legList in (stanceLegs, swingLegs):\n for i in range(len(legList)):\n if legList[i] == 10:\n legList[i] = skeleton.getJointIndex('RightUpLeg')\n\n for footList in (stanceFoots, swingFoots):\n for i in range(len(footList)):\n if footList[i] == 12:\n footList[i] = skeleton.getJointIndex('RightFoot')\n\n stanceToes = []\n if skeleton.getJointIndex('LeftFoot') in stanceFoots:\n stanceToes.extend(lToes)\n if skeleton.getJointIndex('RightFoot') in stanceFoots:\n stanceToes.extend(rToes)\n\n swingHeels = []\n if skeleton.getJointIndex('LeftFoot') in swingFoots:\n swingHeels.extend(lHeels)\n if skeleton.getJointIndex('RightFoot') in swingFoots:\n swingHeels.extend(rHeels)\n\n\n\n\n prev_frame = frame-1 if frame>0 else 0\n # prev_frame = frame\n\n # information\n # dCM_tar = yrp.getCM(motion_seg.getJointVelocitiesGlobal(frame), bodyMasses, upperMass, uppers)\n # CM_tar = yrp.getCM(motion_seg.getJointPositionsGlobal(frame), bodyMasses, upperMass, uppers)\n ## dCM_tar = yrp.getCM(motion_seg.getJointVelocitiesGlobal(frame), bodyMasses, totalMass)\n ## CM_tar = yrp.getCM(motion_seg.getJointPositionsGlobal(frame), bodyMasses, totalMass)\n # stf_tar = 
motion_seg.getJointPositionGlobal(stanceFoots[0], frame)\n # CMr_tar = CM_tar - stf_tar\n\n dCM_tar = motion_seg.getJointVelocityGlobal(0, prev_frame)\n CM_tar = motion_seg.getJointPositionGlobal(0, prev_frame)\n # dCM_tar = yrp.getCM(motion_seg.getJointVelocitiesGlobal(prev_frame), bodyMasses, upperMass, uppers)\n # CM_tar = yrp.getCM(motion_seg.getJointPositionsGlobal(prev_frame), bodyMasses, upperMass, uppers)\n # dCM_tar = yrp.getCM(motion_seg.getJointVelocitiesGlobal(prev_frame), bodyMasses, totalMass)\n # CM_tar = yrp.getCM(motion_seg.getJointPositionsGlobal(prev_frame), bodyMasses, totalMass)\n stf_tar = motion_seg.getJointPositionGlobal(stanceFoots[0], prev_frame)\n CMr_tar = CM_tar - stf_tar\n\n # dCM : average velocity of root of controlModel over 1 frame\n dCM = avg_dCM[0]\n CM = controlModel.getJointPositionGlobal(0)\n # CM = yrp.getCM(controlModel.getJointPositionsGlobal(), bodyMasses, upperMass, uppers)\n # CM = yrp.getCM(controlModel.getJointPositionsGlobal(), bodyMasses, totalMass)\n CMreal = yrp.getCM(controlModel.getJointPositionsGlobal(), bodyMasses, totalMass)\n stf = controlModel.getJointPositionGlobal(stanceFoots[0])\n CMr = CM - stf\n\n diff_dCM = mm.projectionOnPlane(dCM-dCM_tar, (1,0,0), (0,0,1))\n diff_dCM_axis = np.cross((0,1,0), diff_dCM)\n rd_vec1[0] = diff_dCM; rd_vecori1[0] = CM_tar\n\n diff_CMr = mm.projectionOnPlane(CMr-CMr_tar, (1,0,0), (0,0,1))\n # rd_vec1[0] = diff_CMr; rd_vecori1[0] = stf_tar\n diff_CMr_axis = np.cross((0,1,0), diff_CMr)\n\n direction = mm.normalize2(mm.projectionOnPlane(dCM_tar, (1,0,0), (0,0,1)))\n # direction = mm.normalize2(mm.projectionOnPlane(dCM, (1,0,0), (0,0,1)))\n directionAxis = np.cross((0,1,0), direction)\n\n diff_dCM_sag, diff_dCM_cor = mm.projectionOnVector2(diff_dCM, direction)\n # rd_vec1[0] = diff_dCM_sag; rd_vecori1[0] = CM_tar\n diff_dCM_sag_axis = np.cross((0,1,0), diff_dCM_sag)\n diff_dCM_cor_axis = np.cross((0,1,0), diff_dCM_cor)\n\n diff_CMr_sag, diff_CMr_cor = mm.projectionOnVector2(diff_CMr, direction)\n diff_CMr_sag_axis = np.cross((0,1,0), diff_CMr_sag)\n diff_CMr_cor_axis = np.cross((0,1,0), diff_CMr_cor)\n\n t = (frame-curInterval[0])/float(curInterval[1]-curInterval[0])\n t_raw = t\n if t>1.: t=1.\n\n\n p_root = motion_stitch[frame].getJointPositionGlobal(0)\n R_root = motion_stitch[frame].getJointOrientationGlobal(0)\n\n motion_seg_orig.goToFrame(frame)\n motion_seg.goToFrame(frame)\n motion_stitch.goToFrame(frame)\n\n motion_debug1.append(motion_stitch[frame].copy())\n motion_debug1.goToFrame(frame)\n motion_debug2.append(motion_stitch[frame].copy())\n motion_debug2.goToFrame(frame)\n motion_debug3.append(motion_stitch[frame].copy())\n motion_debug3.goToFrame(frame)\n\n # paper implementation\n M_tc.append(motion_stitch[prev_frame])\n M_tc.goToFrame(frame)\n P_hat.append(M_tc[frame].copy())\n P_hat.goToFrame(frame)\n\n p_temp = ym.JointPosture(skeleton)\n p_temp.rootPos = controlModel.getJointPositionGlobal(0)\n p_temp.setJointOrientationsLocal(controlModel.getJointOrientationsLocal())\n P.append(p_temp)\n P.goToFrame(frame)\n\n # stance foot stabilize\n motion_stf_stabilize.append(motion_stitch[frame].copy())\n motion_stf_stabilize.goToFrame(frame)\n if STANCE_FOOT_STABILIZE:\n for stanceFoot in stanceFoots:\n R_target_foot = motion_seg[frame].getJointOrientationGlobal(stanceFoot)\n R_current_foot = motion_stf_stabilize[frame].getJointOrientationGlobal(stanceFoot)\n motion_stf_stabilize[frame].setJointOrientationGlobal(stanceFoot, cm.slerp(R_current_foot, R_target_foot , stf_stabilize_func(t)))\n # 
R_target_foot = motion_seg[frame].getJointOrientationLocal(stanceFoot)\n # R_current_foot = motion_stf_stabilize[frame].getJointOrientationLocal(stanceFoot)\n # motion_stf_stabilize[frame].setJointOrientationLocal(stanceFoot, cm.slerp(R_current_foot, R_target_foot , stf_stabilize_func(t)))\n\n # match stance leg\n motion_match_stl.append(motion_stf_stabilize[frame].copy())\n motion_match_stl.goToFrame(frame)\n if MATCH_STANCE_LEG:\n if curState!=yba.GaitState.STOP:\n for i in range(len(stanceLegs)):\n stanceLeg = stanceLegs[i]\n stanceFoot = stanceFoots[i]\n\n # # motion stance leg -> character stance leg as time goes\n R_motion = motion_match_stl[frame].getJointOrientationGlobal(stanceLeg)\n R_character = controlModel.getJointOrientationGlobal(stanceLeg)\n motion_match_stl[frame].setJointOrientationGlobal(stanceLeg, cm.slerp(R_motion, R_character, match_stl_func(t)))\n\n # t_y = match_stl_func_y(t)\n # t_xz = match_stl_func(t)\n #\n # R_motion = motion_match_stl[frame].getJointOrientationGlobal(stanceLeg)\n # R_character = controlModel.getJointOrientationGlobal(stanceLeg)\n # R = np.dot(R_character, R_motion.T)\n # R_y, R_xz = mm.projectRotation((0,1,0), R)\n # motion_match_stl[frame].mulJointOrientationGlobal(stanceLeg, mm.scaleSO3(R_xz, t_xz))\n # motion_match_stl[frame].mulJointOrientationGlobal(stanceLeg, mm.scaleSO3(R_y, t_y))\n\n # swing foot placement\n motion_swf_placement.append(motion_match_stl[frame].copy())\n motion_swf_placement.goToFrame(frame)\n if SWING_FOOT_PLACEMENT:\n t_swing_foot_placement = swf_placement_func(t)\n\n if extended[0]:\n R_swp_sag = prev_R_swp[0][0]\n R_swp_cor = prev_R_swp[0][1]\n else:\n R_swp_sag = mm.I_SO3(); R_swp_cor = mm.I_SO3()\n R_swp_sag = np.dot(R_swp_sag, mm.exp(diff_dCM_sag_axis * K_swp_vel_sag * -t_swing_foot_placement))\n R_swp_cor = np.dot(R_swp_cor, mm.exp(diff_dCM_cor_axis * K_swp_vel_cor * -t_swing_foot_placement))\n if np.dot(direction, diff_CMr_sag) < 0:\n R_swp_sag = np.dot(R_swp_sag, mm.exp(diff_CMr_sag_axis * K_swp_pos_sag * -t_swing_foot_placement))\n else:\n R_swp_sag = np.dot(R_swp_sag, mm.exp(diff_CMr_sag_axis * K_swp_pos_sag_faster * -t_swing_foot_placement))\n R_swp_cor = np.dot(R_swp_cor, mm.exp(diff_CMr_cor_axis * K_swp_pos_cor * -t_swing_foot_placement))\n\n for i in range(len(swingLegs)):\n swingLeg = swingLegs[i]\n swingFoot = swingFoots[i]\n\n # save swing foot global orientation\n # R_swf = motion_swf_placement[frame].getJointOrientationGlobal(swingFoot)\n\n # rotate swing leg\n motion_swf_placement[frame].mulJointOrientationGlobal(swingLeg, R_swp_sag)\n motion_swf_placement[frame].mulJointOrientationGlobal(swingLeg, R_swp_cor)\n\n # restore swing foot global orientation\n # motion_swf_placement[frame].setJointOrientationGlobal(swingFoot, R_swf)\n\n # hwangpil\n # temporal code.... 
for heel strike and ankle pushup\n # motion_swf_placement[frame].mulJointOrientationGlobal(swingFoot, mm.exp([0., 0., -0.17*t_swing_foot_placement]))\n # motion_swf_placement[frame].mulJointOrientationGlobal(swingFoot, mm.exp([0.2*t_swing_foot_placement, 0., 0.]))\n\n prev_R_swp[0] = (R_swp_sag, R_swp_cor)\n\n # swing foot height\n motion_swf_height.append(motion_swf_placement[frame].copy())\n # motion_swf_height.append(motion_stitch[frame].copy())\n motion_swf_height.goToFrame(frame)\n if SWING_FOOT_HEIGHT:\n for swingFoot in swingFoots:\n stanceFoot = stanceFoots[0]\n\n # save foot global orientation\n R_foot = motion_swf_height[frame].getJointOrientationGlobal(swingFoot)\n R_stance_foot = motion_swf_height[frame].getJointOrientationGlobal(stanceFoot)\n\n if OLD_SWING_HEIGHT:\n height_tar = motion_swf_height[frame].getJointPositionGlobal(swingFoot)[1] - motion_swf_height[frame].getJointPositionGlobal(stanceFoot)[1]\n else:\n height_tar = motion_swf_height[prev_frame].getJointPositionGlobal(swingFoot)[1] - groundHeight\n d_height_tar = motion_swf_height.getJointVelocityGlobal(swingFoot, prev_frame)[1]\n # motion_debug1[frame] = motion_swf_height[frame].copy()\n\n # rotate\n motion_swf_height[frame].rotateByTarget(controlModel.getJointOrientationGlobal(0))\n # motion_debug2[frame] = motion_swf_height[frame].copy()\n # motion_debug2[frame].translateByTarget(controlModel.getJointPositionGlobal(0))\n\n if OLD_SWING_HEIGHT:\n height_cur = motion_swf_height[frame].getJointPositionGlobal(swingFoot)[1] - motion_swf_height[frame].getJointPositionGlobal(stanceFoot)[1]\n else:\n height_cur = controlModel.getJointPositionGlobal(swingFoot)[1] - halfFootHeight - c_swf_offset\n d_height_cur = controlModel.getJointVelocityGlobal(swingFoot)[1]\n\n if OLD_SWING_HEIGHT:\n offset_height = (height_tar - height_cur) * swf_height_func(t) * c5\n else:\n offset_height = ((height_tar - height_cur) * c5\n + (d_height_tar - d_height_cur) * c6) * swf_height_func(t)\n\n offset_sine = c_swf_mid_offset * swf_height_sine_func(t)\n\n offset = 0.\n offset += offset_height\n offset += offset_sine\n\n if offset > 0.:\n newPosition = motion_swf_height[frame].getJointPositionGlobal(swingFoot)\n newPosition[1] += offset\n aik.ik_analytic(motion_swf_height[frame], swingFoot, newPosition)\n else:\n if HIGHER_OFFSET:\n newPosition = motion_swf_height[frame].getJointPositionGlobal(stanceFoot)\n newPosition[1] -= offset\n aik.ik_analytic(motion_swf_height[frame], stanceFoot, newPosition)\n\n # return\n # motion_debug3[frame] = motion_swf_height[frame].copy()\n # motion_debug3[frame].translateByTarget(controlModel.getJointPositionGlobal(0))\n motion_swf_height[frame].rotateByTarget(R_root)\n\n # restore foot global orientation\n motion_swf_height[frame].setJointOrientationGlobal(swingFoot, R_foot)\n motion_swf_height[frame].setJointOrientationGlobal(stanceFoot, R_stance_foot)\n\n if plot!=None:\n plot.addDataPoint('debug1', frame, offset_height)\n # plot.addDataPoint('debug2', frame, height_cur)\n # plot.addDataPoint('diff', frame, diff)\n\n # stance foot push\n motion_stf_push.append(motion_swf_height[frame].copy())\n # motion_stf_push.append(motion_swf_placement[frame].copy())\n motion_stf_push.goToFrame(frame)\n if STANCE_FOOT_PUSH:\n # TODO:\n # swingFoots?????????????????????????\n for swingFoot in swingFoots:\n # max_t = (maxStfPushFrame)/float(curInterval[1]-curInterval[0])\n # stf_push_func = yfg.concatenate([yfg.sine, yfg.zero], [max_t*2])\n stf_push_func = yfg.concatenate([yfg.sine, yfg.zero], [c_taking_duration*2])\n\n 
R_swp_sag = mm.I_SO3()\n # R_swp_sag = np.dot(R_swp_sag, mm.exp(diff_dCM_sag_axis * K_stp_vel * -stf_push_func(t)))\n\n # if step_length_cur[0] < step_length_tar[0]:\n # ratio = step_length_cur[0] / step_length_tar[0]\n # R_max = maxmaxStfPushFrame\n # R_zero =\n R_swp_sag = np.dot(R_swp_sag, mm.exp((step_length_tar[0] - step_length_cur[0])*step_axis[0] * K_stp_pos * -stf_push_func(t)))\n\n motion_stf_push[frame].mulJointOrientationGlobal(swingFoot, R_swp_sag)\n\n # stance foot balancing\n motion_stf_balancing.append(motion_stf_push[frame].copy())\n motion_stf_balancing.goToFrame(frame)\n if STANCE_FOOT_BALANCING:\n R_stb = mm.exp(diff_dCM_axis * K_stb_vel * stf_balancing_func(t))\n R_stb = np.dot(R_stb, mm.exp(diff_CMr_axis * K_stb_pos * stf_balancing_func(t)))\n for stanceFoot in stanceFoots:\n if frame < 5: break\n motion_stf_balancing[frame].mulJointOrientationGlobal(stanceFoot, R_stb)\n #TODO:\n # hwangpil\n # swing foot heel strike adjustment\n # make heel as flat as possible to ground\n swf_heel_func = yfg.hermite2nd\n for swingHeel in swingHeels:\n joint_vec_cur = np.dot(controlModel.getJointOrientationGlobal(swingHeel), np.array((0., 0., 1.)))\n joint_vec_tar = copy.deepcopy(joint_vec_cur)\n joint_vec_tar[1] = 0.\n R_target_heel = mm.exp(swf_heel_func(t)*mm.logSO3(mm.getSO3FromVectors(joint_vec_cur, joint_vec_tar)))\n motion_stf_balancing[frame].mulJointOrientationGlobal(swingHeel, R_target_heel)\n\n # stance foot ankle pushup adjustment\n # stf_ankle_func = yfg.hermite2nd\n stf_ankle_func = lambda x: -2*(x**2)+3*(x**3)\n if len(stanceFoots) == 1:\n for stanceFoot in stanceFoots:\n R_target_ankle = mm.exp(stf_ankle_func(t)*mm.deg2Rad(20.)*np.array([1., 0., 0.]))\n motion_stf_balancing[frame].mulJointOrientationLocal(stanceFoot, R_target_ankle)\n #'''\n # stance foot toe adjustment\n # stf_toe_func = yfg.hermite2nd\n stf_toe_func = lambda x: -2*(x**8)+3*(x**9)\n if len(stanceFoots) == 1:\n for stanceToe in stanceToes:\n # joint_vec_cur = np.dot(controlModel.getJointOrientationGlobal(stanceToe), np.array((0., 0., 1.)))\n ## joint_vec_cur = np.dot(motion_stf_balancing[frame].getJointOrientationGlobal(stanceToe), np.array((0., 0., 1.)))\n # joint_vec_tar = copy.deepcopy(joint_vec_cur)\n # joint_vec_tar[1] = 0.\n ## R_target_toe = mm.exp(stf_toe_func(t)*mm.logSO3(mm.getSO3FromVectors(joint_vec_cur, joint_vec_tar)))\n # R_target_toe = mm.getSO3FromVectors(joint_vec_cur, joint_vec_tar)\n # motion_stf_balancing[frame].mulJointOrientationGlobal(stanceToe, R_target_toe)\n R_target_toe = mm.exp(stf_toe_func(t)*mm.deg2Rad(-30.)*np.array([1., 0., 0.]))\n motion_stf_balancing[frame].mulJointOrientationLocal(stanceToe, R_target_toe)\n #'''\n\n\n\n\n # control trajectory\n # motion_control.append(motion_stitch[frame].copy())\n # motion_control.append(motion_swf_height[frame].copy())\n # motion_control.append(motion_match_stl[frame].copy())\n motion_control.append(motion_stf_balancing[frame].copy())\n motion_control.goToFrame(frame)\n\n #=======================================================================\n # tracking with inverse dynamics\n #=======================================================================\n\n weightMap = [1.] 
* (skeleton.getJointNum())\n\n toeWeights = 0.001\n\n for jointIdx in lIDs:\n weightMap[jointIdx] = toeWeights\n\n for jointIdx in rIDs:\n weightMap[jointIdx] = toeWeights\n\n th_r = motion_control.getDOFPositions(frame)\n th = controlModel.getDOFPositions()\n dth_r = motion_control.getDOFVelocities(frame)\n dth = controlModel.getDOFVelocities()\n ddth_r = motion_control.getDOFAccelerations(frame)\n ddth_des = yct.getDesiredDOFAccelerations(th_r, th, dth_r, dth, ddth_r, Kt, Dt, weightMap)\n\n totalDOF = controlModel.getTotalDOF()\n # ddth_des_flat = ype.makeFlatList(totalDOF)\n ddth_des_flat = ype.makeFlatList(controlModel.get3dExtendTotalDOF())\n ype.flatten(ddth_des, ddth_des_flat)\n\n #=======================================================================\n # simulation\n #=======================================================================\n CP = mm.v3(0.,0.,0.)\n F = mm.v3(0.,0.,0.)\n avg_dCM[0] = mm.v3(0.,0.,0.)\n\n # external force rendering info\n del rd_forces[:]; del rd_force_points[:]\n for fi in forceInfos:\n if fi.startFrame <= frame and frame < fi.startFrame + fi.duration*(1/frameTime):\n rd_forces.append(fi.force)\n rd_force_points.append(controlModel.getBodyPositionGlobal(fi.targetBody))\n contactPositions = None\n\n for i in range(stepsPerFrame):\n if i % 5 == 0:\n # bodyIDs, contactPositions, contactPositionLocals, contactForces = vpWorld.calcPenaltyForce(bodyIDsToCheck, mus, Ks, Ds)\n bodyIDs, contactPositions, contactPositionLocals, contactForces, timeStamp \\\n = hls.calcLCPForcesHD(motion_ori, vpWorld, controlModel, bodyIDsToCheck, 1., ddth_des_flat, ddth_des_flat, solver='qp', hdAccMask=hdAccMask)\n\n if contactForces is not None:\n lContactNum = sum([sum([j==i for j in bodyIDs]) for i in lIDs])\n rContactNum = sum([sum([j==i for j in bodyIDs]) for i in rIDs])\n if 1 <= lContactNum <= 2:\n lbodyIDbs = [any([j==i for i in lIDs])for j in bodyIDs]\n lbodyIDs = [i for i, x in enumerate(lbodyIDbs) if x]\n for i in reversed(lbodyIDs):\n bodyIDs.pop(i)\n contactPositions.pop(i)\n contactPositionLocals.pop(i)\n contactForces.pop(i)\n\n if 1 <= rContactNum <= 2:\n rbodyIDbs = [any([j==i for i in rIDs])for j in bodyIDs]\n rbodyIDs = [i for i, x in enumerate(rbodyIDbs) if x]\n for i in reversed(rbodyIDs):\n bodyIDs.pop(i)\n contactPositions.pop(i)\n contactPositionLocals.pop(i)\n contactForces.pop(i)\n\n if contactForces is not None:\n vpWorld.applyPenaltyForce(bodyIDs, contactPositionLocals, contactForces)\n\n # print contactForces\n\n # apply external force\n for fi in forceInfos:\n if fi.startFrame <= frame and frame < fi.startFrame + fi.duration*(1/frameTime):\n controlModel.applyBodyForceGlobal(fi.targetBody, fi.force)\n\n for i in rIDs+lIDs:\n controlModel.setJointTorqueLocal(i, ddth_des[i])\n controlModel.setDOFAccelerations(ddth_des)\n controlModel.solveHybridDynamics()\n\n if TORQUE_PLOT:\n rhip_torques[frame] += mm.length(controlModel.getJointTorqueLocal(rUpLeg))\n rknee_torques[frame] += mm.length(controlModel.getJointTorqueLocal(rKnee))\n rankle_torques[frame] += mm.length(controlModel.getJointTorqueLocal(rFoot))\n\n # rd_torques[:] = [controlModel.getJointTorqueLocal(j)/100. 
for j in range(1, skeleton.getJointNum())]\n rd_joint_positions[:] = controlModel.getJointPositionsGlobal()\n\n vpWorld.step()\n # yvu.align2D(controlModel)\n\n if contactForces is not None and len(contactForces) > 0:\n CP += yrp.getCP(contactPositions, contactForces)\n F += sum(contactForces)\n avg_dCM[0] += controlModel.getJointVelocityGlobal(0)\n # avg_dCM[0] += yrp.getCM(controlModel.getJointVelocitiesGlobal(), bodyMasses, upperMass, uppers)\n # avg_dCM[0] += yrp.getCM(controlModel.getJointVelocitiesGlobal(), bodyMasses, totalMass)\n\n # if len(stanceFoots)>0:\n # avg_stf_v[0] += controlModel.getJointVelocityGlobal(stanceFoots[0])\n # avg_stf_av[0] += controlModel.getJointAngVelocityGlobal(stanceFoots[0])\n\n del rd_point2[:]\n if contactPositions is not None:\n rd_point2.extend(contactPositions)\n\n\n CP /= stepsPerFrame\n F /= stepsPerFrame\n avg_dCM[0] /= stepsPerFrame\n\n # if len(stanceFoots)>0:\n # avg_stf_v[0] /= stepsPerFrame\n # avg_stf_av[0] /= stepsPerFrame\n # rd_vec1[0] = avg_stf_av[0]; rd_vec1[0][0] = 0.; rd_vec1[0][2] = 0.\n # rd_vecori1[0]= controlModel.getJointPositionGlobal(stanceFoots[0])\n\n #=======================================================================\n # segment editing\n #=======================================================================\n lastFrame = False\n\n\n # print curState\n # print bodyIDs\n\n '''\n print skeleton.getJointIndex('LeftFoot') = 3\n print skeleton.getJointIndex('LeftFoot_foot_0_0') = 4\n print skeleton.getJointIndex('LeftFoot_foot_0_1') = 5\n print skeleton.getJointIndex('LeftFoot_foot_1_0')\n print skeleton.getJointIndex('LeftFoot_foot_1_1')\n print skeleton.getJointIndex('LeftFoot_foot_2_0')\n print skeleton.getJointIndex('LeftFoot_foot_2_1') = 9\n print skeleton.getJointIndex('RightFoot') = 18\n print skeleton.getJointIndex('RightFoot_foot_0_0') = 19\n print skeleton.getJointIndex('RightFoot_foot_0_1')\n print skeleton.getJointIndex('RightFoot_foot_1_0')\n print skeleton.getJointIndex('RightFoot_foot_1_1')\n print skeleton.getJointIndex('RightFoot_foot_2_0')\n print skeleton.getJointIndex('RightFoot_foot_2_1') = 24\n '''\n\n if SEGMENT_EDITING:\n if curState==yba.GaitState.STOP:\n if frame == len(motion_seg)-1:\n lastFrame = True\n\n elif (curState==yba.GaitState.LSWING or curState==yba.GaitState.RSWING) and t>c_min_contact_time:\n # original\n '''\n swingID = lID if curState==yba.GaitState.LSWING else rID\n contact = False\n\n if swingID in bodyIDs:\n minContactVel = 1000.\n for i in range(len(bodyIDs)):\n if bodyIDs[i]==swingID:\n vel = controlModel.getBodyVelocityGlobal(swingID, contactPositionLocals[i])\n vel[1] = 0\n contactVel = mm.length(vel)\n if contactVel < minContactVel: minContactVel = contactVel\n if minContactVel < c_min_contact_vel: contact = True\n\n extended[0] = False\n '''\n # segmented foot\n swingIDs = copy.deepcopy(lIDs) if curState==yba.GaitState.LSWING else copy.deepcopy(rIDs)\n\n contact = False\n\n for swingID in swingIDs:\n if swingID in bodyIDs:\n minContactVel = 1000.\n for i in range(len(bodyIDs)):\n if bodyIDs[i]==swingID:\n vel = controlModel.getBodyVelocityGlobal(swingID, contactPositionLocals[i])\n vel[1] = 0\n contactVel = mm.length(vel)\n if contactVel < minContactVel: minContactVel = contactVel\n if minContactVel < c_min_contact_vel: contact = True\n\n extended[0] = False\n\n if contact:\n # print frame, 'foot touch'\n lastFrame = True\n acc_offset[0] += frame - curInterval[1]\n\n elif frame == len(motion_seg)-1:\n print(frame, 'extend frame', frame+1)\n\n preserveJoints = []\n # 
preserveJoints = [lFoot, rFoot]\n # preserveJoints = [lFoot, rFoot, lKnee, rKnee]\n # preserveJoints = [lFoot, rFoot, lKnee, rKnee, lUpLeg, rUpLeg]\n stanceKnees = [rKnee] if curState==yba.GaitState.LSWING else [lKnee]\n preserveJoints = [stanceFoots[0], stanceKnees[0], stanceLegs[0]]\n\n diff = 3\n motion_seg_orig.extend([motion_seg_orig[-1]])\n motion_seg.extend(ymt.extendByIntegration_root(motion_seg, 1, diff))\n\n motion_stitch.extend(ymt.extendByIntegration_constant(motion_stitch, 1, preserveJoints, diff))\n\n # # extend for swing foot ground speed matching & swing foot height lower\n ## extendedPostures = ymt.extendByIntegration(motion_stitch, 1, preserveJoints, diff)\n ## extendedPostures = [motion_stitch[-1]]\n ##\n # extendFrameNum = frame - curInterval[1] + 1\n # k = 1.-extendFrameNum/5.\n # if k<0.: k=0.\n # extendedPostures = ymt.extendByIntegrationAttenuation(motion_stitch, 1, preserveJoints, diff, k)\n #\n ## if len(swingFoots)>0 and np.inner(dCM_tar, dCM)>0.:\n ## print frame, 'speed matching'\n ## R_swf = motion_stitch[-1].getJointOrientationGlobal(swingFoots[0])\n ##\n ## p_swf = motion_stitch[-1].getJointPositionGlobal(swingFoots[0])\n ## v_swf = motion_stitch.getJointVelocityGlobal(swingFoots[0], frame-diff, frame)\n ## a_swf = motion_stitch.getJointAccelerationGlobal(swingFoots[0], frame-diff, frame)\n ## p_swf += v_swf * (frameTime) + a_swf * (frameTime)*(frameTime)\n ## aik.ik_analytic(extendedPostures[0], swingFoots[0], p_swf)\n ##\n ## extendedPostures[0].setJointOrientationGlobal(swingFoots[0], R_swf)\n #\n # motion_stitch.extend(extendedPostures)\n\n extended[0] = True\n else:\n if frame == len(motion_seg)-1: lastFrame = True\n\n if lastFrame:\n if segIndex < len(segments)-1:\n print('%d (%d): end of %dth seg (%s, %s)'%(frame, frame-curInterval[1],segIndex, yba.GaitState.text[curState], curInterval))\n if plot!=None: plot.addDataPoint('diff', frame, (frame-curInterval[1])*.01)\n\n if len(stanceFoots)>0 and len(swingFoots)>0:\n # step_cur = controlModel.getJointPositionGlobal(swingFoots[0]) - controlModel.getJointPositionGlobal(stanceFoots[0])\n # step_tar = motion_seg[curInterval[1]].getJointPositionGlobal(swingFoots[0]) - motion_seg[curInterval[1]].getJointPositionGlobal(stanceFoots[0])\n step_cur = controlModel.getJointPositionGlobal(0) - controlModel.getJointPositionGlobal(stanceFoots[0])\n step_tar = motion_seg[curInterval[1]].getJointPositionGlobal(0) - motion_seg[curInterval[1]].getJointPositionGlobal(stanceFoots[0])\n\n step_cur = mm.projectionOnPlane(step_cur, (1,0,0), (0,0,1))\n step_tar = mm.projectionOnPlane(step_tar, (1,0,0), (0,0,1))\n\n step_cur_sag, step_cur_cor = mm.projectionOnVector2(step_cur, direction)\n step_tar_sag, step_tar_cor = mm.projectionOnVector2(step_tar, direction)\n\n step_length_tar[0] = mm.length(step_tar_sag)\n if np.inner(step_tar_sag, step_cur_sag) > 0:\n step_length_cur[0] = mm.length(step_cur_sag)\n else:\n step_length_cur[0] = -mm.length(step_cur_sag)\n\n step_axis[0] = directionAxis\n\n # rd_vec1[0] = step_tar_sag\n # rd_vecori1[0] = motion_seg[curInterval[1]].getJointPositionGlobal(stanceFoots[0])\n # rd_vec2[0] = step_cur_sag\n # rd_vecori2[0] = controlModel.getJointPositionGlobal(stanceFoots[0])\n\n seg_index[0] += 1\n curSeg = segments[seg_index[0]]\n stl_y_limit_num[0] = 0\n stl_xz_limit_num[0] = 0\n\n del motion_seg_orig[frame+1:]\n motion_seg_orig.extend(ymb.getAttachedNextMotion(curSeg, motion_seg_orig[-1], False, False))\n\n del motion_seg[frame+1:]\n del motion_stitch[frame+1:]\n transitionLength = 
len(curSeg)-1\n\n # motion_seg.extend(ymb.getAttachedNextMotion(curSeg, motion_seg[-1], False, False))\n # motion_stitch.extend(ymb.getStitchedNextMotion(curSeg, motion_control[-1], transitionLength, stitch_func, True, False))\n\n d = motion_seg[-1] - curSeg[0]\n d.rootPos[1] = 0.\n motion_seg.extend(ymb.getAttachedNextMotion(curSeg, d, True, False))\n\n if NO_FOOT_SLIDING:\n if segIndex == len(segments)-2:\n Rl = motion_control[-1].getJointOrientationLocal(lUpLeg)\n Rr = motion_control[-1].getJointOrientationLocal(rUpLeg)\n Rlk = motion_control[-1].getJointOrientationLocal(lKnee)\n Rrk = motion_control[-1].getJointOrientationLocal(rKnee)\n Rlf = motion_control[-1].getJointOrientationLocal(lFoot)\n Rrf = motion_control[-1].getJointOrientationLocal(rFoot)\n for p in curSeg:\n p.setJointOrientationLocal(lUpLeg, Rl, False)\n p.setJointOrientationLocal(rUpLeg, Rr, False)\n p.setJointOrientationLocal(lKnee, Rlk, False)\n p.setJointOrientationLocal(rKnee, Rrk, False)\n p.setJointOrientationLocal(lFoot, Rlf, False)\n p.setJointOrientationLocal(rFoot, Rrf, False)\n p.updateGlobalT()\n\n d = motion_control[-1] - curSeg[0]\n d.rootPos[1] = 0.\n motion_stitch.extend(ymb.getStitchedNextMotion(curSeg, d, transitionLength, stitch_func, True, False))\n\n # motion_seg.extend(ymb.getAttachedNextMotion(curSeg, motion_seg[-1], False, True))\n # motion_stitch.extend(ymb.getStitchedNextMotion(curSeg, motion_control[-1], transitionLength, stitch_func, True, True))\n else:\n motion_seg_orig.append(motion_seg_orig[-1])\n motion_seg.append(motion_seg[-1])\n motion_stitch.append(motion_control[-1])\n\n\n # rendering\n motionModel.update(motion_ori[frame])\n # motionModel.update(motion_seg[frame])\n\n rd_CP[0] = CP\n rd_CMP[0] = (CMreal[0] - (F[0]/F[1])*CMreal[1], 0, CMreal[2] - (F[2]/F[1])*CMreal[1])\n\n if plot!=None:\n plot.addDataPoint('zero', frame, 0)\n plot.updatePoints()\n\n\n viewer.setSimulateCallback(simulateCallback)\n\n if MULTI_VIEWER:\n viewer.startTimer(frameTime / 1.4)\n else:\n viewer.startTimer(frameTime * .1)\n viewer.show()\n\n Fl.run()\n\n\nwalkings()\n", "##\n##\tThis file is part of qpOASES.\n##\n##\tqpOASES -- An Implementation of the Online Active Set Strategy.\n##\tCopyright (C) 2007-2015 by Hans Joachim Ferreau, Andreas Potschka,\n##\tChristian Kirches et al. All rights reserved.\n##\n##\tqpOASES is free software; you can redistribute it and/or\n##\tmodify it under the terms of the GNU Lesser General Public\n##\tLicense as published by the Free Software Foundation; either\n##\tversion 2.1 of the License, or (at your option) any later version.\n##\n##\tqpOASES is distributed in the hope that it will be useful,\n##\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n##\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n##\tSee the GNU Lesser General Public License for more details.\n##\n##\tYou should have received a copy of the GNU Lesser General Public\n##\tLicense along with qpOASES; if not, write to the Free Software\n##\tFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n##\n\n## Example adapted from examples/example2.cpp.\n## author of this file: Sebastian F. 
Walter\n\nimport numpy as np\nfrom qpoases import PySQProblem as SQProblem\nfrom qpoases import PySolutionAnalysis as SolutionAnalysis\n\n\n# Setup data of first QP.\nH = np.array([ 1.0, 0.0, 0.0, 0.5 ]).reshape((2,2))\nA = np.array([ 1.0, 1.0 ]).reshape((2,1))\ng = np.array([ 1.5, 1.0 ])\nlb = np.array([ 0.5, -2.0 ])\nub = np.array([ 5.0, 2.0 ])\nlbA = np.array([ -1.0 ])\nubA = np.array([ 2.0 ])\n\n# Setup data of second QP.\nH_new = np.array([ 1.0, 0.5, 0.5, 0.5 ]).reshape((2,2))\nA_new = np.array([ 1.0, 5.0 ]).reshape((2,1))\ng_new = np.array([ 1.0, 1.5 ])\nlb_new = np.array([ 0.0, -1.0 ])\nub_new = np.array([ 5.0, -0.5 ])\nlbA_new = np.array([ -2.0 ])\nubA_new = np.array([ 1.0 ])\n\n# Setting up SQProblem object and solution analyser.\nexample = SQProblem(2, 1)\nanalyser = SolutionAnalysis()\n\n# Solve first QP ...\nnWSR = np.array([10])\nexample.init(H, g, A, lb, ub, lbA, ubA, nWSR)\n\n# ... and analyse it.\nmaxStat = np.zeros(1)\nmaxFeas = np.zeros(1)\nmaxCmpl = np.zeros(1)\n\nanalyser.getKktViolation(example, maxStat, maxFeas, maxCmpl)\nprint(\"maxStat: %e, maxFeas:%e, maxCmpl: %e\\n\"%(maxStat, maxFeas, maxCmpl))\n\n# Solve second QP ...\nnWSR = np.array([10])\nexample.hotstart(H_new, g_new, A_new, lb_new, ub_new,\n lbA_new, ubA_new, nWSR)\n\n# ... and analyse it.\nanalyser.getKktViolation(example, maxStat, maxFeas, maxCmpl)\nprint(\"maxStat: %e, maxFeas:%e, maxCmpl: %e\\n\"%(maxStat, maxFeas, maxCmpl))\n\n\n# ------------ VARIANCE-COVARIANCE EVALUATION --------------------\n\nVar = np.zeros(5*5)\nPrimal_Dual_Var = np.zeros(5*5)\n\nVar.reshape((5,5))[0,0] = 1.\nVar.reshape((5,5))[1,1] = 1.\n\n# ( 1 0 0 0 0 )\n# ( 0 1 0 0 0 )\n# Var = ( 0 0 0 0 0 )\n# ( 0 0 0 0 0 )\n# ( 0 0 0 0 0 )\n\n\nanalyser.getVarianceCovariance(example, Var, Primal_Dual_Var)\nprint('Primal_Dual_Var=\\n', Primal_Dual_Var.reshape((5,5)))\nprint(\"maxStat: %e, maxFeas:%e, maxCmpl: %e\\n\"%(maxStat, maxFeas, maxCmpl))\n" ]
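The qpOASES example above solves and hot-starts the QP but never reads back the optimizer it found. A minimal follow-up sketch, assuming the `getPrimalSolution`, `getDualSolution`, and `getObjVal` accessors that the Python bindings mirror from the C++ API (hypothetical continuation, not part of the original file):

# Sketch only: reading back the solution of the second QP solved above.
# Assumes the `example` (PySQProblem) object from the listing and the
# accessor methods getPrimalSolution/getDualSolution/getObjVal.
import numpy as np

xOpt = np.zeros(2)      # nV = 2 primal variables
yOpt = np.zeros(2 + 1)  # nV + nC dual variables (bounds first, then constraints)
example.getPrimalSolution(xOpt)
example.getDualSolution(yOpt)
print("xOpt = %s, yOpt = %s, objVal = %e" % (xOpt, yOpt, example.getObjVal()))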
[ [ "numpy.dot", "numpy.inner", "numpy.isnan", "numpy.around", "numpy.eye", "numpy.linalg.norm", "numpy.argmin", "numpy.cross", "numpy.array", "numpy.zeros" ], [ "numpy.dot", "numpy.hstack", "numpy.eye", "numpy.ones", "numpy.append", "numpy.zeros", "numpy.vstack" ], [ "scipy.optimize.minimize_scalar" ], [ "numpy.dot", "numpy.hstack", "numpy.eye", "numpy.ones", "numpy.append", "numpy.cross", "numpy.array", "numpy.zeros", "numpy.vstack" ], [ "numpy.dot", "numpy.array", "numpy.inner", "numpy.cross" ], [ "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bradduy/computer_vision
[ "cfe97e84a5e216819497405a79ef2ef0ca2b95fe" ]
[ "Data Science/MachineLearningFromPytorch.py" ]
[ "import torch\n\n# f = w * x\n\n# f = 2 * x\nX = torch.tensor([1,2,3,4], dtype=torch.float32) # training sample\nY = torch.tensor([2,4,6,8], dtype=torch.float32) # testing sample\n\nw = torch.tensor(0.0, dtype=torch.float32, requires_grad=True)\n\n#model prediction\ndef forward(x):\n return w * x\n\n# loss = MSE\ndef loss(y, y_predicted):\n return ((y_predicted - y)**2).mean()\n\n#gradient\n#MSE = 1/N * (w*x -y)**2\n#dJ/dw = 1/N 2x (w*x -y)\ndef gradient(x, y, y_predicted):\n return np.dot(2*x, y_predicted-y).mean()\n\nprint(f'Prediction before training: f(5) = {forward(5):.3f}')\n\n#training\nlearning_rate = 0.01\nn_inters = 20\nfor epoch in range(n_inters):\n #prediction = forward pass\n y_pred = forward(X)\n\n #loss\n l = loss(Y, y_pred)\n\n # gradients = backward pass\n l.backward() # dl/dw\n\n #update weights\n with torch.no_grad():\n w -= learning_rate * w.grad\n\n # zero gradients:\n w.grad.zero_()\n\n if epoch % 2 == 0:\n print(f'epoch {epoch+1}: w = {w:.3f}, loss = {l:.8f}')\n\nprint(f'Prediction after training: f(5) = {forward(5):.3f}')\n" ]
[ [ "torch.no_grad", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nirbhayjm/rlpyt
[ "a2741201bbf33f5408306198d220d90f8f7b7250", "a2741201bbf33f5408306198d220d90f8f7b7250", "a2741201bbf33f5408306198d220d90f8f7b7250", "a2741201bbf33f5408306198d220d90f8f7b7250", "a2741201bbf33f5408306198d220d90f8f7b7250" ]
[ "rlpyt/models/pg/atari_lstm_model.py", "rlpyt/utils/launching/affinity.py", "rlpyt/agents/dqn/dqn_agent.py", "rlpyt/utils/array.py", "rlpyt/agents/qpg/sac_v_agent.py" ]
[ "import torch\nimport torch.nn.functional as F\n\nfrom rlpyt.models.conv2d import Conv2dHeadModel\nfrom rlpyt.utils.collections import namedarraytuple\nfrom rlpyt.utils.tensor import infer_leading_dims, restore_leading_dims\n\nRnnState = namedarraytuple(\n \"RnnState\", [\"h\", \"c\"]\n) # For downstream namedarraytuples to work\n\n\nclass AtariLstmModel(torch.nn.Module):\n \"\"\"Recurrent model for Atari agents: a convolutional network into an FC layer\n into an LSTM which outputs action probabilities and state-value estimate.\n \"\"\"\n\n def __init__(\n self,\n image_shape,\n output_size,\n fc_sizes=512, # Between conv and lstm.\n lstm_size=512,\n use_maxpool=False,\n channels=None, # None uses default.\n kernel_sizes=None,\n strides=None,\n paddings=None,\n ):\n \"\"\"Instantiate neural net module according to inputs.\"\"\"\n super().__init__()\n self.conv = Conv2dHeadModel(\n image_shape=image_shape,\n channels=channels or [16, 32],\n kernel_sizes=kernel_sizes or [8, 4],\n strides=strides or [4, 2],\n paddings=paddings or [0, 1],\n use_maxpool=use_maxpool,\n hidden_sizes=fc_sizes, # Applies nonlinearity at end.\n )\n self.lstm = torch.nn.LSTM(self.conv.output_size + output_size + 1, lstm_size)\n self.pi = torch.nn.Linear(lstm_size, output_size)\n self.value = torch.nn.Linear(lstm_size, 1)\n\n def forward(self, image, prev_action, prev_reward, init_rnn_state):\n \"\"\"\n Compute action probabilities and value estimate from input state.\n Infers leading dimensions of input: can be [T,B], [B], or []; provides\n returns with same leading dims. Convolution layers process as [T*B,\n *image_shape], with T=1,B=1 when not given. Expects uint8 images in\n [0,255] and converts them to float32 in [0,1] (to minimize image data\n storage and transfer). Recurrent layers processed as [T,B,H]. Used in\n both sampler and in algorithm (both via the agent). 
Also returns the\n next RNN state.\n \"\"\"\n img = image.type(torch.float) # Expect torch.uint8 inputs\n img = img.mul_(1.0 / 255) # From [0-255] to [0-1], in place.\n\n # Infer (presence of) leading dimensions: [T,B], [B], or [].\n lead_dim, T, B, img_shape = infer_leading_dims(img, 3)\n\n fc_out = self.conv(img.view(T * B, *img_shape))\n lstm_input = torch.cat(\n [\n fc_out.view(T, B, -1),\n prev_action.view(T, B, -1), # Assumed onehot.\n prev_reward.view(T, B, 1),\n ],\n dim=2,\n )\n init_rnn_state = None if init_rnn_state is None else tuple(init_rnn_state)\n lstm_out, (hn, cn) = self.lstm(lstm_input, init_rnn_state)\n pi = F.softmax(self.pi(lstm_out.view(T * B, -1)), dim=-1)\n v = self.value(lstm_out.view(T * B, -1)).squeeze(-1)\n\n # Restore leading dimensions: [T,B], [B], or [], as input.\n pi, v = restore_leading_dims((pi, v), lead_dim, T, B)\n # Model should always leave B-dimension in rnn state: [N,B,H].\n next_rnn_state = RnnState(h=hn, c=cn)\n\n return pi, v, next_rnn_state\n", "from rlpyt.utils.collections import AttrDict\n\n# Readable-to-less-readable abbreviations.\nN_GPU = \"gpu\"\nCONTEXTS_PER_GPU = \"cxg\" # CUDA contexts.\nGPU_PER_RUN = \"gpr\"\nN_CPU_CORE = \"cpu\"\nHYPERTHREAD_OFFSET = \"hto\" # Can specify if different from n_cpu_core.\nN_SOCKET = \"skt\"\nRUN_SLOT = \"slt\"\nCPU_PER_WORKER = \"cpw\"\nCPU_PER_RUN = \"cpr\" # For cpu-only.\nCPU_RESERVED = \"res\" # Reserve CPU cores per master, not allowed by workers.\n# For async sampling / optimizing.\nASYNC_SAMPLE = \"ass\"\nSAMPLE_GPU_PER_RUN = \"sgr\"\nOPTIM_SAMPLE_SHARE_GPU = \"oss\"\n# For alternating sampler.\nALTERNATING = \"alt\"\nSET_AFFINITY = \"saf\"\n\nABBREVS = [\n N_GPU,\n CONTEXTS_PER_GPU,\n GPU_PER_RUN,\n N_CPU_CORE,\n HYPERTHREAD_OFFSET,\n N_SOCKET,\n CPU_PER_RUN,\n CPU_PER_WORKER,\n CPU_RESERVED,\n ASYNC_SAMPLE,\n SAMPLE_GPU_PER_RUN,\n OPTIM_SAMPLE_SHARE_GPU,\n ALTERNATING,\n SET_AFFINITY,\n]\n\n\n# API\n\n\ndef quick_affinity_code(n_parallel=None, use_gpu=True, contexts_per_gpu=1):\n \"\"\"Tried to autodetect hardware resources and divide them evenly among learning runs.\n\n Args:\n n_parallel (int or None): Can specify the number of concurrent learning runs; if using GPU, leave as ``None`` to use all GPUs, 1 per run\n use_gpu (bool): self-explanatory\n \"\"\"\n if not (use_gpu or n_parallel):\n raise ValueError(\"Either use_gpu must be True or n_parallel > 0 must be given.\")\n import psutil\n\n # n_cpu_core = psutil.cpu_count(logical=False) # sometimes gives bad results\n n_cpu_core = psutil.cpu_count() // 2 # assume hyperthreads will be counted\n if use_gpu:\n import torch\n\n n_gpu = torch.cuda.device_count()\n else:\n n_gpu = 0\n if n_gpu > 0:\n if n_parallel is not None:\n n_gpu = min(n_parallel, n_gpu)\n n_runs = n_gpu * contexts_per_gpu\n n_cpu_core = (n_cpu_core // n_runs) * n_runs # Same for all.\n return encode_affinity(\n n_cpu_core=n_cpu_core, n_gpu=n_gpu, contexts_per_gpu=contexts_per_gpu\n )\n else:\n if not n_parallel:\n raise ValueError(\n \"n_parallel > 0 must be given if use_gpu=False or no GPUs are present.\"\n )\n n_parallel = min(n_parallel, n_cpu_core)\n n_cpu_core = (n_cpu_core // n_parallel) * n_parallel # Same for all.\n cpu_per_run = n_cpu_core // n_parallel\n return encode_affinity(n_cpu_core=n_cpu_core, n_gpu=0, cpu_per_run=cpu_per_run)\n\n\ndef encode_affinity(\n n_cpu_core=1, # Total number to use on machine (not virtual).\n n_gpu=0, # Total number to use on machine.\n contexts_per_gpu=1, # e.g. 
2 will put two experiments per GPU.\n gpu_per_run=1, # For multi-GPU optimization.\n cpu_per_run=1, # Specify if not using GPU.\n cpu_per_worker=1, # Use 1 unless environment is multi-threaded.\n cpu_reserved=0, # Number CPU to reserve per GPU.\n hyperthread_offset=None, # Leave None for auto-detect.\n n_socket=None, # Leave None for auto-detect.\n run_slot=None, # Leave None in `run` script, but specified in `train` script.\n async_sample=False, # True if asynchronous sampling / optimization.\n sample_gpu_per_run=0, # For asynchronous sampling.\n optim_sample_share_gpu=False, # Async sampling, overrides sample_gpu.\n alternating=False, # True for alternating sampler.\n set_affinity=True, # Everything same except psutil.Process().cpu_affinity(cpus)\n):\n \"\"\"Encodes the hardware configuration into a string (with meanings defined\n in this file) which can be passed as a command line argument to call the\n training script. Use in overall experiments setup script to specify\n computer and experiment resources into ``run_experiments()``.\n\n We refer to an \"experiment\" as an individual learning run, i.e. one set of\n hyperparameters, which does not interact with other runs.\n\n Args:\n n_cpu_core (int): Total number of physical cores to use on machine (not virtual)\n n_gpu (int): Total number of GPUs to use on machine\n contexts_per_gpu (int): How many experiments share each GPU\n gpu_per_run (int): How many GPUs to use per experiment (for multi-GPU optimization)\n cpu_per_run (int): If not using GPU, how many cores per experiment\n cpu_per_worker (int): CPU cores per sampler worker; 1 unless environment is multi-threaded\n cpu_reserved (int): Number of CPUs to reserve per GPU, and not allow sampler to use them\n hyperthread_offset (int): Typically the number of physical cores, since they are labeled 0-x, and hyperthreads as (x+1)-2x; use 0 to disable hyperthreads, None to auto-detect\n n_socket (int): Number of CPU sockets in machine; tries to keep CPUs grouped on same socket, and match socket-to-GPU affinity\n run_slot (int): Which hardware slot to use; leave ``None`` when calling ``run_experiments()``, but specify it in an individual train script\n async_sample (bool): True if asynchronous sampling/optimization mode; different affinity structure needed\n sample_gpu_per_run (int): In asynchronous mode only, number of action-server GPUs per experiment\n optim_sample_share_gpu (bool): In asynchronous mode only, whether to use same GPU(s) for both training and sampling\n alternating (bool): True if using alternating sampler (will make more worker assignments)\n set_affinity (bool): False to disable runner and sampler from setting cpu affinity via `psutil`, maybe inappropriate in cloud machines.\n\n \"\"\"\n affinity_code = f\"{n_cpu_core}{N_CPU_CORE}_{n_gpu}{N_GPU}\"\n if hyperthread_offset is None:\n hyperthread_offset = get_hyperthread_offset()\n if n_socket is None:\n n_socket = get_n_socket()\n if contexts_per_gpu > 1:\n affinity_code += f\"_{contexts_per_gpu}{CONTEXTS_PER_GPU}\"\n if gpu_per_run > 1:\n affinity_code += f\"_{gpu_per_run}{GPU_PER_RUN}\"\n if n_gpu == 0:\n affinity_code += f\"_{cpu_per_run}{CPU_PER_RUN}\"\n if cpu_per_worker > 1:\n affinity_code += f\"_{cpu_per_worker}{CPU_PER_WORKER}\"\n if hyperthread_offset != n_cpu_core:\n affinity_code += f\"_{hyperthread_offset}{HYPERTHREAD_OFFSET}\"\n if n_socket > 1:\n affinity_code += f\"_{n_socket}{N_SOCKET}\"\n if cpu_reserved > 0:\n affinity_code += f\"_{cpu_reserved}{CPU_RESERVED}\"\n if async_sample:\n affinity_code +=
f\"_1{ASYNC_SAMPLE}\"\n if sample_gpu_per_run > 0:\n affinity_code += f\"_{sample_gpu_per_run}{SAMPLE_GPU_PER_RUN}\"\n if optim_sample_share_gpu:\n affinity_code += f\"_1{OPTIM_SAMPLE_SHARE_GPU}\"\n if alternating:\n affinity_code += f\"_1{ALTERNATING}\"\n if not set_affinity:\n affinity_code += f\"_0{SET_AFFINITY}\"\n if run_slot is not None:\n assert run_slot <= (n_gpu * contexts_per_gpu) // gpu_per_run\n affinity_code = f\"{run_slot}{RUN_SLOT}_\" + affinity_code\n return affinity_code\n\n\ndef prepend_run_slot(run_slot, affinity_code):\n \"\"\"Use in launch manager when assigning run slot.\"\"\"\n return f\"{run_slot}{RUN_SLOT}_\" + affinity_code\n\n\ndef affinity_from_code(run_slot_affinity_code):\n \"\"\"Use in individual experiment script; pass output to Runner.\"\"\"\n run_slot, aff_code = remove_run_slot(run_slot_affinity_code)\n aff_params = decode_affinity(aff_code)\n if aff_params.get(N_GPU, 0) > 0:\n if aff_params.pop(ASYNC_SAMPLE, 0) > 0:\n return build_async_affinity(run_slot, **aff_params)\n elif aff_params.get(GPU_PER_RUN, 1) > 1:\n return build_multigpu_affinity(run_slot, **aff_params)\n return build_gpu_affinity(run_slot, **aff_params)\n return build_cpu_affinity(run_slot, **aff_params)\n\n\ndef make_affinity(run_slot=0, **kwargs):\n \"\"\"Input same kwargs as ``encode_affinity()``, returns the AttrDict form.\"\"\"\n return affinity_from_code(encode_affinity(run_slot=run_slot, **kwargs))\n\n\n# Helpers\n\n\ndef get_n_socket():\n import subprocess\n\n return max(\n 1,\n int(\n subprocess.check_output(\n 'cat /proc/cpuinfo | grep \"physical id\" | sort -u | wc -l', shell=True\n )\n ),\n )\n\n\ndef get_hyperthread_offset():\n import psutil # (If returns 0, will not try to use hyperthreads.)\n\n # UNRELIABLE:\n # hto = psutil.cpu_count() - psutil.cpu_count(logical=False)\n vcpu = psutil.cpu_count()\n if vcpu != psutil.cpu_count(logical=False) and vcpu % 2 == 0:\n # Best guess?\n return vcpu // 2\n return 0\n\n\ndef get_n_run_slots(affinity_code):\n aff = decode_affinity(affinity_code)\n if aff.get(\"ass\", 0) > 0: # Asynchronous sample mode.\n total_gpu = aff.get(\"gpr\", 1) + aff.get(\"sgr\", 0) * (1 - aff.get(\"oss\", 0))\n n_run_slots = aff[\"gpu\"] // total_gpu # NOTE: no cxg yet.\n elif aff.get(\"gpu\", 0) > 0:\n n_run_slots = (aff[\"gpu\"] * aff.get(\"cxg\", 1)) // aff.get(\"gpr\", 1)\n else:\n n_run_slots = aff[\"cpu\"] // aff[\"cpr\"]\n return n_run_slots\n\n\ndef remove_run_slot(run_slot_affinity_code):\n run_slot_str, aff_code = run_slot_affinity_code.split(\"_\", 1)\n assert run_slot_str[-3:] == RUN_SLOT\n run_slot = int(run_slot_str[:-3])\n return run_slot, aff_code\n\n\ndef decode_affinity(affinity_code):\n codes = affinity_code.split(\"_\")\n aff_kwargs = dict()\n for code in codes:\n abrv = code[-3:]\n if abrv not in ABBREVS:\n raise ValueError(f\"Unrecognized affinity code abbreviation: {abrv}\")\n value = int(code[:-3])\n aff_kwargs[abrv] = value\n return aff_kwargs\n\n\ndef build_cpu_affinity(\n slt, cpu, cpr, cpw=1, hto=None, res=0, skt=1, gpu=0, alt=0, saf=1\n):\n assert gpu == 0\n assert cpu % cpr == 0\n hto = cpu if hto is None else hto # Default is None, 0 is OFF.\n assert (hto - cpu) % skt == 0\n n_run_slots = cpu // cpr\n assert slt <= n_run_slots\n cpu_per_skt = max(cpu, hto) // skt\n if n_run_slots >= skt:\n slt_per_skt = n_run_slots // skt\n my_skt = slt // slt_per_skt\n slt_in_skt = slt % slt_per_skt\n min_core = my_skt * cpu_per_skt + slt_in_skt * cpr\n cores = tuple(range(min_core, min_core + cpr))\n else: # One run multiple sockets.\n 
skt_per_slt = skt // n_run_slots\n cores = list()\n low_skt = slt * skt_per_slt\n for s in range(skt_per_slt):\n min_core = (low_skt + s) * cpu_per_skt\n high_core = min_core + cpr // skt_per_slt\n cores.extend(list(range(min_core, high_core)))\n cores = tuple(cores)\n worker_cores = cores[res:]\n assert len(worker_cores) % cpw == 0\n master_cpus = get_master_cpus(cores, hto)\n workers_cpus = get_workers_cpus(worker_cores, cpw, hto, alt)\n affinity = AttrDict(\n all_cpus=master_cpus,\n master_cpus=master_cpus,\n workers_cpus=workers_cpus,\n master_torch_threads=len(cores),\n worker_torch_threads=cpw,\n alternating=bool(alt), # Just to pass through a check.\n set_affinity=bool(saf),\n )\n return affinity\n\n\ndef build_gpu_affinity(\n slt, gpu, cpu, cxg=1, cpw=1, hto=None, res=0, skt=1, alt=0, saf=1\n):\n \"\"\"Divides CPUs evenly among GPUs.\"\"\"\n n_ctx = gpu * cxg\n assert slt < n_ctx\n assert cpu % n_ctx == 0\n cpr = cpu // n_ctx\n if cxg > 1:\n slt = (slt % gpu) * cxg + slt // gpu # Spread over GPUs first.\n affinity = build_cpu_affinity(\n slt, cpu, cpr, cpw=cpw, hto=hto, res=res, skt=skt, gpu=0, alt=alt, saf=saf\n )\n affinity[\"cuda_idx\"] = slt // cxg\n return affinity\n\n\ndef build_multigpu_affinity(\n run_slot, gpu, cpu, gpr=1, cpw=1, hto=None, res=0, skt=1, alt=0, saf=1\n):\n return [\n build_gpu_affinity(\n slt, gpu, cpu, cxg=1, cpw=cpw, hto=hto, res=res, skt=skt, alt=alt, saf=saf\n )\n for slt in range(run_slot * gpr, (run_slot + 1) * gpr)\n ]\n\n\ndef build_async_affinity(\n run_slot, gpu, cpu, gpr=1, sgr=0, oss=0, cpw=1, hto=None, res=1, skt=1, alt=0, saf=1\n):\n oss = bool(oss)\n sgr = gpr if oss else sgr\n total_gpr = gpr + sgr * (not oss)\n n_run_slots = gpu // total_gpr\n assert run_slot < n_run_slots\n cpr = cpu // n_run_slots\n smp_cpr = cpr - res * gpr\n gpu_per_skt = gpu // skt\n hto = cpu if hto is None else hto # Default is None, 0 is OFF.\n cpu_per_skt = max(cpu, hto) // skt\n opt_affinities = list()\n smp_affinities = list()\n all_cpus = tuple()\n if total_gpr <= gpu_per_skt:\n run_per_skt = n_run_slots // skt\n assert n_run_slots % skt == 0 # Relax later?\n skt_per_run = 1\n run_in_skt = run_slot % run_per_skt\n my_skt = run_slot // run_per_skt\n low_opt_gpu = my_skt * gpu_per_skt + run_in_skt * total_gpr\n high_opt_gpu = low_opt_gpu + gpr\n my_opt_gpus = list(range(low_opt_gpu, high_opt_gpu))\n my_smp_gpus = (\n my_opt_gpus if oss else list(range(high_opt_gpu, high_opt_gpu + sgr))\n )\n else: # One run takes more than one socket: spread opt gpus across sockets.\n skt_per_run = skt // n_run_slots\n low_skt = run_slot * skt_per_run\n assert gpr % skt_per_run == 0, \"Maybe try n_socket=1.\"\n assert sgr % skt_per_run == 0, \"Maybe try n_socket=1.\"\n my_opt_gpus = list()\n my_smp_gpus = list()\n run_in_skt = run_per_skt = 0\n for s in range(skt_per_run):\n low_opt_gpu = (low_skt + s) * gpu_per_skt\n high_opt_gpu = low_opt_gpu + gpr // skt_per_run\n my_opt_gpus.extend(list(range(low_opt_gpu, high_opt_gpu)))\n if oss:\n my_smp_gpus = my_opt_gpus\n else:\n high_smp_gpu = high_opt_gpu + sgr // skt_per_run\n my_smp_gpus.extend(list(range(high_opt_gpu, high_smp_gpu)))\n for i, opt_gpu in enumerate(my_opt_gpus):\n gpu_in_skt = opt_gpu % gpu_per_skt\n gpu_skt = opt_gpu // gpu_per_skt\n gpu_res = i if run_per_skt >= 1 else gpu_in_skt\n low_opt_core = gpu_skt * cpu_per_skt + run_in_skt * cpr + gpu_res * res\n high_opt_core = low_opt_core + res\n opt_cores = tuple(range(low_opt_core, high_opt_core))\n opt_cpus = get_master_cpus(opt_cores, hto)\n opt_affinity = dict(\n 
cpus=opt_cpus,\n cuda_idx=opt_gpu,\n torch_threads=len(opt_cores),\n set_affinity=bool(saf),\n )\n opt_affinities.append(opt_affinity)\n all_cpus += opt_cpus\n wrkr_per_smp = smp_cpr // cpw\n smp_cpr = wrkr_per_smp * cpw\n smp_cpg = smp_cpr // max(1, sgr)\n for i, smp_gpu in enumerate(my_smp_gpus):\n gpu_skt = smp_gpu // gpu_per_skt\n gpu_in_skt = smp_gpu % gpu_per_skt\n smp_cpu_off = i if run_per_skt >= 1 else gpu_in_skt - (gpr // skt_per_run)\n low_smp_core = (\n gpu_skt * cpu_per_skt\n + run_in_skt * cpr\n + (gpr // skt_per_run) * res\n + smp_cpu_off * smp_cpg\n )\n high_smp_core = low_smp_core + smp_cpg\n master_cores = tuple(range(low_smp_core, high_smp_core))\n master_cpus = get_master_cpus(master_cores, hto)\n workers_cpus = get_workers_cpus(master_cores, cpw, hto, alt)\n smp_affinity = AttrDict(\n all_cpus=master_cpus,\n master_cpus=master_cpus,\n workers_cpus=workers_cpus,\n master_torch_threads=len(master_cores),\n worker_torch_threads=cpw,\n cuda_idx=smp_gpu,\n alternating=bool(alt), # Just to pass through a check.\n set_affinity=bool(saf),\n )\n smp_affinities.append(smp_affinity)\n all_cpus += master_cpus\n if not smp_affinities: # sgr==0; CPU sampler.\n if total_gpr <= gpu_per_skt:\n low_smp_core = my_skt * cpu_per_skt + run_in_skt * cpr + gpr * res\n master_cores = tuple(range(low_smp_core, low_smp_core + smp_cpr))\n else:\n master_cores = tuple()\n for s in range(skt_per_run):\n low_smp_core = (low_skt + s) * cpu_per_skt + (gpr // gpu_per_skt) * res\n master_cores += tuple(\n range(low_smp_core, low_smp_core + smp_cpr // skt_per_run)\n )\n master_cpus = get_master_cpus(master_cores, hto)\n workers_cpus = get_workers_cpus(master_cores, cpw, hto, alt)\n smp_affinities = AttrDict(\n all_cpus=master_cpus,\n master_cpus=master_cpus,\n workers_cpus=workers_cpus,\n master_torch_threads=len(master_cores),\n worker_torch_threads=cpw,\n cuda_idx=None,\n alternating=bool(alt), # Just to pass through a check.\n set_affinity=bool(saf),\n )\n all_cpus += master_cpus\n affinity = AttrDict(\n all_cpus=all_cpus, # For exp launcher to use taskset.\n optimizer=opt_affinities,\n sampler=smp_affinities,\n set_affinity=bool(saf),\n )\n\n return affinity\n\n\n# def offset_for_socket(hto, cpu, skt, slt, n_run_slots):\n# \"\"\"If hto==cpu or skt==1, returns 0.\"\"\"\n# assert (hto - cpu) % skt == 0\n# rem_cpu_per_skt = (hto - cpu) // skt\n# slt_per_skt = n_run_slots // skt\n# my_skt = slt // slt_per_skt\n# return my_skt * rem_cpu_per_skt\n\n\ndef get_master_cpus(cores, hto):\n hyperthreads = tuple(c + hto for c in cores) if hto > 0 else ()\n return tuple(cores) + hyperthreads\n\n\ndef get_workers_cpus(cores, cpw, hto, alt):\n cores = cores[: (len(cores) // cpw) * cpw] # No worker less than cpw.\n cpus = tuple(cores[i : i + cpw] for i in range(0, len(cores), cpw))\n if hto > 0:\n hyperthreads = tuple(c + hto for c in cores)\n hyperthreads = tuple(\n hyperthreads[i : i + cpw] for i in range(0, len(cores), cpw)\n )\n if alt:\n cpus += hyperthreads\n else:\n cpus = tuple(c + h for c, h in zip(cpus, hyperthreads))\n elif alt:\n cpus += cpus\n return cpus\n\n\ndef build_affinities_gpu_1cpu_drive(\n slt, gpu, cpu, cxg=1, gpr=1, cpw=1, hto=None, skt=1\n):\n \"\"\"OLD.\n Divides CPUs evenly among GPUs, with one CPU held open for each GPU, to\n drive it. Workers assigned on the remaining CPUs. Master permitted to use\n driver core + worker cores (good in case of multi-context per GPU and old\n alternating action server sampler, from accel_rl). 
GPU-driving CPUs grouped\n at the lowest numbered cores of each CPU socket.\n \"\"\"\n if gpr > 1:\n raise NotImplementedError # (parallel training)\n n_ctx = gpu * cxg\n n_run_slots = n_ctx // gpr\n assert slt < n_run_slots\n cpu_per_gpu = cpu // gpu\n sim_cpu_per_gpu = cpu_per_gpu - 1\n n_sim_cpu = cpu - gpu\n sim_cpu_per_ctx = n_sim_cpu // n_ctx\n\n assert gpu >= skt\n assert gpu % skt == 0\n gpu_per_skt = gpu // skt\n assert cpu % skt == 0\n cpu_per_skt = cpu // skt\n\n my_ctx = slt # Different for multi-context run, not implemented.\n my_gpu = my_ctx // cxg\n my_skt = my_gpu // gpu_per_skt\n gpu_in_skt = my_gpu % gpu_per_skt\n gpu_core = gpu_in_skt + my_skt * cpu_per_skt\n ctx_in_gpu = my_ctx % cxg\n\n min_sim_core = (\n my_skt * cpu_per_skt\n + gpu_per_skt\n + gpu_in_skt * sim_cpu_per_gpu\n + ctx_in_gpu * sim_cpu_per_ctx\n )\n sim_cores = tuple(range(min_sim_core, min_sim_core + sim_cpu_per_ctx))\n\n assert len(sim_cores) % cpw == 0\n if hto is None:\n hto = cpu\n if hto > 0:\n hyperthreads = tuple(c + hto for c in sim_cores)\n workers_cpus = tuple(\n sim_cores[i : i + cpw] + hyperthreads[i : i + cpw]\n for i in range(0, len(sim_cores), cpw)\n )\n master_cpus = (gpu_core,) + sim_cores + (gpu_core + hto,) + hyperthreads\n else:\n workers_cpus = tuple(\n sim_cores[i : i + cpw] for i in range(0, len(sim_cores), cpw)\n )\n master_cpus = (gpu_core,) + sim_cores\n\n affinity = AttrDict(\n all_cpus=master_cpus,\n master_cpus=master_cpus,\n workers_cpus=workers_cpus,\n master_torch_threads=1,\n worker_torch_threads=cpw,\n cuda_idx=my_gpu,\n )\n return affinity\n", "import torch\n\nfrom rlpyt.agents.base import AgentStep, BaseAgent\nfrom rlpyt.agents.dqn.epsilon_greedy import EpsilonGreedyAgentMixin\nfrom rlpyt.distributions.epsilon_greedy import EpsilonGreedy\nfrom rlpyt.models.utils import update_state_dict\nfrom rlpyt.utils.buffer import buffer_to\nfrom rlpyt.utils.collections import namedarraytuple\nfrom rlpyt.utils.logging import logger\n\n# from torch.nn.parallel import DistributedDataParallel as DDP\n# from torch.nn.parallel import DistributedDataParallelCPU as DDPC # Deprecated\n\n\nAgentInfo = namedarraytuple(\"AgentInfo\", \"q\")\n\n\nclass DqnAgent(EpsilonGreedyAgentMixin, BaseAgent):\n \"\"\"\n Standard agent for DQN algorithms with epsilon-greedy exploration.\n \"\"\"\n\n def __call__(self, observation, prev_action, prev_reward):\n \"\"\"Returns Q-values for states/observations (with grad).\"\"\"\n prev_action = self.distribution.to_onehot(prev_action)\n model_inputs = buffer_to(\n (observation, prev_action, prev_reward), device=self.device\n )\n q = self.model(*model_inputs)\n return q.cpu()\n\n def initialize(self, env_spaces, share_memory=False, global_B=1, env_ranks=None):\n \"\"\"Along with standard initialization, creates vector-valued epsilon\n for exploration, if applicable, with a different epsilon for each\n environment instance.\"\"\"\n _initial_model_state_dict = self.initial_model_state_dict\n self.initial_model_state_dict = (\n None # don't let base agent try to initialize model\n )\n super().initialize(\n env_spaces, share_memory, global_B=global_B, env_ranks=env_ranks\n )\n self.target_model = self.ModelCls(**self.env_model_kwargs, **self.model_kwargs)\n if _initial_model_state_dict is not None:\n self.model.load_state_dict(_initial_model_state_dict[\"model\"])\n self.target_model.load_state_dict(_initial_model_state_dict[\"model\"])\n self.distribution = EpsilonGreedy(dim=env_spaces.action.n)\n if env_ranks is not None:\n self.make_vec_eps(global_B, 
env_ranks)\n\n def to_device(self, cuda_idx=None):\n super().to_device(cuda_idx)\n self.target_model.to(self.device)\n\n def state_dict(self):\n return dict(\n model=self.model.state_dict(), target=self.target_model.state_dict()\n )\n\n @torch.no_grad()\n def step(self, observation, prev_action, prev_reward):\n \"\"\"Computes Q-values for states/observations and selects actions by\n epsilon-greedy. (no grad)\"\"\"\n prev_action = self.distribution.to_onehot(prev_action)\n model_inputs = buffer_to(\n (observation, prev_action, prev_reward), device=self.device\n )\n q = self.model(*model_inputs)\n q = q.cpu()\n action = self.distribution.sample(q)\n agent_info = AgentInfo(q=q)\n # action, agent_info = buffer_to((action, agent_info), device=\"cpu\")\n return AgentStep(action=action, agent_info=agent_info)\n\n def target(self, observation, prev_action, prev_reward):\n \"\"\"Returns the target Q-values for states/observations.\"\"\"\n prev_action = self.distribution.to_onehot(prev_action)\n model_inputs = buffer_to(\n (observation, prev_action, prev_reward), device=self.device\n )\n target_q = self.target_model(*model_inputs)\n return target_q.cpu()\n\n def update_target(self, tau=1):\n \"\"\"Copies the model parameters into the target model.\"\"\"\n update_state_dict(self.target_model, self.model.state_dict(), tau)\n", "import numpy as np\n\n\ndef select_at_indexes(indexes, array):\n \"\"\"Returns the contents of ``array`` at the multi-dimensional integer\n array ``indexes``. Leading dimensions of ``array`` must match the\n dimensions of ``indexes``.\n \"\"\"\n dim = len(indexes.shape)\n assert indexes.shape == array.shape[:dim]\n num = int(np.prod(indexes.shape))\n a_flat = array.reshape((num,) + array.shape[dim:])\n s_flat = a_flat[np.arange(num), indexes.reshape(-1)]\n selected = s_flat.reshape(array.shape[:dim] + array.shape[dim + 1 :])\n return selected\n\n\ndef to_onehot(indexes, dim, dtype=None):\n \"\"\"Converts integer values in multi-dimensional array ``indexes``\n to one-hot values of size ``dim``; expanded in an additional\n trailing dimension.\"\"\"\n dtype = indexes.dtype if dtype is None else dtype\n onehot = np.zeros((indexes.size, dim), dtype=dtype)\n onehot[np.arange(indexes.size), indexes.reshape(-1)] = 1\n return onehot.reshape(indexes.shape + (dim,))\n\n\ndef from_onehot(onehot, dtype=None):\n \"\"\"Argmax over trailing dimension of array ``onehot``. Optional return\n dtype specification.\"\"\"\n return np.asarray(np.argmax(onehot, axis=-1), dtype=dtype)\n\n\ndef valid_mean(array, valid=None, axis=None):\n \"\"\"Mean of ``array``, accounting for optional mask ``valid``,\n optionally along an axis.\"\"\"\n if valid is None:\n return array.mean(axis=axis)\n return (array * valid).sum(axis=axis) / valid.sum(axis=axis)\n\n\ndef infer_leading_dims(array, dim):\n \"\"\"Determine any leading dimensions of ``array``, which can have up to two\n leading dimensions more than the number of data dimensions, ``dim``. Used\n to check for [B] or [T,B] leading. 
Returns size of leading dimensions (or\n 1 if they don't exist), the data shape, and whether the leading dimensions\n where found.\n \"\"\"\n assert array.ndim in (dim, dim + 1, dim + 2)\n shape = array.shape[len(array.shape) - dim :]\n T = B = 1\n has_T = has_B = False\n if array.ndim == dim + 2:\n T, B = array.shape[:2]\n has_T = has_B = True # Might have T=1 or B=1.\n elif array.ndim == dim + 1:\n B = array.shape[0]\n has_B = True\n return T, B, shape, has_T, has_B\n", "from collections import namedtuple\n\nimport numpy as np\nimport torch\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nfrom rlpyt.agents.base import AgentStep, BaseAgent\nfrom rlpyt.distributions.gaussian import DistInfoStd, Gaussian\nfrom rlpyt.models.qpg.mlp import PiMlpModel, QofMuMlpModel, VMlpModel\nfrom rlpyt.models.utils import update_state_dict\nfrom rlpyt.utils.buffer import buffer_to\nfrom rlpyt.utils.collections import namedarraytuple\nfrom rlpyt.utils.logging import logger\nfrom rlpyt.utils.quick_args import save__init__args\n\n# from torch.nn.parallel import DistributedDataParallelCPU as DDPC # Deprecated\n\n\nMIN_LOG_STD = -20\nMAX_LOG_STD = 2\n\nAgentInfo = namedarraytuple(\"AgentInfo\", [\"dist_info\"])\nModels = namedtuple(\"Models\", [\"pi\", \"q1\", \"q2\", \"v\"])\n\n\nclass SacAgent(BaseAgent):\n \"\"\"TO BE DEPRECATED.\"\"\"\n\n def __init__(\n self,\n ModelCls=PiMlpModel, # Pi model.\n QModelCls=QofMuMlpModel,\n VModelCls=VMlpModel,\n model_kwargs=None, # Pi model.\n q_model_kwargs=None,\n v_model_kwargs=None,\n initial_model_state_dict=None, # All models.\n action_squash=1.0, # Max magnitude (or None).\n pretrain_std=0.75, # With squash 0.75 is near uniform.\n ):\n if model_kwargs is None:\n model_kwargs = dict(hidden_sizes=[256, 256])\n if q_model_kwargs is None:\n q_model_kwargs = dict(hidden_sizes=[256, 256])\n if v_model_kwargs is None:\n v_model_kwargs = dict(hidden_sizes=[256, 256])\n super().__init__(\n ModelCls=ModelCls,\n model_kwargs=model_kwargs,\n initial_model_state_dict=initial_model_state_dict,\n )\n save__init__args(locals())\n self.min_itr_learn = 0 # Get from algo.\n\n def initialize(self, env_spaces, share_memory=False, global_B=1, env_ranks=None):\n _initial_model_state_dict = self.initial_model_state_dict\n self.initial_model_state_dict = None # Don't let base agent try to load.\n super().initialize(\n env_spaces, share_memory, global_B=global_B, env_ranks=env_ranks\n )\n self.initial_model_state_dict = _initial_model_state_dict\n self.q1_model = self.QModelCls(**self.env_model_kwargs, **self.q_model_kwargs)\n self.q2_model = self.QModelCls(**self.env_model_kwargs, **self.q_model_kwargs)\n self.v_model = self.VModelCls(**self.env_model_kwargs, **self.v_model_kwargs)\n self.target_v_model = self.VModelCls(\n **self.env_model_kwargs, **self.v_model_kwargs\n )\n self.target_v_model.load_state_dict(self.v_model.state_dict())\n if self.initial_model_state_dict is not None:\n self.load_state_dict(self.initial_model_state_dict)\n assert len(env_spaces.action.shape) == 1\n self.distribution = Gaussian(\n dim=env_spaces.action.shape[0],\n squash=self.action_squash,\n min_std=np.exp(MIN_LOG_STD),\n max_std=np.exp(MAX_LOG_STD),\n )\n\n def to_device(self, cuda_idx=None):\n super().to_device(cuda_idx)\n self.q1_model.to(self.device)\n self.q2_model.to(self.device)\n self.v_model.to(self.device)\n self.target_v_model.to(self.device)\n\n def data_parallel(self):\n device_id = super().data_parallel\n self.q1_model = DDP(\n self.q1_model,\n device_ids=None if device_id is 
None else [device_id], # 1 GPU.\n output_device=device_id,\n )\n self.q2_model = DDP(\n self.q2_model,\n device_ids=None if device_id is None else [device_id], # 1 GPU.\n output_device=device_id,\n )\n self.v_model = DDP(\n self.v_model,\n device_ids=None if device_id is None else [device_id], # 1 GPU.\n output_device=device_id,\n )\n return device_id\n\n def give_min_itr_learn(self, min_itr_learn):\n self.min_itr_learn = min_itr_learn # From algo.\n\n def make_env_to_model_kwargs(self, env_spaces):\n assert len(env_spaces.action.shape) == 1\n return dict(\n observation_shape=env_spaces.observation.shape,\n action_size=env_spaces.action.shape[0],\n )\n\n def q(self, observation, prev_action, prev_reward, action):\n model_inputs = buffer_to(\n (observation, prev_action, prev_reward, action), device=self.device\n )\n q1 = self.q1_model(*model_inputs)\n q2 = self.q2_model(*model_inputs)\n return q1.cpu(), q2.cpu()\n\n def v(self, observation, prev_action, prev_reward):\n model_inputs = buffer_to(\n (observation, prev_action, prev_reward), device=self.device\n )\n v = self.v_model(*model_inputs)\n return v.cpu()\n\n def pi(self, observation, prev_action, prev_reward):\n model_inputs = buffer_to(\n (observation, prev_action, prev_reward), device=self.device\n )\n mean, log_std = self.model(*model_inputs)\n dist_info = DistInfoStd(mean=mean, log_std=log_std)\n action, log_pi = self.distribution.sample_loglikelihood(dist_info)\n # action = self.distribution.sample(dist_info)\n # log_pi = self.distribution.log_likelihood(action, dist_info)\n log_pi, dist_info = buffer_to((log_pi, dist_info), device=\"cpu\")\n return action, log_pi, dist_info # Action stays on device for q models.\n\n def target_v(self, observation, prev_action, prev_reward):\n model_inputs = buffer_to(\n (observation, prev_action, prev_reward), device=self.device\n )\n target_v = self.target_v_model(*model_inputs)\n return target_v.cpu()\n\n @torch.no_grad()\n def step(self, observation, prev_action, prev_reward):\n model_inputs = buffer_to(\n (observation, prev_action, prev_reward), device=self.device\n )\n mean, log_std = self.model(*model_inputs)\n dist_info = DistInfoStd(mean=mean, log_std=log_std)\n action = self.distribution.sample(dist_info)\n agent_info = AgentInfo(dist_info=dist_info)\n action, agent_info = buffer_to((action, agent_info), device=\"cpu\")\n return AgentStep(action=action, agent_info=agent_info)\n\n def update_target(self, tau=1):\n update_state_dict(self.target_v_model, self.v_model.state_dict(), tau)\n\n @property\n def models(self):\n return Models(pi=self.model, q1=self.q1_model, q2=self.q2_model, v=self.v_model)\n\n def pi_parameters(self):\n return self.model.parameters()\n\n def q1_parameters(self):\n return self.q1_model.parameters()\n\n def q2_parameters(self):\n return self.q2_model.parameters()\n\n def v_parameters(self):\n return self.v_model.parameters()\n\n def train_mode(self, itr):\n super().train_mode(itr)\n self.q1_model.train()\n self.q2_model.train()\n self.v_model.train()\n\n def sample_mode(self, itr):\n super().sample_mode(itr)\n self.q1_model.eval()\n self.q2_model.eval()\n self.v_model.eval()\n if itr == 0:\n logger.log(f\"Agent at itr {itr}, sample std: {self.pretrain_std}\")\n if itr == self.min_itr_learn:\n logger.log(f\"Agent at itr {itr}, sample std: learned.\")\n std = None if itr >= self.min_itr_learn else self.pretrain_std\n self.distribution.set_std(std) # If None: std from policy dist_info.\n\n def eval_mode(self, itr):\n super().eval_mode(itr)\n self.q1_model.eval()\n 
self.q2_model.eval()\n self.v_model.eval()\n self.distribution.set_std(0.0) # Deterministic (dist_info std ignored).\n\n def state_dict(self):\n return dict(\n model=self.model.state_dict(), # Pi model.\n q1_model=self.q1_model.state_dict(),\n q2_model=self.q2_model.state_dict(),\n v_model=self.v_model.state_dict(),\n target_v_model=self.target_v_model.state_dict(),\n )\n\n def load_state_dict(self, state_dict):\n self.model.load_state_dict(state_dict[\"model\"])\n self.q1_model.load_state_dict(state_dict[\"q1_model\"])\n self.q2_model.load_state_dict(state_dict[\"q2_model\"])\n self.v_model.load_state_dict(state_dict[\"v_model\"])\n self.target_v_model.load_state_dict(state_dict[\"target_v_model\"])\n" ]
[ [ "torch.nn.Linear", "torch.nn.LSTM" ], [ "torch.cuda.device_count" ], [ "torch.no_grad" ], [ "numpy.arange", "numpy.argmax", "numpy.zeros", "numpy.prod" ], [ "numpy.exp", "torch.no_grad", "torch.nn.parallel.DistributedDataParallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
PeppeSaccardi/pytorch-lightning
[ "046110797227c352126c779c207e076ce9682eae", "046110797227c352126c779c207e076ce9682eae" ]
[ "tests/checkpointing/test_trainer_checkpoint.py", "pytorch_lightning/utilities/model_summary.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom copy import deepcopy\n\nimport torch\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning import seed_everything, Trainer\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom tests.helpers import BoringModel\n\n\ndef test_finetuning_with_resume_from_checkpoint(tmpdir):\n \"\"\"\n This test validates that generated ModelCheckpoint is pointing to the right best_model_path during test\n \"\"\"\n\n seed_everything(4)\n\n checkpoint_callback = ModelCheckpoint(monitor=\"val_loss\", dirpath=tmpdir, filename=\"{epoch:02d}\", save_top_k=-1)\n\n class ExtendedBoringModel(BoringModel):\n def configure_optimizers(self):\n optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.001)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)\n return [optimizer], [lr_scheduler]\n\n def validation_step(self, batch, batch_idx):\n output = self.layer(batch)\n loss = self.loss(batch, output)\n self.log(\"val_loss\", loss, on_epoch=True, prog_bar=True)\n\n model = ExtendedBoringModel()\n model.validation_epoch_end = None\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_train_batches=12,\n limit_val_batches=6,\n limit_test_batches=12,\n callbacks=[checkpoint_callback],\n logger=False,\n )\n trainer.fit(model)\n assert os.listdir(tmpdir) == [\"epoch=00.ckpt\"]\n\n best_model_paths = [checkpoint_callback.best_model_path]\n results = []\n\n for idx in range(3, 6):\n # load from checkpoint\n trainer = pl.Trainer(\n default_root_dir=tmpdir,\n max_epochs=idx,\n limit_train_batches=12,\n limit_val_batches=12,\n limit_test_batches=12,\n resume_from_checkpoint=best_model_paths[-1],\n progress_bar_refresh_rate=0,\n )\n trainer.fit(model)\n trainer.test()\n results.append(deepcopy(trainer.callback_metrics))\n best_model_paths.append(trainer.checkpoint_callback.best_model_path)\n\n for idx in range(len(results) - 1):\n assert results[idx][\"val_loss\"] > results[idx + 1][\"val_loss\"]\n\n for idx, best_model_path in enumerate(best_model_paths):\n if idx == 0:\n assert best_model_path.endswith(f\"epoch=0{idx}.ckpt\")\n else:\n assert f\"epoch={idx + 1}\" in best_model_path\n\n\ndef test_accumulated_gradient_batches_with_resume_from_checkpoint(tmpdir):\n \"\"\"\n This test validates that accumulated gradient is properly recomputed and reset on the trainer.\n \"\"\"\n\n ckpt = ModelCheckpoint(dirpath=tmpdir, save_last=True)\n model = BoringModel()\n trainer_kwargs = dict(\n max_epochs=1, accumulate_grad_batches={0: 2}, callbacks=ckpt, limit_train_batches=1, limit_val_batches=0\n )\n trainer = Trainer(**trainer_kwargs)\n trainer.fit(model)\n\n trainer_kwargs[\"max_epochs\"] = 2\n trainer_kwargs[\"resume_from_checkpoint\"] = ckpt.last_model_path\n trainer = Trainer(**trainer_kwargs)\n trainer.fit(model)\n", "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance 
with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom collections import OrderedDict\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\nfrom torch.utils.hooks import RemovableHandle\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.utilities import AMPType, DeviceType, rank_zero_deprecation\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.imports import _TORCH_GREATER_EQUAL_1_8\nfrom pytorch_lightning.utilities.warnings import WarningCache\n\nlog = logging.getLogger(__name__)\nwarning_cache = WarningCache()\n\nPARAMETER_NUM_UNITS = [\" \", \"K\", \"M\", \"B\", \"T\"]\nUNKNOWN_SIZE = \"?\"\n\n\nclass LayerSummary:\n \"\"\"\n Summary class for a single layer in a :class:`~pytorch_lightning.core.lightning.LightningModule`.\n It collects the following information:\n\n - Type of the layer (e.g. Linear, BatchNorm1d, ...)\n - Input shape\n - Output shape\n - Number of parameters\n\n The input and output shapes are only known after the example input array was\n passed through the model.\n\n Example::\n\n >>> model = torch.nn.Conv2d(3, 8, 3)\n >>> summary = LayerSummary(model)\n >>> summary.num_parameters\n 224\n >>> summary.layer_type\n 'Conv2d'\n >>> output = model(torch.rand(1, 3, 5, 5))\n >>> summary.in_size\n [1, 3, 5, 5]\n >>> summary.out_size\n [1, 8, 3, 3]\n\n Args:\n module: A module to summarize\n\n \"\"\"\n\n def __init__(self, module: nn.Module):\n super().__init__()\n self._module = module\n self._hook_handle = self._register_hook()\n self._in_size = None\n self._out_size = None\n\n def __del__(self):\n self.detach_hook()\n\n def _register_hook(self) -> Optional[RemovableHandle]:\n \"\"\"\n Registers a hook on the module that computes the input- and output size(s) on the first forward pass.\n If the hook is called, it will remove itself from the from the module, meaning that\n recursive models will only record their input- and output shapes once.\n Registering hooks on :class:`~torch.jit.ScriptModule` is not supported.\n\n Return:\n A handle for the installed hook, or ``None`` if registering the hook is not possible.\n \"\"\"\n\n def hook(module, inp, out):\n if len(inp) == 1:\n inp = inp[0]\n self._in_size = parse_batch_shape(inp)\n self._out_size = parse_batch_shape(out)\n self._hook_handle.remove()\n\n handle = None\n if not isinstance(self._module, torch.jit.ScriptModule):\n handle = self._module.register_forward_hook(hook)\n return handle\n\n def detach_hook(self):\n \"\"\"\n Removes the forward hook if it was not already removed in the forward pass.\n Will be called after the summary is created.\n \"\"\"\n if self._hook_handle is not None:\n self._hook_handle.remove()\n\n @property\n def in_size(self) -> Union[str, List]:\n return self._in_size or UNKNOWN_SIZE\n\n @property\n def out_size(self) -> Union[str, List]:\n return self._out_size or UNKNOWN_SIZE\n\n @property\n def layer_type(self) -> str:\n \"\"\"Returns the class name of the module.\"\"\"\n return str(self._module.__class__.__name__)\n\n @property\n def 
num_parameters(self) -> int:\n \"\"\"Returns the number of parameters in this module.\"\"\"\n return sum(np.prod(p.shape) if not _is_lazy_weight_tensor(p) else 0 for p in self._module.parameters())\n\n\nclass ModelSummary:\n \"\"\"\n Generates a summary of all layers in a :class:`~pytorch_lightning.core.lightning.LightningModule`.\n\n Args:\n model: The model to summarize (also referred to as the root module).\n mode: Can be one of\n\n - `top` (default): only the top-level modules will be recorded (the children of the root module)\n - `full`: summarizes all layers and their submodules in the root module\n\n .. deprecated:: v1.4\n This parameter was deprecated in v1.4 in favor of `max_depth` and will be removed in v1.6.\n\n max_depth: Maximum depth of modules to show. Use -1 to show all modules or 0 to show no\n summary. Defaults to 1.\n\n The string representation of this summary prints a table with columns containing\n the name, type and number of parameters for each layer.\n\n The root module may also have an attribute ``example_input_array`` as shown in the example below.\n If present, the root module will be called with it as input to determine the\n intermediate input- and output shapes of all layers. Supported are tensors and\n nested lists and tuples of tensors. All other types of inputs will be skipped and show as `?`\n in the summary table. The summary will also display `?` for layers not used in the forward pass.\n\n Example::\n\n >>> import pytorch_lightning as pl\n >>> class LitModel(pl.LightningModule):\n ...\n ... def __init__(self):\n ... super().__init__()\n ... self.net = nn.Sequential(nn.Linear(256, 512), nn.BatchNorm1d(512))\n ... self.example_input_array = torch.zeros(10, 256) # optional\n ...\n ... def forward(self, x):\n ... return self.net(x)\n ...\n >>> model = LitModel()\n >>> ModelSummary(model, max_depth=1) # doctest: +NORMALIZE_WHITESPACE\n | Name | Type | Params | In sizes | Out sizes\n ------------------------------------------------------------\n 0 | net | Sequential | 132 K | [10, 256] | [10, 512]\n ------------------------------------------------------------\n 132 K Trainable params\n 0 Non-trainable params\n 132 K Total params\n 0.530 Total estimated model params size (MB)\n >>> ModelSummary(model, max_depth=-1) # doctest: +NORMALIZE_WHITESPACE\n | Name | Type | Params | In sizes | Out sizes\n --------------------------------------------------------------\n 0 | net | Sequential | 132 K | [10, 256] | [10, 512]\n 1 | net.0 | Linear | 131 K | [10, 256] | [10, 512]\n 2 | net.1 | BatchNorm1d | 1.0 K | [10, 512] | [10, 512]\n --------------------------------------------------------------\n 132 K Trainable params\n 0 Non-trainable params\n 132 K Total params\n 0.530 Total estimated model params size (MB)\n \"\"\"\n\n MODES = dict(top=1, full=-1) # TODO: remove in v1.6\n\n def __init__(self, model, mode: Optional[str] = None, max_depth: Optional[int] = 1):\n self._model = model\n\n # temporary mapping from mode to max_depth\n if max_depth is None or mode is not None:\n if mode in ModelSummary.MODES:\n max_depth = ModelSummary.MODES[mode]\n rank_zero_deprecation(\n \"Argument `mode` in `ModelSummary` is deprecated in v1.4\"\n f\" and will be removed in v1.6. 
Use `max_depth={max_depth}` to replicate `mode={mode}` behaviour.\"\n )\n else:\n raise MisconfigurationException(f\"`mode` can be {', '.join(ModelSummary.MODES)}, got {mode}.\")\n\n if not isinstance(max_depth, int) or max_depth < -1:\n raise ValueError(f\"`max_depth` can be -1, 0 or > 0, got {max_depth}.\")\n\n self._max_depth = max_depth\n self._layer_summary = self.summarize()\n # 1 byte -> 8 bits\n # TODO: how do we compute precisin_megabytes in case of mixed precision?\n precision = self._model.precision if isinstance(self._model.precision, int) else 32\n self._precision_megabytes = (precision / 8.0) * 1e-6\n\n @property\n def named_modules(self) -> List[Tuple[str, nn.Module]]:\n if self._max_depth == 0:\n mods = []\n elif self._max_depth == 1:\n # the children are the top-level modules\n mods = self._model.named_children()\n else:\n mods = self._model.named_modules()\n mods = list(mods)[1:] # do not include root module (LightningModule)\n return list(mods)\n\n @property\n def layer_names(self) -> List[str]:\n return list(self._layer_summary.keys())\n\n @property\n def layer_types(self) -> List[str]:\n return [layer.layer_type for layer in self._layer_summary.values()]\n\n @property\n def in_sizes(self) -> List:\n return [layer.in_size for layer in self._layer_summary.values()]\n\n @property\n def out_sizes(self) -> List:\n return [layer.out_size for layer in self._layer_summary.values()]\n\n @property\n def param_nums(self) -> List[int]:\n return [layer.num_parameters for layer in self._layer_summary.values()]\n\n @property\n def total_parameters(self) -> int:\n return sum(p.numel() if not _is_lazy_weight_tensor(p) else 0 for p in self._model.parameters())\n\n @property\n def trainable_parameters(self) -> int:\n return sum(\n p.numel() if not _is_lazy_weight_tensor(p) else 0 for p in self._model.parameters() if p.requires_grad\n )\n\n @property\n def model_size(self) -> float:\n # todo: seems it does not work with quantized models - it returns 0.0\n return self.total_parameters * self._precision_megabytes\n\n def summarize(self) -> Dict[str, LayerSummary]:\n summary = OrderedDict((name, LayerSummary(module)) for name, module in self.named_modules)\n if self._model.example_input_array is not None:\n self._forward_example_input()\n for layer in summary.values():\n layer.detach_hook()\n\n if self._max_depth >= 1:\n # remove summary entries with depth > max_depth\n for k in [k for k in summary if k.count(\".\") >= self._max_depth]:\n del summary[k]\n\n return summary\n\n def _forward_example_input(self) -> None:\n \"\"\"Run the example input through each layer to get input- and output sizes.\"\"\"\n model = self._model\n trainer = self._model.trainer\n\n input_ = model.example_input_array\n input_ = model._apply_batch_transfer_handler(input_)\n\n if trainer is not None and trainer.amp_backend == AMPType.NATIVE and trainer._device_type != DeviceType.TPU:\n model.forward = torch.cuda.amp.autocast()(model.forward)\n\n mode = model.training\n model.eval()\n with torch.no_grad():\n # let the model hooks collect the input- and output shapes\n if isinstance(input_, (list, tuple)):\n model(*input_)\n elif isinstance(input_, dict):\n model(**input_)\n else:\n model(input_)\n model.train(mode) # restore mode of module\n\n def __str__(self):\n \"\"\"\n Makes a summary listing with:\n\n Layer Name, Layer Type, Number of Parameters, Input Sizes, Output Sizes, Model Size\n \"\"\"\n arrays = [\n [\" \", list(map(str, range(len(self._layer_summary))))],\n [\"Name\", self.layer_names],\n [\"Type\", 
self.layer_types],\n [\"Params\", list(map(get_human_readable_count, self.param_nums))],\n ]\n if self._model.example_input_array is not None:\n arrays.append([\"In sizes\", self.in_sizes])\n arrays.append([\"Out sizes\", self.out_sizes])\n total_parameters = self.total_parameters\n trainable_parameters = self.trainable_parameters\n model_size = self.model_size\n\n return _format_summary_table(total_parameters, trainable_parameters, model_size, *arrays)\n\n def __repr__(self):\n return str(self)\n\n\ndef parse_batch_shape(batch: Any) -> Union[str, List]:\n if hasattr(batch, \"shape\"):\n return list(batch.shape)\n\n if isinstance(batch, (list, tuple)):\n shape = [parse_batch_shape(el) for el in batch]\n return shape\n\n return UNKNOWN_SIZE\n\n\ndef _format_summary_table(total_parameters: int, trainable_parameters: int, model_size: float, *cols) -> str:\n \"\"\"\n Takes in a number of arrays, each specifying a column in\n the summary table, and combines them all into one big\n string defining the summary table that are nicely formatted.\n \"\"\"\n n_rows = len(cols[0][1])\n n_cols = 1 + len(cols)\n\n # Get formatting width of each column\n col_widths = []\n for c in cols:\n col_width = max(len(str(a)) for a in c[1]) if n_rows else 0\n col_width = max(col_width, len(c[0])) # minimum length is header length\n col_widths.append(col_width)\n\n # Formatting\n s = \"{:<{}}\"\n total_width = sum(col_widths) + 3 * n_cols\n header = [s.format(c[0], l) for c, l in zip(cols, col_widths)]\n\n # Summary = header + divider + Rest of table\n summary = \" | \".join(header) + \"\\n\" + \"-\" * total_width\n for i in range(n_rows):\n line = []\n for c, l in zip(cols, col_widths):\n line.append(s.format(str(c[1][i]), l))\n summary += \"\\n\" + \" | \".join(line)\n summary += \"\\n\" + \"-\" * total_width\n\n summary += \"\\n\" + s.format(get_human_readable_count(trainable_parameters), 10)\n summary += \"Trainable params\"\n summary += \"\\n\" + s.format(get_human_readable_count(total_parameters - trainable_parameters), 10)\n summary += \"Non-trainable params\"\n summary += \"\\n\" + s.format(get_human_readable_count(total_parameters), 10)\n summary += \"Total params\"\n summary += \"\\n\" + s.format(get_formatted_model_size(model_size), 10)\n summary += \"Total estimated model params size (MB)\"\n\n return summary\n\n\ndef get_formatted_model_size(total_model_size: float) -> float:\n return f\"{total_model_size:,.3f}\"\n\n\ndef get_human_readable_count(number: int) -> str:\n \"\"\"\n Abbreviates an integer number with K, M, B, T for thousands, millions,\n billions and trillions, respectively.\n\n Examples:\n >>> get_human_readable_count(123)\n '123 '\n >>> get_human_readable_count(1234) # (one thousand)\n '1.2 K'\n >>> get_human_readable_count(2e6) # (two million)\n '2.0 M'\n >>> get_human_readable_count(3e9) # (three billion)\n '3.0 B'\n >>> get_human_readable_count(4e14) # (four hundred trillion)\n '400 T'\n >>> get_human_readable_count(5e15) # (more than trillion)\n '5,000 T'\n\n Args:\n number: a positive integer number\n\n Return:\n A string formatted according to the pattern described above.\n\n \"\"\"\n assert number >= 0\n labels = PARAMETER_NUM_UNITS\n num_digits = int(np.floor(np.log10(number)) + 1 if number > 0 else 1)\n num_groups = int(np.ceil(num_digits / 3))\n num_groups = min(num_groups, len(labels)) # don't abbreviate beyond trillions\n shift = -3 * (num_groups - 1)\n number = number * (10 ** shift)\n index = num_groups - 1\n if index < 1 or number >= 100:\n return f\"{int(number):,d} 
{labels[index]}\"\n\n return f\"{number:,.1f} {labels[index]}\"\n\n\ndef _is_lazy_weight_tensor(p: Tensor) -> bool:\n if _TORCH_GREATER_EQUAL_1_8:\n from torch.nn.parameter import UninitializedParameter\n\n if isinstance(p, UninitializedParameter):\n warning_cache.warn(\n \"A layer with UninitializedParameter was found. \"\n \"Thus, the total number of parameters detected may be inaccurate.\"\n )\n return True\n return False\n\n\ndef summarize(\n lightning_module: \"pl.LightningModule\", mode: Optional[str] = \"top\", max_depth: Optional[int] = None\n) -> Optional[ModelSummary]:\n \"\"\"\n Summarize the LightningModule specified by `lightning_module`.\n\n Args:\n lightning_module: `LightningModule` to summarize.\n mode: Can be either ``'top'`` (summarize only direct submodules) or ``'full'`` (summarize all layers).\n\n .. deprecated:: v1.4\n This parameter was deprecated in v1.4 in favor of `max_depth` and will be removed in v1.6.\n\n max_depth: The maximum depth of layer nesting that the summary will include. A value of 0 turns the\n layer summary off. Default: 1.\n\n Return:\n The model summary object\n \"\"\"\n\n # temporary mapping from mode to max_depth\n if max_depth is None:\n if mode in ModelSummary.MODES:\n max_depth = ModelSummary.MODES[mode]\n rank_zero_deprecation(\n \"Argument `mode` in `LightningModule.summarize` is deprecated in v1.4\"\n f\" and will be removed in v1.6. Use `max_depth={max_depth}` to replicate `mode={mode}` behavior.\"\n )\n model_summary = ModelSummary(lightning_module, max_depth=max_depth)\n elif mode is not None:\n raise MisconfigurationException(f\"`mode` can be None, {', '.join(ModelSummary.MODES)}, got {mode}\")\n else:\n model_summary = ModelSummary(lightning_module, max_depth=max_depth)\n log.info(\"\\n\" + str(model_summary))\n return model_summary\n" ]
[ [ "torch.optim.lr_scheduler.StepLR" ], [ "torch.cuda.amp.autocast", "numpy.ceil", "numpy.log10", "torch.no_grad", "numpy.prod" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nschmucker/Sundial
[ "3c1ef3ac422314b7d811c842065f0992a8ff8aeb" ]
[ "sundial.py" ]
[ "\"\"\"\nsundial.py\nThis file contains code to run an indoor sundial\nDeveloped on Python 3.7.3 / Raspberry Pi 4\nNathaniel Schmucker\n\"\"\"\n\nfrom busio import I2C\nfrom board import SCL, SDA\nfrom adafruit_pca9685 import PCA9685\nfrom adafruit_motor import servo\n\nfrom pysolar.solar import get_altitude, get_azimuth\nfrom scipy.optimize import fsolve\nfrom numpy import isclose\nfrom math import cos, sin, tan, pi\n\nimport requests\nimport datetime\nfrom time import sleep\n\n\n# --- Global constants; update to reflect physical sundial ---\nGNOMON_LOC = (0, 0, 0) # By definition, center on gnomon tip\nGNOMON_LENGTH = None\nARM_LOC = (0, -8, 6) # Pivot point of movable arm\nARM_LENGTH = 13.25 # Consistent units with ARM_LOC\n\n# Independence Hall\nLAT = 39.95 \nLON = -75.15 \n\nAPI_KEY = \"API_KEY\"\nBASE_URL = \"https://data.climacell.co/v4/timelines?\"\nURL = BASE_URL + \\\n \"location=\" + str(LAT) + \",\" + str(LON) + \\\n \"&fields=cloudCover\" + \\\n \"&timesteps=current\" + \\\n \"&apikey=\" + API_KEY\n\n\n# --- Variables; need to update for initial code execution ---\n# See helper_sundial_inputs.py\ngnomon = {\n \"loc\": GNOMON_LOC,\n \"length\": GNOMON_LENGTH,\n \"alt\": 0, # radians, relative to horizon\n \"az\": 0 # radians, relative to north\n}\narm = {\n \"loc\": ARM_LOC,\n \"length\": ARM_LENGTH,\n \"alt\": 0, # radians, relative to horizon\n \"az\": 0 # radians, relative to north\n}\nlast_sunrise = {\n \"alt\": -0.5,\n \"az\": 2.1,\n \"t\": 10.3\n}\nguess = {\n \"alt\": 0.1,\n \"az\": 3.6,\n \"t\": 7.1\n}\ntimes = {\n \"now\": datetime.datetime.now(datetime.timezone.utc),\n \"last_sunrise\": datetime.datetime(2021, 4, 10, 10, 21, tzinfo=datetime.timezone.utc)\n}\n\nis_led_on = gnomon[\"alt\"] >= 0\n\n\n# --- Functions ---\ndef get_cloudcover():\n \"\"\"Helper function to retrieve current cloudcover\n \n Uses climacell API: https://docs.climacell.co/reference/welcome\n \"\"\"\n \n r = requests.get(URL)\n j = r.json()\n cloud_cover = j[\"data\"][\"timelines\"][0][\"intervals\"][0][\"values\"][\"cloudCover\"]/100\n \n return cloud_cover # [0,1]\n\ndef mimic_clouds(raw_val):\n \"\"\"Helper function to adjust LED brightness for cloudcover\n\n Never goes below 80%, even if 100% cloudy\n \"\"\"\n \n pct_sun = 1 - get_cloudcover()\n pct_to_adj = 0.8 + 0.2*pct_sun\n adj_val = int(int(raw_val)*pct_to_adj)\n \n return adj_val\n\ndef update_leds():\n \"\"\"Adjust LED based on whether sun is up and % cloudiness\"\"\"\n \n brightness = 0xffff if is_led_on else 0\n adjusted_brightness = mimic_clouds(brightness)\n \n led0.duty_cycle = adjusted_brightness\n led2.duty_cycle = adjusted_brightness\n led4.duty_cycle = adjusted_brightness\n\ndef func(vars):\n \"\"\"Intersection of a line and a sphere\n\n Coordinate system centered on gnomon tip\n Line passes through gnomon tip and is in line with the sun\n Sphere is centered on the arm's pivot and has arm-length radius\n \"\"\"\n \n alt, az, t = vars\n return [(arm[\"loc\"][0] + arm[\"length\"]*cos(alt)*cos(az-pi/2)) - (gnomon[\"loc\"][0] + t*cos(gnomon[\"az\"]-pi/2)),\n (arm[\"loc\"][1] + arm[\"length\"]*cos(alt)*sin(az-pi/2)) - (gnomon[\"loc\"][1] + t*sin(gnomon[\"az\"]-pi/2)),\n (arm[\"loc\"][2] + arm[\"length\"]*sin(alt)) - (gnomon[\"loc\"][2] + t*tan(gnomon[\"alt\"]))]\n\ndef validate_fsolve(x):\n \"\"\"Ensure fsolve successfully found roots in the right quadrant\"\"\"\n \n finds_zeros = all(isclose(func(x), [0.0, 0.0, 0.0]))\n positive_t = x[2] >= 0\n\n return (finds_zeros and positive_t)\n\ndef rotate_angle(angle, min_val, max_val):\n 
\"\"\"Adjust if our roots are the wrong multiple of 2pi\n\n e.g., sin(0) = sin(2pi) = sin(4pi) = ...\n \"\"\"\n \n a = angle\n while a < min_val: a += 2*pi\n while a > max_val: a += -2*pi\n\n return a\n\n\n# --- Setup Servo hat and assign LEDs and servos to their channels\ni2c = I2C(SCL, SDA)\nhat = PCA9685(i2c)\nhat.frequency = 50\n\nservo_alt = servo.Servo(hat.channels[13], min_pulse=600, max_pulse=2500)\nservo_az = servo.Servo(hat.channels[15], min_pulse=600, max_pulse=2500)\nled0 = hat.channels[0]\nled2 = hat.channels[2]\nled4 = hat.channels[4]\n\nunstable_math = False\nwhile not unstable_math:\n times[\"now\"] = datetime.datetime.now(datetime.timezone.utc)\n \n # Get sun's location at current time (in radians)\n gnomon[\"alt\"] = get_altitude(LAT, LON, times[\"now\"])*pi/180\n gnomon[\"az\"] = get_azimuth(LAT, LON, times[\"now\"])*pi/180\n \n if gnomon[\"alt\"] < 0:\n # Sleep until 10 minutes before this morning's sunrise\n # and then increments of 1 minute until sunrise\n if is_led_on:\n sleep_time = times[\"last_sunrise\"] + datetime.timedelta(days=1, minutes=-10) - times[\"now\"]\n else:\n sleep_time = datetime.timedelta(minutes=1)\n \n # Prep our next guess to be the last sunrise alt/az/t\n guess[\"alt\"] = last_sunrise[\"alt\"]\n guess[\"az\"] = last_sunrise[\"az\"]\n guess[\"t\"] = last_sunrise[\"t\"]\n \n # Light off and move servos\n is_led_on = False\n update_leds()\n \n servo_alt.angle = 135\n servo_az.angle = 90\n \n sleep(sleep_time.total_seconds())\n \n else:\n # Calculate sun's location relative to arm pivot point\n root = fsolve(func, (guess[\"alt\"], guess[\"az\"], guess[\"t\"]))\n\n # Validate fsolve worked and then continue with updates\n if validate_fsolve(root):\n # Move our alt and az to be in the correct range\n arm[\"alt\"] = rotate_angle(root[0], -pi/2, pi/2)\n arm[\"az\"] = rotate_angle(root[1], pi/2, 3*pi/2)\n \n # If the sun is coming up, refresh our best guess for sunrise time/alt/az/t\n if not is_led_on:\n times[\"last_sunrise\"] = times[\"now\"]\n last_sunrise[\"alt\"] = arm[\"alt\"]\n last_sunrise[\"az\"] = arm[\"az\"]\n last_sunrise[\"t\"] = root[2]\n \n # Prep our next guess to be the latest solution\n guess[\"alt\"] = arm[\"alt\"]\n guess[\"az\"] = arm[\"az\"]\n guess[\"t\"] = root[2]\n\n # Move servos and light on\n servo_alt.angle = (arm[\"alt\"]+pi/2)*180/pi\n servo_az.angle = (pi*3/2-arm[\"az\"])*180/pi\n \n is_led_on = True\n update_leds()\n\n # Sleep 240 seconds (1 degree of earth's rotation)\n sleep(240)\n else:\n unstable_math = True\n\n# Light off and servos to home position\nis_led_on = False\nupdate_leds()\nservo_alt.angle = 135\nservo_az.angle = 90\n" ]
[ [ "scipy.optimize.fsolve" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
sangyx/graph-tricks
[ "618bea55e221a9a3caedbe73aaa584303e583a98", "618bea55e221a9a3caedbe73aaa584303e583a98" ]
[ "benchmark/pyg/model.py", "gtrick/dgl/drop_edge.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch_geometric.nn import MessagePassing\nimport torch.nn.functional as F\nfrom torch_geometric.nn import global_mean_pool, GCNConv, SAGEConv\nfrom torch_geometric.utils import degree\n\nfrom ogb.graphproppred.mol_encoder import AtomEncoder, BondEncoder\n\n### GIN convolution along the graph structure\nclass EGINConv(MessagePassing):\n def __init__(self, emb_dim):\n '''\n emb_dim (int): node embedding dimensionality\n '''\n\n super(EGINConv, self).__init__(aggr=\"add\")\n\n self.mlp = torch.nn.Sequential(torch.nn.Linear(emb_dim, 2*emb_dim), torch.nn.BatchNorm1d(2*emb_dim), torch.nn.ReLU(), torch.nn.Linear(2*emb_dim, emb_dim))\n self.eps = torch.nn.Parameter(torch.Tensor([0]))\n\n self.edge_encoder = BondEncoder(emb_dim=emb_dim)\n \n def reset_parameters(self):\n for c in self.mlp.children():\n if hasattr(c, 'reset_parameters'):\n c.reset_parameters()\n nn.init.constant_(self.eps.data, 0)\n for emb in self.edge_encoder.bond_embedding_list:\n nn.init.xavier_uniform_(emb.weight.data)\n\n def forward(self, x, edge_index, edge_attr):\n edge_embedding = self.edge_encoder(edge_attr)\n out = self.mlp((1 + self.eps) *x + self.propagate(edge_index, x=x, edge_attr=edge_embedding))\n\n return out\n\n def message(self, x_j, edge_attr):\n return F.relu(x_j + edge_attr)\n\n def update(self, aggr_out):\n return aggr_out\n\n### GCN convolution along the graph structure\nclass EGCNConv(MessagePassing):\n def __init__(self, emb_dim):\n super(EGCNConv, self).__init__(aggr='add')\n\n self.linear = torch.nn.Linear(emb_dim, emb_dim)\n self.root_emb = torch.nn.Embedding(1, emb_dim)\n self.edge_encoder = BondEncoder(emb_dim = emb_dim)\n \n def reset_parameters(self):\n self.linear.reset_parameters()\n self.root_emb.reset_parameters()\n for emb in self.edge_encoder.bond_embedding_list:\n nn.init.xavier_uniform_(emb.weight.data)\n\n def forward(self, x, edge_index, edge_attr):\n x = self.linear(x)\n edge_embedding = self.edge_encoder(edge_attr)\n\n row, col = edge_index\n\n #edge_weight = torch.ones((edge_index.size(1), ), device=edge_index.device)\n deg = degree(row, x.size(0), dtype = x.dtype) + 1\n deg_inv_sqrt = deg.pow(-0.5)\n deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0\n\n norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]\n\n return self.propagate(edge_index, x=x, edge_attr=edge_embedding, norm=norm) + F.relu(x + self.root_emb.weight) * 1./deg.view(-1,1)\n\n def message(self, x_j, edge_attr, norm):\n return norm.view(-1, 1) * F.relu(x_j + edge_attr)\n\n def update(self, aggr_out):\n return aggr_out\n\n\nclass EGCN(nn.Module):\n\n def __init__(self, hidden_channels, out_channels, num_layers,\n dropout):\n\n super(EGCN, self).__init__()\n\n self.node_encoder = AtomEncoder(hidden_channels)\n\n self.convs = nn.ModuleList()\n self.bns = nn.ModuleList()\n\n for i in range(num_layers):\n self.convs.append(\n EGCNConv(hidden_channels))\n if i != num_layers - 1:\n self.bns.append(nn.BatchNorm1d(hidden_channels))\n\n self.dropout = dropout\n\n self.out = nn.Linear(hidden_channels, out_channels)\n\n def reset_parameters(self):\n for emb in self.node_encoder.atom_embedding_list:\n torch.nn.init.xavier_uniform_(emb.weight.data)\n\n num_layers = len(self.convs)\n\n for i in range(num_layers):\n self.convs[i].reset_parameters()\n if i != num_layers - 1:\n self.bns[i].reset_parameters()\n\n self.out.reset_parameters()\n\n def forward(self, batched_data):\n x, edge_index, edge_attr, batch = batched_data.x, batched_data.edge_index, batched_data.edge_attr, batched_data.batch\n\n h = 
self.node_encoder(x)\n\n for i, conv in enumerate(self.convs[:-1]):\n h = conv(h, edge_index, edge_attr)\n h = self.bns[i](h)\n h = F.relu(h)\n h = F.dropout(h, p=self.dropout, training=self.training)\n h = self.convs[-1](h, edge_index, edge_attr)\n h = F.dropout(h, self.dropout, training=self.training)\n\n h = global_mean_pool(h, batch)\n h = self.out(h)\n\n return h\n\n\nclass EGIN(nn.Module):\n def __init__(self, hidden_channels, out_channels, num_layers,\n dropout):\n\n super(EGIN, self).__init__()\n\n self.node_encoder = AtomEncoder(hidden_channels)\n\n self.convs = nn.ModuleList()\n self.bns = nn.ModuleList()\n\n for i in range(num_layers):\n self.convs.append(\n EGINConv(hidden_channels))\n if i != num_layers - 1:\n self.bns.append(nn.BatchNorm1d(hidden_channels))\n\n self.dropout = dropout\n\n self.out = nn.Linear(hidden_channels, out_channels)\n\n def reset_parameters(self):\n for emb in self.node_encoder.atom_embedding_list:\n nn.init.xavier_uniform_(emb.weight.data)\n \n num_layers = len(self.convs)\n\n for i in range(num_layers):\n self.convs[i].reset_parameters()\n if i != num_layers - 1:\n self.bns[i].reset_parameters()\n\n self.out.reset_parameters()\n\n def forward(self, batched_data):\n x, edge_index, edge_attr, batch = batched_data.x, batched_data.edge_index, batched_data.edge_attr, batched_data.batch\n\n h = self.node_encoder(x)\n\n for i, conv in enumerate(self.convs[:-1]):\n h = conv(h, edge_index, edge_attr)\n h = self.bns[i](h)\n h = F.relu(h)\n h = F.dropout(h, p=self.dropout, training=self.training)\n h = self.convs[-1](h, edge_index, edge_attr)\n h = F.dropout(h, self.dropout, training=self.training)\n\n h = global_mean_pool(h, batch)\n h = self.out(h)\n\n return h\n\n\nclass GCN(torch.nn.Module):\n def __init__(self, in_channels, hidden_channels, out_channels, num_layers,\n dropout):\n super(GCN, self).__init__()\n\n self.convs = torch.nn.ModuleList()\n self.convs.append(GCNConv(in_channels, hidden_channels, cached=True))\n self.bns = torch.nn.ModuleList()\n self.bns.append(torch.nn.BatchNorm1d(hidden_channels))\n for _ in range(num_layers - 2):\n self.convs.append(\n GCNConv(hidden_channels, hidden_channels, cached=True))\n self.bns.append(torch.nn.BatchNorm1d(hidden_channels))\n self.convs.append(GCNConv(hidden_channels, out_channels, cached=True))\n\n self.dropout = dropout\n\n def reset_parameters(self):\n for conv in self.convs:\n conv.reset_parameters()\n for bn in self.bns:\n bn.reset_parameters()\n\n def forward(self, x, adj_t):\n for i, conv in enumerate(self.convs[:-1]):\n x = conv(x, adj_t)\n x = self.bns[i](x)\n x = F.relu(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = self.convs[-1](x, adj_t)\n return x\n\n\nclass SAGE(torch.nn.Module):\n def __init__(self, in_channels, hidden_channels, out_channels, num_layers,\n dropout):\n super(SAGE, self).__init__()\n\n self.convs = torch.nn.ModuleList()\n self.convs.append(SAGEConv(in_channels, hidden_channels))\n self.bns = torch.nn.ModuleList()\n self.bns.append(torch.nn.BatchNorm1d(hidden_channels))\n for _ in range(num_layers - 2):\n self.convs.append(SAGEConv(hidden_channels, hidden_channels))\n self.bns.append(torch.nn.BatchNorm1d(hidden_channels))\n self.convs.append(SAGEConv(hidden_channels, out_channels))\n\n self.dropout = dropout\n\n def reset_parameters(self):\n for conv in self.convs:\n conv.reset_parameters()\n for bn in self.bns:\n bn.reset_parameters()\n\n def forward(self, x, adj_t):\n for i, conv in enumerate(self.convs[:-1]):\n x = conv(x, adj_t)\n x = self.bns[i](x)\n x 
= F.relu(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = self.convs[-1](x, adj_t)\n return x", "import torch\nfrom torch import nn\nimport dgl\n\n\nclass DropEdge(nn.Module):\n def __init__(self, p) -> None:\n super(DropEdge, self).__init__()\n self.p = p\n\n def forward(self, g):\n eids = g.edges(form='eid')\n mask = torch.rand(eids.shape) < (1 - self.p)\n ng = dgl.remove_edges(g, eids[mask])\n return ng\n" ]
[ [ "torch.nn.BatchNorm1d", "torch.Tensor", "torch.nn.functional.dropout", "torch.nn.init.constant_", "torch.nn.ModuleList", "torch.nn.Embedding", "torch.nn.Linear", "torch.nn.functional.relu", "torch.nn.init.xavier_uniform_", "torch.nn.ReLU" ], [ "torch.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rjpower/tensorflow-io
[ "39aa0b46cfaa403121fdddbd491a03d2f3190a87", "39aa0b46cfaa403121fdddbd491a03d2f3190a87", "39aa0b46cfaa403121fdddbd491a03d2f3190a87", "39aa0b46cfaa403121fdddbd491a03d2f3190a87" ]
[ "tensorflow_io/cifar/__init__.py", "tests/test_pcap_eager.py", "tensorflow_io/audio/python/ops/audio_ops.py", "tensorflow_io/image/python/ops/image_dataset_ops.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"CIFAR File Dataset.\n\n@@CIFAR10Dataset\n@@CIFAR100Dataset\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow_io.cifar.python.ops.cifar_ops import CIFAR10Dataset\nfrom tensorflow_io.cifar.python.ops.cifar_ops import CIFAR100Dataset\n\nfrom tensorflow.python.util.all_util import remove_undocumented\n\n_allowed_symbols = [\n \"CIFAR10Dataset\",\n \"CIFAR100Dataset\",\n]\n\nremove_undocumented(__name__, allowed_exception_list=_allowed_symbols)\n", "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n# ==============================================================================\n\"\"\"\nTest PcapDataset\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tensorflow as tf\nimport tensorflow_io.pcap as pcap_io # pylint: disable=wrong-import-position\n\nif not (hasattr(tf, \"version\") and tf.version.VERSION.startswith(\"2.\")):\n tf.compat.v1.enable_eager_execution()\n\n\ndef test_pcap_input():\n \"\"\"test_pcap_input\n \"\"\"\n print(\"Testing PcapDataset\")\n pcap_filename = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"test_pcap\", \"http.pcap\")\n file_url = \"file://\" + pcap_filename\n url_filenames = [file_url]\n dataset = pcap_io.PcapDataset(url_filenames, batch=1)\n\n packets_total = 0\n for v in dataset:\n (packet_timestamp, packet_data) = v\n if packets_total == 0:\n assert packet_timestamp.numpy()[0] == 1084443427.311224 # we know this is the correct value in the test pcap file\n assert len(packet_data.numpy()[0]) == 62 # we know this is the correct packet data buffer length in the test pcap file\n packets_total += 1\n assert packets_total == 43 # we know this is the correct number of packets in the test pcap file\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Audio Dataset.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow_io.core.python.ops import data_ops as data_ops\nfrom tensorflow_io.core.python.ops import core_ops as audio_ops\n\nclass WAVDataset(data_ops.Dataset):\n \"\"\"A WAV Dataset\"\"\"\n\n def __init__(self, filename, batch=None):\n \"\"\"Create a WAVDataset.\n\n Args:\n filename: A `tf.string` tensor containing one or more filenames.\n \"\"\"\n batch = 0 if batch is None else batch\n dtypes = [tf.int16]\n shapes = [\n tf.TensorShape([None])] if batch == 0 else [\n tf.TensorShape([None, None])]\n super(WAVDataset, self).__init__(\n audio_ops.wav_dataset,\n audio_ops.wav_input(filename),\n batch, dtypes, shapes)\n\nclass AudioDataset(data_ops.Dataset):\n \"\"\"A Audio File Dataset that reads the audio file.\"\"\"\n\n def __init__(self, filename, batch=None):\n \"\"\"Create a `AudioDataset`.\n Args:\n filename: A `tf.string` tensor containing one or more filenames.\n \"\"\"\n batch = 0 if batch is None else batch\n dtypes = [tf.int16]\n shapes = [\n tf.TensorShape([None])] if batch == 0 else [\n tf.TensorShape([None, None])]\n super(AudioDataset, self).__init__(\n ffmpeg_ops.audio_dataset,\n ffmpeg_ops.audio_input(filename),\n batch, dtypes, shapes)\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Image Dataset.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow import dtypes\nfrom tensorflow.compat.v1 import data\nfrom tensorflow_io import _load_library\nimage_ops = _load_library('_image_ops.so')\n\n\nclass WebPDataset(data.Dataset):\n \"\"\"A WebP Image File Dataset that reads the WebP file.\"\"\"\n\n def __init__(self, filenames):\n \"\"\"Create a `WebPDataset`.\n\n filenames: A `tf.string` tensor containing one or more filenames.\n \"\"\"\n self._filenames = tf.convert_to_tensor(\n filenames, dtype=dtypes.string, name=\"filenames\")\n super(WebPDataset, self).__init__()\n\n def _inputs(self):\n return []\n\n def _as_variant_tensor(self):\n return image_ops.web_p_dataset(self._filenames)\n\n @property\n def output_classes(self):\n return tf.Tensor\n\n @property\n def output_shapes(self):\n return tf.TensorShape([None, None, None])\n\n @property\n def output_types(self):\n return dtypes.uint8\n\nclass TIFFDataset(data.Dataset):\n \"\"\"A TIFF Image File Dataset that reads the TIFF file.\"\"\"\n\n def __init__(self, filenames):\n \"\"\"Create a `TIFFDataset`.\n\n filenames: A `tf.string` tensor containing one or more filenames.\n \"\"\"\n self._filenames = tf.convert_to_tensor(\n filenames, dtype=dtypes.string, name=\"filenames\")\n super(TIFFDataset, self).__init__()\n\n def _inputs(self):\n return []\n\n def _as_variant_tensor(self):\n return image_ops.tiff_dataset(self._filenames)\n\n @property\n def output_classes(self):\n return tf.Tensor\n\n @property\n def output_shapes(self):\n return tf.TensorShape([None, None, None])\n\n @property\n def output_types(self):\n return dtypes.uint8\n\nclass GIFDataset(data.Dataset):\n \"\"\"A GIF Image File Dataset that reads the GIF file.\"\"\"\n\n def __init__(self, filenames):\n \"\"\"Create a `GIFDataset`.\n filenames: A `tf.string` tensor containing one or more filenames.\n \"\"\"\n self._filenames = tf.convert_to_tensor(\n filenames, dtype=dtypes.string, name=\"filenames\")\n super(GIFDataset, self).__init__()\n\n def _inputs(self):\n return []\n\n def _as_variant_tensor(self):\n return image_ops.gif_dataset(self._filenames)\n\n @property\n def output_classes(self):\n return tf.Tensor\n\n @property\n def output_shapes(self):\n return tf.TensorShape([None, None, None])\n\n @property\n def output_types(self):\n return dtypes.uint8\n\ndef decode_webp(contents, name=None):\n \"\"\"\n Decode a WebP-encoded image to a uint8 tensor.\n\n Args:\n contents: A `Tensor` of type `string`. 0-D. 
The WebP-encoded image.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `uint8` and shape of `[height, width, 4]` (RGBA).\n \"\"\"\n return image_ops.decode_web_p(contents, name=name)\n\ndef draw_bounding_boxes(images, boxes, texts=None, colors=None, name=None):\n \"\"\"\n Draw bounding boxes on a batch of images.\n\n Args:\n images: A Tensor. Must be one of the following types: float32, half.\n 4-D with shape [batch, height, width, depth]. A batch of images.\n boxes: A Tensor of type float32. 3-D with shape\n [batch, num_bounding_boxes, 4] containing bounding boxes.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `uint8` and shape of `[height, width, 4]` (RGBA).\n \"\"\"\n if texts is None:\n texts = []\n if colors is None:\n colors = [[]]\n return image_ops.draw_bounding_boxes_v3(\n images, boxes, colors, texts, name=name)\n" ]
[ [ "tensorflow.python.util.all_util.remove_undocumented" ], [ "tensorflow.version.VERSION.startswith", "tensorflow.compat.v1.enable_eager_execution" ], [ "tensorflow.TensorShape" ], [ "tensorflow.convert_to_tensor", "tensorflow.TensorShape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
usccolumbia/deeperGATGNN
[ "24ee53b23d1559040b7aab971768434753b582ff", "24ee53b23d1559040b7aab971768434753b582ff" ]
[ "matdeeplearn/models/super_mpnn.py", "matdeeplearn/models/schnet.py" ]
[ "import torch\r\nfrom torch import Tensor\r\nimport torch.nn.functional as F\r\nfrom torch.nn import Sequential, Linear, ReLU, BatchNorm1d, GRU\r\nimport torch_geometric\r\nfrom torch_geometric.nn import (\r\n Set2Set,\r\n global_mean_pool,\r\n global_add_pool,\r\n global_max_pool,\r\n NNConv,\r\n DiffGroupNorm\r\n)\r\nfrom torch_scatter import scatter_mean, scatter_add, scatter_max, scatter\r\n\r\n\r\n# SUPER_MPNN\r\nclass SUPER_MPNN(torch.nn.Module):\r\n def __init__(\r\n self,\r\n data,\r\n dim1=64,\r\n dim2=64,\r\n dim3=64,\r\n pre_fc_count=1,\r\n gc_count=3,\r\n post_fc_count=1,\r\n pool=\"global_mean_pool\",\r\n pool_order=\"early\",\r\n batch_norm=\"True\",\r\n batch_track_stats=\"True\",\r\n act=\"relu\",\r\n dropout_rate=0.0,\r\n **kwargs\r\n ):\r\n super(SUPER_MPNN, self).__init__()\r\n\r\n \r\n if batch_track_stats == \"False\":\r\n self.batch_track_stats = False \r\n else:\r\n self.batch_track_stats = True \r\n self.batch_norm = batch_norm\r\n self.pool = pool\r\n self.act = act\r\n self.pool_order = pool_order\r\n self.dropout_rate = dropout_rate\r\n \r\n ##Determine gc dimension dimension\r\n assert gc_count > 0, \"Need at least 1 GC layer\" \r\n if pre_fc_count == 0:\r\n gc_dim = data.num_features\r\n else:\r\n gc_dim = dim1\r\n ##Determine post_fc dimension\r\n if pre_fc_count == 0:\r\n post_fc_dim = data.num_features\r\n else:\r\n post_fc_dim = dim1\r\n ##Determine output dimension length\r\n if data[0].y.ndim == 0:\r\n output_dim = 1\r\n else:\r\n output_dim = len(data[0].y[0])\r\n\r\n ##Set up pre-GNN dense layers (NOTE: in v0.1 this is always set to 1 layer)\r\n if pre_fc_count > 0:\r\n self.pre_lin_list = torch.nn.ModuleList()\r\n for i in range(pre_fc_count):\r\n if i == 0:\r\n lin = torch.nn.Linear(data.num_features, dim1)\r\n self.pre_lin_list.append(lin)\r\n else:\r\n lin = torch.nn.Linear(dim1, dim1)\r\n self.pre_lin_list.append(lin)\r\n elif pre_fc_count == 0:\r\n self.pre_lin_list = torch.nn.ModuleList()\r\n\r\n ##Set up GNN layers\r\n self.conv_list = torch.nn.ModuleList()\r\n self.gru_list = torch.nn.ModuleList()\r\n self.bn_list = torch.nn.ModuleList()\r\n for i in range(gc_count):\r\n nn = Sequential(\r\n Linear(data.num_edge_features, dim3), ReLU(), Linear(dim3, gc_dim * gc_dim)\r\n )\r\n conv = NNConv(\r\n gc_dim, gc_dim, nn, aggr=\"mean\"\r\n ) \r\n self.conv_list.append(conv)\r\n gru = GRU(gc_dim, gc_dim)\r\n self.gru_list.append(gru)\r\n\r\n ##Track running stats set to false can prevent some instabilities; this causes other issues with different val/test performance from loader size?\r\n if self.batch_norm == \"True\":\r\n #bn = BatchNorm1d(gc_dim, track_running_stats=self.batch_track_stats)\r\n bn = DiffGroupNorm(gc_dim, 10, track_running_stats=self.batch_track_stats)\r\n self.bn_list.append(bn)\r\n\r\n ##Set up post-GNN dense layers (NOTE: in v0.1 there was a minimum of 2 dense layers, and fc_count(now post_fc_count) added to this number. 
In the current version, the minimum is zero)\r\n if post_fc_count > 0:\r\n self.post_lin_list = torch.nn.ModuleList()\r\n for i in range(post_fc_count):\r\n if i == 0:\r\n ##Set2set pooling has doubled dimension\r\n if self.pool_order == \"early\" and self.pool == \"set2set\":\r\n lin = torch.nn.Linear(post_fc_dim * 2, dim2)\r\n else:\r\n lin = torch.nn.Linear(post_fc_dim, dim2)\r\n self.post_lin_list.append(lin)\r\n else:\r\n lin = torch.nn.Linear(dim2, dim2)\r\n self.post_lin_list.append(lin)\r\n self.lin_out = torch.nn.Linear(dim2, output_dim)\r\n\r\n elif post_fc_count == 0:\r\n self.post_lin_list = torch.nn.ModuleList()\r\n if self.pool_order == \"early\" and self.pool == \"set2set\":\r\n self.lin_out = torch.nn.Linear(post_fc_dim*2, output_dim)\r\n else:\r\n self.lin_out = torch.nn.Linear(post_fc_dim, output_dim) \r\n\r\n ##Set up set2set pooling (if used)\r\n if self.pool_order == \"early\" and self.pool == \"set2set\":\r\n self.set2set = Set2Set(post_fc_dim, processing_steps=3)\r\n elif self.pool_order == \"late\" and self.pool == \"set2set\":\r\n self.set2set = Set2Set(output_dim, processing_steps=3, num_layers=1)\r\n # workaround for doubled dimension by set2set; if late pooling not reccomended to use set2set\r\n self.lin_out_2 = torch.nn.Linear(output_dim * 2, output_dim)\r\n\r\n def forward(self, data):\r\n\r\n ##Pre-GNN dense layers\r\n for i in range(0, len(self.pre_lin_list)):\r\n if i == 0:\r\n out = self.pre_lin_list[i](data.x)\r\n out = getattr(F, self.act)(out)\r\n #prev_out = out\r\n else:\r\n out = self.pre_lin_list[i](out)\r\n out = getattr(F, self.act)(out)\r\n #out = torch.add(out, prev_out)\r\n #prev_out = out\r\n prev_out = out\r\n\r\n ##GNN layers\r\n if len(self.pre_lin_list) == 0:\r\n h = data.x.unsqueeze(0) \r\n else:\r\n h = out.unsqueeze(0) \r\n for i in range(0, len(self.conv_list)):\r\n if len(self.pre_lin_list) == 0 and i == 0:\r\n if self.batch_norm == \"True\":\r\n m = self.conv_list[i](data.x, data.edge_index, data.edge_attr)\r\n m = self.bn_list[i](m)\r\n else:\r\n m = self.conv_list[i](data.x, data.edge_index, data.edge_attr)\r\n else:\r\n if self.batch_norm == \"True\":\r\n m = self.conv_list[i](out, data.edge_index, data.edge_attr)\r\n m = self.bn_list[i](m)\r\n else:\r\n m = self.conv_list[i](out, data.edge_index, data.edge_attr) \r\n m = getattr(F, self.act)(m) \r\n m = F.dropout(m, p=self.dropout_rate, training=self.training)\r\n out, h = self.gru_list[i](m.unsqueeze(0), h)\r\n out = out.squeeze(0)\r\n out = torch.add(out, prev_out)\r\n prev_out = out \r\n\r\n ##Post-GNN dense layers\r\n if self.pool_order == \"early\":\r\n if self.pool == \"set2set\":\r\n out = self.set2set(out, data.batch)\r\n else:\r\n out = getattr(torch_geometric.nn, self.pool)(out, data.batch)\r\n for i in range(0, len(self.post_lin_list)):\r\n out = self.post_lin_list[i](out)\r\n out = getattr(F, self.act)(out)\r\n #out = torch.add(out, prev_out)\r\n #prev_out = out\r\n out = self.lin_out(out)\r\n #out = torch.add(out, prev_out)\r\n #prev_out = out\r\n\r\n elif self.pool_order == \"late\":\r\n for i in range(0, len(self.post_lin_list)):\r\n out = self.post_lin_list[i](out)\r\n out = getattr(F, self.act)(out)\r\n #out = torch.add(out, prev_out)\r\n #prev_out = out\r\n out = self.lin_out(out)\r\n #out = torch.add(out, prev_out)\r\n #prev_out = out\r\n\r\n if self.pool == \"set2set\":\r\n out = self.set2set(out, data.batch)\r\n out = self.lin_out_2(out)\r\n #out = torch.add(out, prev_out)\r\n #prev_out = out\r\n else:\r\n out = getattr(torch_geometric.nn, self.pool)(out, 
data.batch)\r\n \r\n if out.shape[1] == 1:\r\n return out.view(-1)\r\n else:\r\n return out\r\n", "import torch\nfrom torch import Tensor\nimport torch.nn.functional as F\nfrom torch.nn import Sequential, Linear, BatchNorm1d\nimport torch_geometric\nfrom torch_geometric.nn import (\n Set2Set,\n global_mean_pool,\n global_add_pool,\n global_max_pool,\n)\nfrom torch_scatter import scatter_mean, scatter_add, scatter_max, scatter\nfrom torch_geometric.nn.models.schnet import InteractionBlock\n\n# Schnet\nclass SchNet(torch.nn.Module):\n def __init__(\n self,\n data,\n dim1=64,\n dim2=64,\n dim3=64,\n cutoff=8,\n pre_fc_count=1,\n gc_count=3,\n post_fc_count=1,\n pool=\"global_mean_pool\",\n pool_order=\"early\",\n batch_norm=\"True\",\n batch_track_stats=\"True\",\n act=\"relu\",\n dropout_rate=0.0,\n **kwargs\n ):\n super(SchNet, self).__init__()\n \n if batch_track_stats == \"False\":\n self.batch_track_stats = False \n else:\n self.batch_track_stats = True \n self.batch_norm = batch_norm\n self.pool = pool\n self.act = act\n self.pool_order = pool_order\n self.dropout_rate = dropout_rate\n \n ##Determine gc dimension dimension\n assert gc_count > 0, \"Need at least 1 GC layer\" \n if pre_fc_count == 0:\n gc_dim = data.num_features\n else:\n gc_dim = dim1\n ##Determine post_fc dimension\n if pre_fc_count == 0:\n post_fc_dim = data.num_features\n else:\n post_fc_dim = dim1\n ##Determine output dimension length\n if data[0].y.ndim == 0:\n output_dim = 1\n else:\n output_dim = len(data[0].y[0])\n\n ##Set up pre-GNN dense layers (NOTE: in v0.1 this is always set to 1 layer)\n if pre_fc_count > 0:\n self.pre_lin_list = torch.nn.ModuleList()\n for i in range(pre_fc_count):\n if i == 0:\n lin = torch.nn.Linear(data.num_features, dim1)\n self.pre_lin_list.append(lin)\n else:\n lin = torch.nn.Linear(dim1, dim1)\n self.pre_lin_list.append(lin)\n elif pre_fc_count == 0:\n self.pre_lin_list = torch.nn.ModuleList()\n\n ##Set up GNN layers \n self.conv_list = torch.nn.ModuleList()\n self.bn_list = torch.nn.ModuleList()\n for i in range(gc_count):\n conv = InteractionBlock(gc_dim, data.num_edge_features, dim3, cutoff)\n self.conv_list.append(conv)\n ##Track running stats set to false can prevent some instabilities; this causes other issues with different val/test performance from loader size?\n if self.batch_norm == \"True\":\n bn = BatchNorm1d(gc_dim, track_running_stats=self.batch_track_stats)\n self.bn_list.append(bn)\n\n ##Set up post-GNN dense layers (NOTE: in v0.1 there was a minimum of 2 dense layers, and fc_count(now post_fc_count) added to this number. 
In the current version, the minimum is zero)\n if post_fc_count > 0:\n self.post_lin_list = torch.nn.ModuleList()\n for i in range(post_fc_count):\n if i == 0:\n ##Set2set pooling has doubled dimension\n if self.pool_order == \"early\" and self.pool == \"set2set\":\n lin = torch.nn.Linear(post_fc_dim * 2, dim2)\n else:\n lin = torch.nn.Linear(post_fc_dim, dim2)\n self.post_lin_list.append(lin)\n else:\n lin = torch.nn.Linear(dim2, dim2)\n self.post_lin_list.append(lin)\n self.lin_out = torch.nn.Linear(dim2, output_dim)\n\n elif post_fc_count == 0:\n self.post_lin_list = torch.nn.ModuleList()\n if self.pool_order == \"early\" and self.pool == \"set2set\":\n self.lin_out = torch.nn.Linear(post_fc_dim*2, output_dim)\n else:\n self.lin_out = torch.nn.Linear(post_fc_dim, output_dim) \n\n ##Set up set2set pooling (if used)\n if self.pool_order == \"early\" and self.pool == \"set2set\":\n self.set2set = Set2Set(post_fc_dim, processing_steps=3)\n elif self.pool_order == \"late\" and self.pool == \"set2set\":\n self.set2set = Set2Set(output_dim, processing_steps=3, num_layers=1)\n # workaround for doubled dimension by set2set; if late pooling not reccomended to use set2set\n self.lin_out_2 = torch.nn.Linear(output_dim * 2, output_dim)\n\n def forward(self, data):\n\n ##Pre-GNN dense layers\n for i in range(0, len(self.pre_lin_list)):\n if i == 0:\n out = self.pre_lin_list[i](data.x)\n out = getattr(F, self.act)(out)\n else:\n out = self.pre_lin_list[i](out)\n out = getattr(F, self.act)(out)\n\n ##GNN layers\n for i in range(0, len(self.conv_list)):\n if len(self.pre_lin_list) == 0 and i == 0:\n if self.batch_norm == \"True\":\n out = data.x + self.conv_list[i](data.x, data.edge_index, data.edge_weight, data.edge_attr)\n out = self.bn_list[i](out)\n else:\n out = data.x + self.conv_list[i](data.x, data.edge_index, data.edge_weight, data.edge_attr)\n else:\n if self.batch_norm == \"True\":\n out = out + self.conv_list[i](out, data.edge_index, data.edge_weight, data.edge_attr)\n out = self.bn_list[i](out)\n else:\n out = out + self.conv_list[i](out, data.edge_index, data.edge_weight, data.edge_attr) \n #out = getattr(F, self.act)(out)\n out = F.dropout(out, p=self.dropout_rate, training=self.training)\n\n ##Post-GNN dense layers\n if self.pool_order == \"early\":\n if self.pool == \"set2set\":\n out = self.set2set(out, data.batch)\n else:\n out = getattr(torch_geometric.nn, self.pool)(out, data.batch)\n for i in range(0, len(self.post_lin_list)):\n out = self.post_lin_list[i](out)\n out = getattr(F, self.act)(out)\n out = self.lin_out(out)\n\n elif self.pool_order == \"late\":\n for i in range(0, len(self.post_lin_list)):\n out = self.post_lin_list[i](out)\n out = getattr(F, self.act)(out)\n out = self.lin_out(out)\n if self.pool == \"set2set\":\n out = self.set2set(out, data.batch)\n out = self.lin_out_2(out)\n else:\n out = getattr(torch_geometric.nn, self.pool)(out, data.batch)\n \n if out.shape[1] == 1:\n return out.view(-1)\n else:\n return out\n" ]
[ [ "torch.add", "torch.nn.functional.dropout", "torch.nn.ModuleList", "torch.nn.GRU", "torch.nn.Linear", "torch.nn.ReLU" ], [ "torch.nn.Linear", "torch.nn.ModuleList", "torch.nn.BatchNorm1d", "torch.nn.functional.dropout" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
foocker/Image2Katex
[ "775efd6a68168fe46572e5017a861321c342e623" ]
[ "models/decoder.py" ]
[ "'''\nFilename: decoder.py\nProject: models\nFile Created: Wednesday, 11th July 2018 3:37:09 pm\nAuthor: xiaofeng ([email protected])\n--------------------------\nLast Modified: Sunday, 2nd December 2018 4:09:59 pm\nModified By: xiaofeng ([email protected])\n---------------------------\nCopyright: 2018.06 - 2018 OnionMath. OnionMath\n'''\n\nfrom __future__ import division\n\nimport tensorflow as tf\n\nfrom models.component.attention_cell_compile import AttCell\nfrom models.component.decoder_beamsearch import BeamSearchDecoderCell\n# from .component.attention_cell_step import AttCell\nfrom models.component.decoder_dynamic import dynamic_decode\nfrom models.component.decoder_greedy import GreedyDecoderCell\nfrom tensorflow.contrib.rnn import GRUCell, LSTMCell\nfrom models.component.LnRnn import LNGRUCell, LNLSTMCell\nfrom models.component.word_embeding import Embedding, embedding_initializer\n\n\nclass DecoderAtt(object):\n def __init__(self, config, vocab):\n self._config = config\n self._vocab = vocab\n self._name = self._config.model.get('decoder_name')\n self._vocabsize = self._vocab.vocab_size\n self._id_end = self._config.dataset.get('id_end')\n self._embeding_dim = self._config.model.get('embeding_dims')\n self._encoder_dim = self._config.model.get('rnn_encoder_dim')\n self._decoder_dim = self._config.model.get('rnn_decoder_dim')\n self._att_dim = self._config.model.get('att_dim')\n assert self._encoder_dim * 2 == self._decoder_dim, \\\n \"Encoder bilstm out dim is the double encoder dim and it must be equal with decoder dim\"\n\n self._tiles = 1 if self._config.model.decoding == 'greedy' else self._config.model.beam_size\n\n self._vocab_embeding = tf.get_variable(\n \"vocab_embeding\", dtype=tf.float32, shape=[self._vocabsize, self._embeding_dim],\n initializer=embedding_initializer())\n self._start_token = tf.squeeze(input=self._vocab_embeding[0, :], name='start_flage')\n\n def __call__(self, encoder_out, droupout, input_sequence=None):\n\n self._batch_size = tf.shape(encoder_out)[0]\n\n with tf.variable_scope(self._name, reuse=False):\n sequence_embeding = Embedding('embeding', self._vocab_embeding, input_sequence)\n # attention cell come from Rnn\n \"\"\" Uniform gru cell \"\"\"\n RnnCell = GRUCell(name='DecoderGru', num_units=self._decoder_dim)\n \"\"\" LN gru cell \"\"\"\n # RnnCell = LNGRUCell(name='DecoderGru', num_units=self._decoder_dim)\n att_cell = AttCell(\n name='AttCell', att_input=encoder_out, cell=RnnCell, n_hid=self._decoder_dim,\n dim_att=self._att_dim, dim_o=self._decoder_dim, dropuout=droupout,\n vacab_size=self._vocabsize)\n # [batch,sequence_length]\n # sequence_length is equal with the input label length\n sequence_length = tf.tile(tf.expand_dims(\n tf.shape(sequence_embeding)[1], 0), [self._batch_size])\n\n pred_train, _ = tf.nn.dynamic_rnn(\n att_cell, sequence_embeding, initial_state=att_cell.initial_state(),\n sequence_length=sequence_length, dtype=tf.float32, swap_memory=True)\n # evaluating , predict\n with tf.variable_scope(self._name, reuse=True):\n \"\"\" uniform gru cell \"\"\"\n RnnCell = GRUCell(name='DecoderGru', num_units=self._decoder_dim)\n \"\"\" LN gru cell \"\"\"\n # RnnCell = LNGRUCell(name='DecoderGru', num_units=self._decoder_dim)\n att_cell = AttCell(\n name='AttCell', att_input=encoder_out, cell=RnnCell, n_hid=self._decoder_dim,\n dim_att=self._att_dim, dim_o=self._decoder_dim, dropuout=droupout,\n vacab_size=self._vocabsize, tiles=self._tiles)\n if self._config.model.decoding == 'beams_search':\n decoder_cell = 
BeamSearchDecoderCell(\n self._vocab_embeding, att_cell, self._batch_size, self._start_token,\n self._id_end, self._config.model.beam_size,\n self._config.model.div_gamma, self._config.model.div_prob)\n else:\n decoder_cell = GreedyDecoderCell(\n self._vocab_embeding, att_cell, self._batch_size, self._start_token,\n self._id_end)\n pred_validate, _ = dynamic_decode(\n decoder_cell, self._config.model.MaxPredictLength + 1)\n\n return pred_train, pred_validate\n" ]
[ [ "tensorflow.variable_scope", "tensorflow.squeeze", "tensorflow.contrib.rnn.GRUCell", "tensorflow.shape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "1.0", "1.2" ] } ]
tareknaous/visual-clustering
[ "73ab04b560c72917ddb5ad69594afee59ebd44d7", "73ab04b560c72917ddb5ad69594afee59ebd44d7" ]
[ "dataset.py", "dataset/utils/functions.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\n\n#blobs\nn_samples = 1500\nblobs = datasets.make_blobs(n_samples=n_samples, centers=4, random_state=3)\n# plt.scatter(blobs[0][:,0],blobs[0][:,1])\n# plt.show()\n\ncluster_0_points = []\ncluster_1_points = []\ncluster_2_points = []\ncluster_3_points = []\n\nfor i in range(0,len(blobs[0])):\n if blobs[1][i] == 0:\n cluster_0_points.append(blobs[0][i])\n if blobs[1][i] == 1:\n cluster_1_points.append(blobs[0][i])\n if blobs[1][i] == 2:\n cluster_2_points.append(blobs[0][i])\n if blobs[1][i] == 3:\n cluster_3_points.append(blobs[0][i])\n\n\nclusters = []\n\nclusters.append(cluster_0_points)\nclusters.append(cluster_1_points)\nclusters.append(cluster_2_points)\nclusters.append(cluster_3_points)\n\n\n\nfrom scipy.spatial import ConvexHull, convex_hull_plot_2d\nimport matplotlib.pyplot as plt\n\n#Cluster 0\nhull_0 = ConvexHull(cluster_0_points)\npoints_0 = np.array(cluster_0_points)\n\nfor simplex in hull_0.simplices:\n plt.plot(points_0[simplex, 0], points_0[simplex, 1], 'k-')\n\n\n\n#Cluster 1\nhull_1 = ConvexHull(cluster_1_points)\npoints_1 = np.array(cluster_1_points)\n\nfor simplex in hull_1.simplices:\n plt.plot(points_1[simplex, 0], points_1[simplex, 1], 'k-')\n\n\n#Cluster 2\nhull_2 = ConvexHull(cluster_2_points)\npoints_2 = np.array(cluster_2_points)\n\nfor simplex in hull_2.simplices:\n plt.plot(points_2[simplex, 0], points_2[simplex, 1], 'k-')\n\n\n\n#Cluster 3\nhull_3 = ConvexHull(cluster_3_points)\npoints_3 = np.array(cluster_3_points)\n\nfor simplex in hull_3.simplices:\n plt.plot(points_3[simplex, 0], points_3[simplex, 1], 'k-')\n\n\nplt.show()", "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\nimport alphashape\nfrom scipy.spatial import ConvexHull, convex_hull_plot_2d\nfrom shapely.ops import cascaded_union\nfrom shapely.geometry import Polygon\n\ndef find_intersections(polygons):\n 'CONSIDER CLUSTERS SHOULD BE UNITED BASED ON PERCENTAGE OF INTERSECTION'\n 'Add in dictionary whether the intersection should be united or subtracted'\n #Percentage threshold for uniting polygons\n THRESHOLD = 30\n #create empty dictionary\n intersections = dict()\n\n #create keys in dictionary\n for i in range(0,len(polygons)):\n key = i\n intersections[key] = []\n \n #Add intersections in the dictionary based on percentage criterion\n for i in range(0,len(polygons)):\n for j in range(i+1,len(polygons)):\n intersection_percentage = []\n intersection_percentage.append((polygons[i].intersection(polygons[j]).area)/polygons[i].area*100)\n intersection_percentage.append((polygons[i].intersection(polygons[j]).area)/polygons[j].area*100)\n \n if polygons[i].intersects(polygons[j]) == True:\n if intersection_percentage[0] >=THRESHOLD or intersection_percentage[0] >= THRESHOLD:\n key = i\n value = [j, 'union']\n intersections[key].append(value)\n else:\n key = i\n value = [j, 'subtraction']\n intersections[key].append(value)\n\n return intersections\n\n\n\n\ndef return_unique_polygons(intersections):\n 'updated with union and subtraction criteria'\n remove = [] #used to store index of keys to remove\n\n #check which keys in the dictionary will need to be removed\n for key in intersections:\n for value in intersections[key]:\n if value[0] in intersections:\n remove.append(value[0])\n\n #remove key from dictionary\n for i in range(0,len(remove)):\n #Add exception if code was trying to remove key that was already removed\n try:\n intersections.pop(remove[i])\n except KeyError:\n continue\n\n 
return intersections\n\n\n\n\ndef plot_new_polygons(unique_dictionary, polygons):\n 'Subtracts polygons with intersection % below threshold, and combine polygons with intersection % above threshold'\n\n mask_polygons = []\n\n #Variable to decide whether to perform subtraction in case we have 3 or more intersecting polygons\n need_subtract = False\n\n for key in unique_dictionary:\n need_subtract = False\n #check if the key is empty (has not values)\n if not unique_dictionary[key]:\n #plot the polygon with no intersections\n x,y = polygons[key].exterior.xy\n # plt.plot(x,y)\n mask_polygons.append(polygons[key])\n\n else:\n #create an array to add the polygons to be merged\n combination_merge = []\n #added the polygon in the key itself\n combination_merge.append(polygons[key])\n #create an array to add the polygons to be subtracted, in case there is any\n combination_substract = []\n\n for value in unique_dictionary[key]:\n if value[1] == 'union':\n combination_merge.append(polygons[value[0]])\n\n elif value[1] == 'subtraction':\n combination_substract.append(polygons[value[0]])\n need_subtract = True\n \n #merge the polygons to be merged\n merged = cascaded_union(combination_merge)\n\n #If no need to subtract, then just plot the merged polygons\n if need_subtract == False:\n x,y = merged.exterior.xy\n # plt.plot(x,y)\n mask_polygons.append(merged)\n\n elif need_subtract == True:\n #subtract the one to be subtracted from the merged ones\n subtracted = []\n for i in range(0,len(combination_substract)):\n subtracted.append(merged.symmetric_difference(combination_substract[i]))\n for j in range(0,len(subtracted[i])):\n x,y = subtracted[i][j].exterior.xy\n # plt.plot(x,y)\n mask_polygons.append(subtracted[i][j])\n \n return mask_polygons\n\n\n\ndef create_mask(polygons):\n for i in range(0,len(polygons)):\n x, y = polygons[i].exterior.xy\n plt.fill(x,y, \"black\")\n plt.axis('off')\n\n\n\ndef create_polygons(type, num_samples, num_clusters, random_state, *cluster_std, keep_points=False):\n if type == 'blobs': # works fine\n data = datasets.make_blobs(n_samples=num_samples, centers=num_clusters, random_state=random_state,\n center_box=(-30, 30))\n\n elif type == 'aniso': # works fine\n X, y = datasets.make_blobs(n_samples=num_samples, centers=num_clusters, random_state=random_state, center_box=(-30, 30))\n transformation = [[0.6, -0.6], [-0.4, 0.8]]\n X_aniso = np.dot(X, transformation)\n data = (X_aniso, y)\n\n elif type == 'noisy_moons': # works fine\n data = datasets.make_moons(n_samples=num_samples, noise=.05)\n if num_clusters != 2:\n raise Exception(\"Can only take 2 clusters for noisy_moons\")\n\n elif type == 'noisy_circles': # works fine\n data = datasets.make_circles(n_samples=num_samples, factor=.01, noise=.2)\n if num_clusters != 2:\n raise Exception(\"Can only take 2 clusters for noisy_circles\")\n\n elif type == 'varied_blobs': # works fine\n cluster_std = 1.5 * np.random.random(num_clusters)\n data = datasets.make_blobs(n_samples=num_samples,\n centers=num_clusters,\n cluster_std=cluster_std,\n random_state=random_state,\n center_box=(-30, 30))\n if keep_points==True:\n plt.figure()\n plt.scatter(data[0][:, 0], data[0][:, 1], s=1, c='black')\n plt.axis('off')\n\n # Create a list of empty arrays for each cluster\n clusters = [[] for _ in range(num_clusters)]\n\n # Check each point to which cluster it belongs and append to the list accordingly\n for i in range(0, len(data[0])):\n cluster_index = data[1][i]\n clusters[cluster_index].append(data[0][i])\n\n # Create emtpy arrays for 
convex hulls and data points\n hulls = [[] for _ in range(num_clusters)]\n points = [[] for _ in range(num_clusters)]\n hulls_vertices = [[] for _ in range(num_clusters)]\n\n # Use the Concave Hull for the noisy moons shape\n if type == \"noisy_moons\":\n ALPHA = 5\n for i in range(0, len(clusters)):\n hull = alphashape.alphashape(np.array(clusters[i]), ALPHA)\n hull_pts = hull.exterior.coords.xy\n hulls[i] = hull_pts\n\n # Append vertices\n for i in range(0, len(hulls)):\n for j in range(0, len(hulls[0][i])):\n vertex = [hulls[i][0][j], hulls[i][1][j]]\n hulls_vertices[i].append(vertex)\n\n\n # Use the ConvexHull for all other shapes\n else:\n # Append the hulls\n for i in range(0, len(clusters)):\n hulls[i] = ConvexHull(clusters[i])\n\n # Append vertices of the hulls\n for i in range(0, len(hulls)):\n for j in range(0, len(hulls[i].vertices)):\n hulls_vertices[i].append(clusters[i][hulls[i].vertices[j]])\n\n # Create empty array to append the polygons\n polygons = []\n\n # Create polygons from hull vertices\n for i in range(0, len(hulls_vertices)):\n polygon = Polygon(np.array(hulls_vertices[i]))\n polygons.append(polygon)\n\n return polygons" ]
[ [ "sklearn.datasets.make_blobs", "matplotlib.pyplot.plot", "scipy.spatial.ConvexHull", "numpy.array", "matplotlib.pyplot.show" ], [ "numpy.dot", "numpy.random.random", "matplotlib.pyplot.scatter", "scipy.spatial.ConvexHull", "sklearn.datasets.make_moons", "sklearn.datasets.make_circles", "matplotlib.pyplot.fill", "matplotlib.pyplot.axis", "numpy.array", "sklearn.datasets.make_blobs", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wgfi110/athena
[ "e704884ec6a3a947769d892aa267578038e49ecb", "e704884ec6a3a947769d892aa267578038e49ecb", "e704884ec6a3a947769d892aa267578038e49ecb" ]
[ "athena/data/datasets/base.py", "athena/data/datasets/preprocess.py", "athena/transform/feats/pitch_test.py" ]
[ "# coding=utf-8\n# Copyright (C) 2019 ATHENA AUTHORS; Xiangang Li; Shuaijiang Zhao\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\" base dataset \"\"\"\n\nimport math\nimport random\nimport os\nfrom absl import logging\nimport tensorflow as tf\nfrom athena.transform import AudioFeaturizer\nfrom ..feature_normalizer import FeatureNormalizer\nfrom ...utils.hparam import register_and_parse_hparams\nfrom ...utils.data_queue import DataQueue\n\n\ndef data_loader(dataset_builder, batch_size=16, num_threads=1):\n \"\"\"data loader\n \"\"\"\n num_samples = len(dataset_builder)\n if num_samples == 0:\n raise ValueError(\"num samples is empty\")\n\n if num_threads == 1:\n def _gen_data():\n \"\"\"multi thread loader\n \"\"\"\n for i in range(num_samples):\n yield dataset_builder[i]\n else:\n # multi-thread\n logging.info(\"loading data using %d threads\" % num_threads)\n data_queue = DataQueue(\n lambda i: dataset_builder[i],\n capacity=4096,\n num_threads=num_threads,\n max_index=num_samples\n )\n def _gen_data():\n \"\"\"multi thread loader\n \"\"\"\n for _ in range(num_samples):\n yield data_queue.get()\n\n # make dataset using from_generator\n dataset = tf.compat.v2.data.Dataset.from_generator(\n _gen_data,\n output_types=dataset_builder.sample_type,\n output_shapes=dataset_builder.sample_shape,\n )\n\n # Padding the features to its max length dimensions.\n dataset = dataset.padded_batch(\n batch_size=batch_size,\n padded_shapes=dataset_builder.sample_shape,\n drop_remainder=True,\n )\n\n # Prefetch to improve speed of input pipeline.\n dataset = dataset.prefetch(buffer_size=500)\n return dataset\n\n\nclass BaseDatasetBuilder:\n \"\"\"base dataset builder\n \"\"\"\n default_config = {}\n\n def __init__(self, config=None):\n # hparams\n self.hparams = register_and_parse_hparams(\n self.default_config, config, cls=self.__class__)\n logging.info(\"hparams: {}\".format(self.hparams))\n self.entries = []\n\n def reload_config(self, config):\n \"\"\" reload the config \"\"\"\n if config is not None:\n self.hparams.override_from_dict(config)\n\n def preprocess_data(self, file_path):\n \"\"\" loading data \"\"\"\n raise NotImplementedError\n\n def __getitem__(self, index):\n raise NotImplementedError\n\n def __len__(self):\n return len(self.entries)\n\n @property\n def sample_type(self):\n \"\"\"example types\n \"\"\"\n raise NotImplementedError\n\n @property\n def sample_shape(self):\n \"\"\"examples shapes\n \"\"\"\n raise NotImplementedError\n\n @property\n def sample_signature(self):\n \"\"\"examples signature\n \"\"\"\n raise NotImplementedError\n\n def as_dataset(self, batch_size=16, num_threads=1):\n \"\"\"return tf.data.Dataset object\n \"\"\"\n return data_loader(self, batch_size, num_threads)\n\n def shard(self, num_shards, index):\n \"\"\"creates a Dataset that includes only 1/num_shards of this dataset\n \"\"\"\n if index >= num_shards:\n raise ValueError(\"the index should smaller the num_shards\")\n 
logging.info(\"Creates the sub-dataset which is the %d part of %d\" % (index, num_shards))\n original_entries = self.entries\n self.entries = []\n total_samples = (len(original_entries) // num_shards) * num_shards\n for i in range(total_samples):\n if i % num_shards == index:\n self.entries.append(original_entries[i])\n return self\n\n def batch_wise_shuffle(self, batch_size=64):\n \"\"\"Batch-wise shuffling of the data entries.\n\n Each data entry is in the format of (audio_file, file_size, transcript).\n If epoch_index is 0 and sortagrad is true, we don't perform shuffling and\n return entries in sorted file_size order. Otherwise, do batch_wise shuffling.\n\n Args:\n batch_size (int, optional): an integer for the batch size. Defaults to 64.\n \"\"\"\n if len(self.entries) == 0:\n return self\n logging.info(\"perform batch_wise_shuffle with batch_size %d\" % batch_size)\n max_buckets = int(math.floor(len(self.entries) / batch_size))\n total_buckets = list(range(max_buckets))\n random.shuffle(total_buckets)\n shuffled_entries = []\n for i in total_buckets:\n shuffled_entries.extend(self.entries[i * batch_size : (i + 1) * batch_size])\n shuffled_entries.extend(self.entries[max_buckets * batch_size :])\n self.entries = shuffled_entries\n return self\n\n def compute_cmvn_if_necessary(self, is_necessary=True):\n \"\"\" compute cmvn file\n \"\"\"\n return self\n\n\nclass SpeechBaseDatasetBuilder(BaseDatasetBuilder):\n \"\"\" speech base dataset \"\"\"\n default_config = {\n \"audio_config\": {\"type\": \"Fbank\"},\n \"num_cmvn_workers\": 1,\n \"cmvn_file\": None,\n \"data_csv\": None\n }\n\n def __init__(self, config=None):\n super().__init__(config=config)\n self.speakers = []\n self.audio_featurizer = AudioFeaturizer(self.hparams.audio_config)\n self.feature_normalizer = FeatureNormalizer(self.hparams.cmvn_file)\n\n @property\n def num_class(self):\n \"\"\" return the number of classes \"\"\"\n raise NotImplementedError\n\n def compute_cmvn_if_necessary(self, is_necessary=True):\n \"\"\"vitural interface\n \"\"\"\n if not is_necessary:\n return self\n if os.path.exists(self.hparams.cmvn_file):\n return self\n feature_dim = self.audio_featurizer.dim * self.audio_featurizer.num_channels\n with tf.device(\"/cpu:0\"):\n self.feature_normalizer.compute_cmvn(\n self.entries, self.speakers, self.audio_featurizer, feature_dim,\n self.hparams.num_cmvn_workers\n )\n self.feature_normalizer.save_cmvn([\"speaker\", \"mean\", \"var\"])\n return self\n", "# coding=utf-8\n# Copyright (C) ATHENA AUTHORS; Ruixiong Zhang;\n# All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=missing-function-docstring, invalid-name\n\n\"\"\" preprecessing for speech features \"\"\"\nimport random\nimport tensorflow as tf\nfrom PIL import Image\nfrom ...utils.hparam import register_and_parse_hparams\nimport tensorflow as tf\n\nclass SpecAugment:\n \"\"\"Implementation of specaugument from paper \"SpecAugment: A 
Simple Data\n Augmentation Method for Automatic Speech Recognition\"\n\n Args:\n preprocess_config: it contains configs below::\n\n time_warping: warped time parameter, should be in (0, time / 2),\n a random horizontal center point in (W, time_steps - W) will be warped\n either to left or right by a distance chosen from range [0, W) randomly.\n time_masking: masked time range, should be in (0, time_steps),\n the final masked time steps will be [t_0, t_0 + t),\n t is random from[0, T), t_0 is random from [0, time_steps - t)\n frequency_masking: masked frequency range, should be in (0, dimension),\n the final masked frequencies will be [f_0, f_0 + f),\n f is random from[0, F), f_0 is random from [0, dimension - f)\n mask_cols: masking operation is executed mask_cols times in each axis\n \"\"\"\n\n default_config = {\n \"time_warping\": 0,\n \"time_masking\": 0,\n \"frequency_masking\": 0,\n \"mask_cols\": 0,\n \"mask_type\": \"mean\"\n }\n\n def __init__(self, preprocess_config):\n hparams = register_and_parse_hparams(self.default_config, preprocess_config, cls=self.__class__)\n self.time_warping = hparams.time_warping\n self.time_masking = hparams.time_masking\n self.frequency_masking = hparams.frequency_masking\n self.mask_cols = hparams.mask_cols\n self.mask_type = hparams.mask_type\n\n def __call__(self, feat):\n \"\"\"spec augment preprocess for audio features\n\n Args:\n feat: audio features, shape should be [time_steps, dimension, channels]\n\n Returns:\n processed features\n \"\"\"\n feat = self.feat_time_warping(feat)\n feat = self.feat_masking(feat, axis=0, mask_num=self.time_masking)\n feat = self.feat_masking(feat, axis=1, mask_num=self.frequency_masking)\n return feat\n\n def feat_time_warping(self, feat):\n \"\"\"time warping for spec agument\n\n Args:\n feat: audio features, shape should be [time_steps, dimension, channels]\n\n Returns:\n time warped features\n \"\"\"\n time_steps = feat.shape[0]\n if self.time_warping >= time_steps - self.time_warping - 1 or self.time_warping == 0:\n return feat\n feat = tf.squeeze(feat).numpy()\n center = random.randrange(self.time_warping + 1, time_steps - self.time_warping)\n distance = random.randrange(-self.time_warping + 1, self.time_warping)\n warped = center + distance\n left = Image.fromarray(feat[:center]).resize(\n (feat.shape[1], warped), Image.BICUBIC)\n right = Image.fromarray(feat[center:]).resize(\n (feat.shape[1], time_steps - warped), Image.BICUBIC)\n feat[:warped] = left\n feat[warped:] = right\n warped_feat = tf.expand_dims(tf.convert_to_tensor(feat), -1)\n return warped_feat\n\n def feat_masking(self, feat, axis=0, mask_num=0):\n \"\"\"masking for spec augment\n\n Args:\n feat: audio features, shape should be [time_steps, dimension, channels]\n axis (int, optional): the axis to be masked. Defaults to 0.\n mask_num (int, optional): masked time or frequency range. 
Defaults to 0.\n\n Returns:\n masked features\n \"\"\"\n time_steps, freq_dim, channels = feat.shape\n\n dim_size = tf.shape(feat)[axis]\n if mask_num > dim_size or mask_num == 0 or self.mask_cols == 0:\n return feat\n\n total_t = tf.random.uniform([self.mask_cols, 1], 0, mask_num, tf.int32)\n max_t = tf.reduce_max(total_t)\n t_0 = tf.random.uniform([self.mask_cols, 1], 0, dim_size - max_t, tf.int32)\n t_end = t_0 + total_t\n t_0 = tf.tile(t_0, [1, dim_size]) # (mask_cols, dim_size)\n t_end = tf.tile(t_end, [1, dim_size]) # (mask_cols, dim_size)\n base_mask = tf.expand_dims(tf.range(dim_size), axis=0)\n base_mask = tf.tile(base_mask, [self.mask_cols, 1])\n # (time_mask_cols, time_steps)\n mask = tf.math.logical_xor(t_end <= base_mask, base_mask < t_0)\n final_mask = mask[0]\n for mask_bran in mask:\n final_mask = tf.math.logical_and(final_mask, mask_bran)\n if axis == 0:\n final_mask = tf.tile(final_mask[:, tf.newaxis, tf.newaxis], [1, freq_dim, channels])\n elif axis == 1:\n final_mask = tf.tile(final_mask[tf.newaxis, :, tf.newaxis], [time_steps, 1, channels])\n if self.mask_type == \"mean\":\n mask_metrics = tf.tile(tf.reduce_mean(feat, keepdims=True),\n [time_steps, freq_dim, channels])\n else:\n mask_metrics = tf.zeros(shape=[time_steps, freq_dim, channels])\n feat = tf.where(final_mask, feat, mask_metrics)\n return feat\n", "# Copyright (C) ATHENA AUTHORS\n# All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"The model tests pitch FE.\"\"\"\n\nimport os\nfrom pathlib import Path\nimport tensorflow as tf\nfrom tensorflow.python.framework.ops import disable_eager_execution\nfrom athena.transform.feats.read_wav import ReadWav\nfrom athena.transform.feats.pitch import Pitch\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\n\nclass SpectrumTest(tf.test.TestCase):\n \"\"\"Pitch extraction test.\"\"\"\n def test_spectrum(self):\n \"\"\"Test Pitch using 16kHz && 8kHz wav.\"\"\"\n wav_path_16k = str(\n Path(os.environ[\"MAIN_ROOT\"]).joinpath(\"examples/sm1_cln.wav\")\n )\n wav_path_8k = str(\n Path(os.environ[\"MAIN_ROOT\"]).joinpath(\"examples/english.wav\")\n )\n\n with self.session():\n for wav_file in [wav_path_16k]:\n read_wav = ReadWav.params().instantiate()\n input_data, sample_rate = read_wav(wav_file)\n\n pitch = Pitch.params(\n {\"window_length\": 0.025, \"soft_min_f0\": 10.0}\n ).instantiate()\n pitch_test = pitch(input_data, sample_rate)\n\n if tf.executing_eagerly():\n self.assertEqual(tf.rank(pitch_test).numpy(), 2)\n else:\n self.assertEqual(tf.rank(pitch_test).eval(), 2)\n\n output_true = [\n [-0.1366025, 143.8855],\n [-0.0226383, 143.8855],\n [-0.08464742, 143.8855],\n [-0.08458386, 143.8855],\n [-0.1208689, 143.8855],\n ]\n\n if wav_file == wav_path_16k:\n if tf.executing_eagerly():\n print(\"Transform: \", pitch_test.numpy()[0:5, :])\n print(\"kaldi:\", output_true)\n self.assertAllClose(\n pitch_test.numpy()[0:5, :],\n output_true,\n rtol=1e-05,\n atol=1e-05,\n )\n 
else:\n print(\"Transform: \", pitch_test.eval())\n print(\"kaldi:\", output_true)\n self.assertAllClose(\n pitch_test.eval()[0:5, :],\n output_true,\n rtol=1e-05,\n atol=1e-05,\n )\n\n\nif __name__ == \"__main__\":\n\n is_eager = True\n if not is_eager:\n disable_eager_execution()\n else:\n if tf.__version__ < \"2.0.0\":\n tf.compat.v1.enable_eager_execution()\n tf.test.main()\n" ]
[ [ "tensorflow.compat.v2.data.Dataset.from_generator", "tensorflow.device" ], [ "tensorflow.convert_to_tensor", "tensorflow.reduce_max", "tensorflow.math.logical_xor", "tensorflow.shape", "tensorflow.range", "tensorflow.zeros", "tensorflow.reduce_mean", "tensorflow.random.uniform", "tensorflow.squeeze", "tensorflow.math.logical_and", "tensorflow.where", "tensorflow.tile" ], [ "tensorflow.executing_eagerly", "tensorflow.compat.v1.enable_eager_execution", "tensorflow.test.main", "tensorflow.rank", "tensorflow.python.framework.ops.disable_eager_execution" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.6", "1.13", "2.3", "2.4", "2.9", "2.5", "2.2", "2.10" ] } ]
wangbingok1118/SSD_Pytorch
[ "8d3f924671cec367c3c420eba2f002cc5b5181bb" ]
[ "demo.py" ]
[ "import os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1,0\"\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nimport torch.nn.init as init\nimport argparse\nfrom torch.autograd import Variable\nimport torch.utils.data as data\nfrom data import COCODetection, VOCDetection, detection_collate, BaseTransform, preproc\nfrom layers.modules import MultiBoxLoss, RefineMultiBoxLoss\nfrom layers.functions import Detect\nfrom utils.nms_wrapper import nms, soft_nms\nfrom configs.config import cfg, cfg_from_file, VOC_CLASSES, COCO_CLASSES\nfrom utils.box_utils import draw_rects\nimport numpy as np\nimport time\nimport os\nimport sys\nimport pickle\nimport datetime\nfrom models.model_builder import SSD\nimport yaml\nimport cv2\n\n\ndef arg_parse():\n parser = argparse.ArgumentParser(\n description='Single Shot MultiBox Detection')\n parser.add_argument(\n \"--images\",\n dest='images',\n help=\"Image / Directory containing images to perform detection upon\",\n default=\"images\",\n type=str)\n parser.add_argument(\n '--weights',\n default='weights/ssd_darknet_300.pth',\n type=str,\n help='Trained state_dict file path to open')\n parser.add_argument(\n '--cfg',\n dest='cfg_file',\n help='Config file for training (and optionally testing)')\n parser.add_argument(\n '--save_folder',\n default='eval/',\n type=str,\n help='File path to save results')\n parser.add_argument(\n '--num_workers',\n default=8,\n type=int,\n help='Number of workers used in dataloading')\n parser.add_argument(\n '--retest', default=False, type=bool, help='test cache results')\n args = parser.parse_args()\n return args\n\n\ndef im_detect(img, net, detector, transform, thresh=0.01):\n with torch.no_grad():\n t0 = time.time()\n w, h = img.shape[1], img.shape[0]\n x = transform(img)[0].unsqueeze(0)\n x = x.cuda()\n t1 = time.time()\n output = net(x)\n boxes, scores = detector.forward(output)\n t2 = time.time()\n max_conf, max_id = scores[0].topk(1, 1, True, True)\n pos = max_id > 0\n if len(pos) == 0:\n return np.empty((0, 6))\n boxes = boxes[0][pos.view(-1, 1).expand(len(pos), 4)].view(-1, 4)\n scores = max_conf[pos].view(-1, 1)\n max_id = max_id[pos].view(-1, 1)\n inds = scores > thresh\n if len(inds) == 0:\n return np.empty((0, 6))\n boxes = boxes[inds.view(-1, 1).expand(len(inds), 4)].view(-1, 4)\n scores = scores[inds].view(-1, 1)\n max_id = max_id[inds].view(-1, 1)\n c_dets = torch.cat((boxes, scores, max_id.float()), 1).cpu().numpy()\n img_classes = np.unique(c_dets[:, -1])\n output = None\n flag = False\n for cls in img_classes:\n cls_mask = np.where(c_dets[:, -1] == cls)[0]\n image_pred_class = c_dets[cls_mask, :]\n keep = nms(image_pred_class, cfg.TEST.NMS_OVERLAP, force_cpu=True)\n keep = keep[:50]\n image_pred_class = image_pred_class[keep, :]\n if not flag:\n output = image_pred_class\n flag = True\n else:\n output = np.concatenate((output, image_pred_class), axis=0)\n output[:, 0:2][output[:, 0:2] < 0] = 0\n output[:, 2:4][output[:, 2:4] > 1] = 1\n scale = np.array([w, h, w, h])\n output[:, :4] = output[:, :4] * scale\n t3 = time.time()\n print(\"transform_t:\", round(t1 - t0, 3), \"detect_time:\",\n round(t2 - t1, 3), \"nms_time:\", round(t3 - t2, 3))\n return output\n\n\ndef main():\n global args\n args = arg_parse()\n ssh_run_param(args)\n cfg_from_file(args.cfg_file)\n bgr_means = cfg.TRAIN.BGR_MEAN\n dataset_name = cfg.DATASETS.DATA_TYPE\n batch_size = cfg.TEST.BATCH_SIZE\n num_workers = args.num_workers\n if cfg.DATASETS.DATA_TYPE == 'VOC':\n trainvalDataset = 
VOCDetection\n classes = VOC_CLASSES\n top_k = 200\n else:\n trainvalDataset = COCODetection\n classes = COCO_CLASSES\n top_k = 300\n valSet = cfg.DATASETS.VAL_TYPE\n num_classes = cfg.MODEL.NUM_CLASSES\n save_folder = args.save_folder\n if not os.path.exists(save_folder):\n os.mkdir(save_folder)\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n cfg.TRAIN.TRAIN_ON = False\n net = SSD(cfg)\n\n checkpoint = torch.load(args.weights)\n state_dict = checkpoint['model']\n from collections import OrderedDict\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n head = k[:7]\n if head == 'module.':\n name = k[7:] # remove `module.`\n else:\n name = k\n new_state_dict[name] = v\n net.load_state_dict(new_state_dict)\n\n detector = Detect(cfg)\n img_wh = cfg.TEST.INPUT_WH\n ValTransform = BaseTransform(img_wh, bgr_means, (2, 0, 1))\n input_folder = args.images\n thresh = cfg.TEST.CONFIDENCE_THRESH\n for item in os.listdir(input_folder)[2:3]:\n img_path = os.path.join(input_folder, item)\n print(img_path)\n img = cv2.imread(img_path)\n dets = im_detect(img, net, detector, ValTransform, thresh)\n draw_img = draw_rects(img, dets, classes)\n out_img_name = \"output_\" + item\n save_path = os.path.join(save_folder, out_img_name)\n cv2.imwrite(save_path, img)\n\n\ndef ssh_run_param(args):\n args.cfg_file = './configs/refine_vgg_voc_512.yaml'\n\n\nif __name__ == '__main__':\n st = time.time()\n main()\n print(\"final time\", time.time() - st)\n" ]
[ [ "torch.set_default_tensor_type", "numpy.unique", "torch.load", "numpy.concatenate", "torch.no_grad", "numpy.array", "numpy.where", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
inaccel/TF2
[ "1a3ce4c63675a30156bfcf3a1b9682154ef13183" ]
[ "TransForm_Kit/Quantization/debug/Pytorch-ResNet50-Log2QuantizeLoad-FPGA_Quantize-Batch-2.py" ]
[ "\n# coding: utf-8\n\n\nimport math\nimport struct\nimport cv2\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\nimport torch.utils.data as data\nimport torchvision\nimport torchvision.models as tvmodel\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torch.nn.functional as F\nfrom torch.nn.parameter import Parameter\nfrom torch.autograd import Variable\nfrom collections import OrderedDict\nimport matplotlib.pyplot as plt\nimport os\nimport gc\n#os.environ['CUDA_VISIBLE_DEVICES']='2'\n\nlayer_name_binQ = ['data','conv1-scale','res2a_branch2a-scale','res2a_branch2b-scale','res2c','res2b_branch2a-scale','res2b_branch2b-scale','res2c','res2c_branch2a-scale','res2c_branch2b-scale','res2c','res3a_branch2a-scale','res3a_branch2b-scale','res3d','res3b_branch2a-scale','res3b_branch2b-scale','res3d','res3c_branch2a-scale','res3c_branch2b-scale','res3d','res3d_branch2a-scale','res3d_branch2b-scale','res3d','res4a_branch2a-scale','res4a_branch2b-scale','res4f','res4b_branch2a-scale','res4b_branch2b-scale','res4f','res4c_branch2a-scale','res4c_branch2b-scale','res4f','res4d_branch2a-scale','res4d_branch2b-scale','res4f','res4e_branch2a-scale','res4e_branch2b-scale','res4f','res4f_branch2a-scale','res4f_branch2b-scale','res4f','res5a_branch2a-scale','res5a_branch2b-scale','res5c','res5b_branch2a-scale','res5b_branch2b-scale','res5c','res5c_branch2a-scale','res5c_branch2b-scale','res5c','fc1000']\n\nlayer_name_bin = ['data','conv1-scale','res2a_branch2a-scale','res2a_branch2b-scale','res2a_branch2c-scale','res2a_branch1-scale','res2a','res2b_branch2a-scale','res2b_branch2b-scale','res2b_branch2c-scale','res2b','res2c_branch2a-scale','res2c_branch2b-scale','res2c_branch2c-scale','res2c','res3a_branch2a-scale','res3a_branch2b-scale','res3a_branch2c-scale','res3a_branch1-scale','res3a','res3b_branch2a-scale','res3b_branch2b-scale','res3b_branch2c-scale','res3b','res3c_branch2a-scale','res3c_branch2b-scale','res3c_branch2c-scale','res3c','res3d_branch2a-scale','res3d_branch2b-scale','res3d_branch2c-scale','res3d','res4a_branch2a-scale','res4a_branch2b-scale','res4a_branch2c-scale','res4a_branch1-scale','res4a','res4b_branch2a-scale','res4b_branch2b-scale','res4b_branch2c-scale','res4b','res4c_branch2a-scale','res4c_branch2b-scale','res4c_branch2c-scale','res4c','res4d_branch2a-scale','res4d_branch2b-scale','res4d_branch2c-scale','res4d','res4e_branch2a-scale','res4e_branch2b-scale','res4e_branch2c-scale','res4e','res4f_branch2a-scale','res4f_branch2b-scale','res4f_branch2c-scale','res4f','res5a_branch2a-scale','res5a_branch2b-scale','res5a_branch2c-scale','res5a_branch1-scale','res5a','res5b_branch2a-scale','res5b_branch2b-scale','res5b_branch2c-scale','res5b','res5c_branch2a-scale','res5c_branch2b-scale','res5c_branch2c-scale','res5c','pool5','fc1000']\n\nlayer_count = 0\nfilter_count = 0\nfeature_file_count = 0\ndef FeatureWrite(name,x):\n with open('Feature_int8/resnet50/'+name+'.txt','w') as data:\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n for k in range(x.shape[2]):\n for l in range(x.shape[3]):\n st = x[i][j][k][l].item()\n st = str(st)\n data.write(st)\n data.write('\\n')\n\ndef FeatureWriteFC(name,x):\n with open('Feature_int8/resnet50/'+name,'w') as data:\n for i in range(x.size(0)):\n for j in range(x.size(1)):\n st = x[i][j].item()\n st = str(st)\n data.write(st)\n data.write('\\n') \n\ndef FeatureWriteFC_Bin(name,x):\n with open('Feature_int8/resnet50/'+name,'wb') as data:\n for i 
in range(x.size(0)):\n for j in range(x.size(1)):\n st = x[i][j].item()\n st = struct.pack('f',st)\n data.write(st)\n\n\ndef GetQ(Q): \n global layer_name_binQ\n q_list = []\n for m in range(51):\n Q_key = layer_name_binQ[m]\n with open('../channel_q/resnet50/' + Q_key,'r') as data:\n line = data.readline()\n linedata = line.strip().split(' ')\n for item in linedata:\n item = np.int8(item)\n q_list.append(item)\n print(len(q_list))\n Q[Q_key] = q_list\n q_list = []\n return(Q) \n\ndef GetRealWeight(Filter,Q):\n global filter_name,layer_name_binQ,INFLAT\n filter_count = 0\n for i in range(50):\n print('**********')\n if i == 3 or i == 12 or i == 24 or i == 42:\n res = 2\n else:\n res = 1 \n Q1_key = layer_name_binQ[i] \n Q2_key = layer_name_binQ[i+1]\n for n in range(res):\n if n == 1:\n Q1_key = layer_name_binQ[i-2]\n filter_key = filter_name[filter_count]\n print(filter_key)\n out_c = Filter[filter_key].shape[0]\n in_c = Filter[filter_key].shape[1]\n for j in range(out_c):\n Filter[filter_key][j] = Filter[filter_key][j] + Q[Q2_key][j] + INFLAT\n for k in range(in_c):\n Filter[filter_key][j][k] = Filter[filter_key][j][k] - Q[Q1_key][k]\n filter_min = np.min(Filter[filter_key])\n filter_max = np.max(Filter[filter_key])\n print(filter_min)\n print(filter_max)\n Filter[filter_key][Filter[filter_key]<0] = 0\n filter_count = filter_count + 1\n print('**********')\n return (Filter)\n\ndef GetBias(bias,Q2_key):\n global layer_name_binQ,INFLAT,Q\n #Q2_key = layer_name_binQ[1] \n out_c = len(Q[Q2_key])\n bias_power = np.zeros((out_c))\n bias_power = np.float32(bias_power)\n bias = np.array(bias)\n for i in range(out_c):\n power_num = Q[Q2_key][i] + INFLAT \n bias_power[i] = bias[i]*pow(2.,power_num)\n return (bias_power)\n\ndef Conv2dInt8(input_data,weight,weight2,stride,padding):\n npd = ((0,0),(0,0),(padding,padding),(padding,padding))\n input_data = np.lib.pad(input_data,npd,'constant',constant_values=0)\n input_size = input_data.shape \n weight_size = weight.shape\n N = input_size[0]\n in_c = input_size[1]\n out_c = weight_size[0]\n\n out_h = int((input_size[2] - weight_size[2])/stride + 1)\n out_w = int((input_size[3] - weight_size[3])/stride + 1)\n conv_result = np.ones((N,out_c,out_h,out_w))\n conv_result = np.int32(conv_result)\n inputdata = np.int8(input_data) \n weight = np.int32(weight)\n weight2 = np.int8(weight2)\n for i in range(N):\n for j in range(out_c):\n for k in np.arange(input_size[2] - weight_size[2] + 1)[::stride]:\n for l in np.arange(input_size[3] - weight_size[3] + 1)[::stride]:\n conv_result[i,j,k//stride,l//stride] = np.sum((input_data[i,:,k:k + weight_size[2],l:l + weight_size[3]]<<weight[j,:])*weight2[j,:])\n return (conv_result)\n\ndef BN(conv_result,bias_power,alpha,beta):\n global layer_count,layer_name_binQ,Q,INFLAT\n Q2_key = layer_name_binQ[layer_count] \n N = conv_result.shape[0]\n out_c = conv_result.shape[1]\n out_h = conv_result.shape[2]\n out_w = conv_result.shape[3]\n bias_power = np.float32(bias_power)\n bn_result = np.zeros((N,out_c,out_h,out_w))\n bn_result = np.float32(bn_result)\n for i in range(N):\n for j in range(out_c):\n bn_result[i][j] = (alpha[j]*(conv_result[i][j] + bias_power[j]) + beta[j]*pow(2.,(Q[Q2_key][j]+INFLAT)))*pow(2.,-INFLAT)\n bn_result = np.round(bn_result)\n return (bn_result)\n\ndef FC(x,weight,weight2,bias):\n global BatchSize\n x = np.int8(x)\n weight = np.int32(weight)\n weight2 = np.int8(weight2)\n print(\"the shape of the output is :\",end=' ')\n print(weight.shape[0])\n out = np.ones((BatchSize,weight.shape[0]))\n out = 
np.int32(out)\n for i in range(BatchSize):\n for j in range(weight.shape[0]):\n out[i,j] = np.sum((x[i,:]<<weight[j,:])*weight2[j,:])#bias[j]\n out = torch.Tensor(out)\n bias = torch.Tensor(bias)\n out[0,:] = out[0,:] + bias[:]\n out[0,:] = out[0,:]*pow(2.,-INFLAT)\n out = torch.round(out)\n out[out<-128] = -128\n out[out>127] = 127\n return (out)\n\n\"\"\"\nclass PreProcess(object):\n def __call__(self,image):\n w = 224\n h = 224\n image = np.array(image)\n img_matlab = image.copy()\n tmp = img_matlab[:,:,2].copy()\n img_matlab[:,:,2] = img_matlab[:,:,0]\n img_matlab[:,:,0] = tmp\n\n imgFloat = img_matlab.astype(float)\n imgResize = cv2.resize(imgFloat,(w,h))\n imgResize[:,:,0] = imgResize[:,:,0] - 110.177\n imgResize[:,:,1] = imgResize[:,:,1] - 117.644\n imgResize[:,:,2] = imgResize[:,:,2] - 117.378\n imgProc = imgResize\n imgProc = np.swapaxes(imgProc, 0, 2)\n imgProc = np.swapaxes(imgProc, 1, 2)\n imgProc = torch.Tensor(imgProc)\n return imgProc \n\"\"\"\n\nBatchSize= 1\ndata_dir = '/data/yutong/imagenet'\n#transform = transforms.Compose([PreProcess()])\ntransform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])\n])\nval = datasets.ImageFolder(os.path.join(data_dir,'val'),transform)\nValLoader = data.DataLoader(val,batch_size=BatchSize,shuffle=False)\n\n# ResNet50\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n #3x3 convolution with padding\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n #1x1 convolution\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\nclass Bottleneck(nn.Module):\n expansion = 4\n __constants__ = ['downsample']\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, norm_layer=None):\n super(Bottleneck,self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n width = int(planes * (base_width / 64.)) * groups\n # Both self.conv2 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv1x1(inplanes, width)\n self.bn1 = norm_layer(width)\n self.conv2 = conv3x3(width, width, stride, groups, dilation)\n self.bn2 = norm_layer(width)\n self.conv3 = conv1x1(width, planes * self.expansion)\n self.bn3 = norm_layer(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n \n def forward(self,x):\n global layer_name_bin,filter_name,bn_name,layer_count,filter_count,feature_file_count,Filter,Filter2,alpha,beta\n stride = self.stride\n x = np.int8(x)\n identity = x\n filter_key = filter_name[filter_count]\n bn_key = bn_name[filter_count]\n out = Conv2dInt8(x,Filter[filter_key],Filter2[filter_key],1,0)\n layer_count = layer_count + 1\n feature_file_count = feature_file_count + 1\n bias = np.zeros((out.shape[1]))\n print('FilterCount = ',end=' ')\n print(filter_count)\n out = BN(out,bias,alpha[bn_key],beta[bn_key])\n out[out<-128] = -128\n out[out>127] = 127\n filter_count = filter_count + 1\n FeatureWrite(layer_name_bin[feature_file_count],out)\n out = torch.Tensor(out)\n out = self.relu(out)\n \n filter_key = filter_name[filter_count]\n bn_key = bn_name[filter_count]\n out = np.int8(out)\n out = Conv2dInt8(out,Filter[filter_key],Filter2[filter_key],stride,1) #padding=dialation\n layer_count = layer_count + 1\n 
feature_file_count = feature_file_count + 1\n bias = np.zeros((out.shape[1]))\n out = BN(out,bias,alpha[bn_key],beta[bn_key])\n out[out<-128] = -128\n out[out>127] = 127\n filter_count = filter_count + 1\n FeatureWrite(layer_name_bin[feature_file_count],out)\n out = torch.Tensor(out)\n out = self.relu(out)\n\n filter_key = filter_name[filter_count]\n print(len(bn_name))\n print(filter_count)\n bn_key = bn_name[filter_count]\n out = np.int8(out)\n out = Conv2dInt8(out,Filter[filter_key],Filter2[filter_key],1,0)\n layer_count = layer_count + 1\n feature_file_count = feature_file_count + 1\n bias = np.zeros((out.shape[1]))\n out = BN(out,bias,alpha[bn_key],beta[bn_key])\n filter_count = filter_count + 1\n #out = np.int16(out)\n out[out<-128] = -128\n out[out>127] = 127\n FeatureWrite(layer_name_bin[feature_file_count],out)\n out = np.int16(out)\n if self.downsample is not None:\n filter_key = filter_name[filter_count]\n bn_key = bn_name[filter_count]\n identity = Conv2dInt8(identity,Filter[filter_key],Filter2[filter_key],stride,0)\n feature_file_count = feature_file_count + 1 # add by shenfw\n bias = np.zeros((identity.shape[1]))\n identity = BN(identity,bias,alpha[bn_key],beta[bn_key])\n #identity = np.int16(identity)\n identity[identity<-128] = -128\n identity[identity>127] = 127\n FeatureWrite(layer_name_bin[feature_file_count],identity) # add by shenfw\n identity = np.int16(identity)\n filter_count = filter_count + 1\n \n out += identity\n out[out>127] = 127\n out[out<-128] = -128\n #FeatureWrite(layer_name_bin[layer_count],out)\n out = torch.Tensor(out)\n out = self.relu(out) \n feature_file_count = feature_file_count + 1\n FeatureWrite(layer_name_bin[feature_file_count],out)\n return out\n\nclass ResNet(nn.Module):\n \n def __init__(self,block,layers,num_classes=1000,zero_init_residual=False,\n groups=1, width_per_group=64, replace_stride_with_dilation=None,\n norm_layer=None):\n super(ResNet,self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n self._norm_layer = norm_layer\n\n self.inplanes = 64\n self.dilation = 1\n if replace_stride_with_dilation is None:\n # each element in the tuple indicates if we should replace\n # the 2x2 stride with a dilated convolution instead\n replace_stride_with_dilation = [False, False, False]\n if len(replace_stride_with_dilation) != 3:\n raise ValueError(\"replace_stride_with_dilation should be None \"\n \"or a 3-element tuple, got {}\".format(replace_stride_with_dilation))\n self.groups = groups\n self.base_width = width_per_group\n self.conv1 = nn.Conv2d(3,self.inplanes,kernel_size=7,stride=2,padding=3,bias=False)\n self.bn1 = norm_layer(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3,stride=2,padding=1)\n \n self.layer1 = self._make_layer(block,64,layers[0])\n self.layer2 = self._make_layer(block,128,layers[1],stride=2,dilate=replace_stride_with_dilation[0])\n self.layer3 = self._make_layer(block,256,layers[2],stride=2,dilate=replace_stride_with_dilation[1])\n self.layer4 = self._make_layer(block,512,layers[3],stride=2,dilate=replace_stride_with_dilation[2])\n self.avgpool = nn.AdaptiveAvgPool2d((1,1))\n self.fc = nn.Linear(512*block.expansion,num_classes)\n \n for m in self.modules():\n if isinstance(m,nn.Conv2d):\n nn.init.kaiming_normal_(m.weight,mode='fan_out',nonlinearity='relu')\n elif isinstance(m,(nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight,1)\n nn.init.constant_(m.bias,0)\n \n if zero_init_residual:\n for m in self.modules():\n if isinstance(m,Bottleneck):\n 
nn.init.constant_(m.bn3.weight,0)\n                elif isinstance(m,BasicBlock):\n                    nn.init.constant_(m.bn2.weight,0)\n    \n    def _make_layer(self,block,planes,blocks,stride=1,dilate=False):\n        norm_layer = self._norm_layer\n        downsample = None\n        previous_dilation = self.dilation\n        if dilate:\n            self.dilation *= stride\n            stride = 1\n        if stride != 1 or self.inplanes != planes*block.expansion:\n            downsample = nn.Sequential(\n                conv1x1(self.inplanes,planes*block.expansion,stride),\n                norm_layer(planes*block.expansion),\n            )\n    \n        layers = []\n        layers.append(block(self.inplanes,planes,stride,downsample))\n        self.inplanes = planes*block.expansion\n        for _ in range(1,blocks):\n            layers.append(block(self.inplanes, planes, groups=self.groups,\n                                base_width=self.base_width, dilation=self.dilation,\n                                norm_layer=norm_layer))\n    \n        return nn.Sequential(*layers)\n    \n    def forward(self,x):\n        global Q, fc_weight, fc_bias, layer_name_bin,filter_name,bn_name,layer_count,filter_count,feature_file_count,Filter,Filter2,bias_power,fc_bias,alpha,beta\n        filter_key = filter_name[filter_count]\n        bn_key = bn_name[filter_count]\n        \n        x = Conv2dInt8(x,Filter[filter_key],Filter2[filter_key],2,3)\n        feature_file_count = feature_file_count + 1\n        layer_count = layer_count + 1\n        bias_power = np.zeros((x.shape[1]))\n        x = BN(x,bias_power,alpha[bn_key],beta[bn_key])\n        filter_count = filter_count + 1\n        x[x<-128] = -128\n        x[x>127] = 127\n        FeatureWrite(layer_name_bin[feature_file_count],x)\n        x = torch.Tensor(x)\n        x = self.relu(x)\n        #npd = ((0,0),(0,0),(0,1),(0,1))\n        #x = np.lib.pad(x,npd,'constant',constant_values=0)\n        #pd = (0,1,0,1)\n        #x = F.pad(x,pd,'constant',0)\n        x = self.maxpool(x)\n        FeatureWrite('pool1',x)\n        x = self.layer1(x)\n        x = self.layer2(x)\n        x = self.layer3(x)\n        x = self.layer4(x)\n        \n        \n        x = self.avgpool(x)\n        x = torch.round(x)\n        FeatureWriteFC('pool5.txt',x)\n        x = x.view(x.size(0),-1)\n        Q2_key = layer_name_binQ[50]\n        bias = GetBias(fc_bias,Q2_key)\n        print(filter_count)\n        print(len(filter_name))\n        filter_key = filter_name[filter_count]\n        out = FC(x,Filter[filter_key],Filter2[filter_key],bias)\n        \n        FeatureWriteFC('fc1000.txt',out)\n        filter_count = 0\n        layer_count = 0\n        feature_file_count = 0\n        print('end')\n        Q_fc = Q[layer_name_binQ[50]]\n        for i in range(1000):\n            out[0,i] = out[0,i]*pow(2.,-Q_fc[i]) \n        FeatureWriteFC('fc-dequantization.txt',out)\n        return out\n    \n    def resnet50(pretrained=False,**kwargs):\n        model = ResNet(Bottleneck,[3,4,6,3],**kwargs)\n        return model\n\ncnn = ResNet.resnet50()\ncnn.eval()\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nmodel2 = tvmodel.resnet50()#.to(device)\nmodel2 = torch.nn.DataParallel(model2)#.cuda()\nmodel2.eval()\ncheckpoint = torch.load('../weights/resnet50.pth.tar',map_location='cpu')\nmodel2.load_state_dict(checkpoint['state_dict'])\n\nfilter_name = []\nbn_name = []\nINFLAT = np.int8(15) #15\nQ = OrderedDict()\nFilter = OrderedDict()\nFilter2 = OrderedDict()\nalpha = OrderedDict()\nbeta = OrderedDict()\nbias = []\nfc_weight = torch.ones(1000,2048)\nfc_bias = []\nPars = model2.state_dict()\ncount = 0\nfor key in Pars:\n    if count%6 == 0:\n        Pars2 = torch.abs(Pars[key])\n        Pars2[Pars2>0] = torch.round(torch.log2(Pars2[Pars2>0]))\n        Filter[key] = Pars2\n        Filter[key] = Filter[key].cpu().numpy()\n        Filter[key] = np.int8(Filter[key])\n        Filter2[key] = Pars[key]\n        Filter2[key][abs(Filter2[key])<0.00001] = 0\n        Filter2[key][Filter2[key]<0] = -1\n        Filter2[key][Filter2[key]>0] = 1\n        Filter2[key] = Filter2[key].cpu().numpy()\n        Filter2[key] = np.int8(Filter2[key])\n        filter_name.append(key)\n
    if count == 318:\n        fc_weight = Pars[key].cpu().numpy()\n    #if count == 1: // pytorch has no bias for conv\n    #    bias_num = Pars[key].size(0)\n    #    for i in range(bias_num):\n    #        bias.append(Pars[key][i]) \n    if count == 319:\n        bias_num = Pars[key].size(0)\n        for i in range(bias_num):\n            fc_bias.append(Pars[key][i])\n    if (count-1)%6 == 0 and count != 319:\n        alpha0 = Pars[key]\n    if (count-2)%6 == 0:\n        beta0 = Pars[key]\n    if (count-3)%6 == 0:\n        mean = Pars[key]\n    if (count-4)%6 == 0:\n        bn_name.append(key)\n        var = Pars[key]\n        alpha[key] = alpha0/(torch.sqrt(var+0.00001))\n        beta[key] = -alpha[key]*mean + beta0 \n        alpha[key] = alpha[key].cpu().numpy()\n        beta[key] = beta[key].cpu().numpy() \n    \n    count = count + 1\n\nQ = GetQ(Q)\nFilter = GetRealWeight(Filter,Q)\n\nQ2_key = layer_name_binQ[1]\n#bias_power = GetBias(bias,Q2_key)\n#bias_power = np.array(bias_power)\n\ndef accuracy(output,target,topk=(1,5)):\n    maxk = max(topk)\n    batch_size = target.size(0)\n    _,pred = output.topk(maxk,1,True,True)\n    pred = pred.t()\n    print(pred)\n    correct = pred.eq(target.view(1,-1).expand_as(pred))\n    print(correct)\n    res = []\n    for k in topk:\n        correct_k = correct[:k].view(-1).float().sum(0,keepdim=True)\n        res.append(correct_k.mul_(100.0/batch_size))\n    return res\n\nInputList = []\nInputX = np.zeros((1,3,224,224))\nprint(InputX.shape)\nwith open('Feature_float/resnet50/data.bin','rb') as fig:\n    for i in range(150528):\n        data = fig.read(4)\n        data_float = struct.unpack(\"f\",data)[0]\n        InputList.append(data_float)\n\nIndex = 0\nfor i in range(1):\n    for j in range(3):\n        for k in range(224):\n            for l in range(224):\n                InputX[i][j][k][l] = InputList[Index]\n                Index = Index + 1\nfor i in range(InputX.shape[1]):\n    InputX[0][i] = InputX[0][i]*pow(2,5)\n\nInputX = np.round(InputX)\nInputX[InputX<-128] = -128\nInputX[InputX>127] = 127\nInputX = np.int8(InputX)\nTemp1 = []\nTemp5 = []\n\nfor j,(test_x,test_y) in enumerate(ValLoader):\n    test_x = InputX\n    print('The test x is:')\n    print(test_x)\n    output_test = cnn(test_x)\n    prec1,prec5 = accuracy(output_test.data,test_y,topk=(1,5))\n    prec1 = prec1.cpu().numpy()\n    prec5 = prec5.cpu().numpy()\n    Temp1.append(prec1[0])\n    Temp5.append(prec5[0])\n    break\n\nTop1 = np.array(Temp1)\nTop5 = np.array(Temp5)\nprint('Top1 = ',end=' ')\nprint(Top1.mean(),end=' ')\nprint('Top5 = ',end=' ')\nprint(Top5.mean())\n#\"\"\"\n" ]
[ [ "torch.abs", "torch.load", "numpy.round", "numpy.max", "torch.cuda.is_available", "torch.ones", "torch.sqrt", "numpy.arange", "torch.round", "numpy.int8", "torch.log2", "numpy.float32", "numpy.zeros", "numpy.lib.pad", "torch.nn.Sequential", "numpy.min", "torch.nn.init.constant_", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.DataParallel", "numpy.array", "numpy.sum", "torch.Tensor", "numpy.int32", "numpy.ones", "numpy.int16", "torch.nn.MaxPool2d", "torch.nn.AdaptiveAvgPool2d", "torch.nn.ReLU", "torch.nn.init.kaiming_normal_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
franklu2014/foocat
[ "5d452996ba139693fcbcda05c7500b24b9d1ad50" ]
[ "foocat/foocat.py" ]
[ "import pandas as pd\n\n\ndef catbind(a, b):\n \"\"\"\n Concatenates two pandas categoricals.\n\n Parameters\n ----------\n a : pandas.core.arrays.categorical.Categorical\n A pandas categorical.\n b : pandas.core.arrays.categorical.Categorical\n A pandas categorical that you wish to concatenate to a.\n\n Returns\n -------\n pandas.core.arrays.categorical.Categorical\n The new concatenated pandas categorical.\n\n Examples\n --------\n >>> from foocat import foocat\n >>> a = pd.Categorical([\"character\", \"hits\", \"your\", \"eyeballs\"])\n >>> b = pd.Categorical([\"but\", \"integer\", \"where it\", \"counts\"])\n >>> foocat.catbind(a, b)\n [character, hits, your, eyeballs, but, integer, where it, counts]\n Categories (8, object): [but, character, counts,\n eyeballs, hits, integer, where it, your]\n \"\"\"\n concatenated = pd.concat([pd.Series(a.astype(\"str\")),\n pd.Series(b.astype(\"str\"))])\n return pd.Categorical(concatenated)\n" ]
[ [ "pandas.Categorical" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Hzfinfdu/BBT_CPM
[ "ee095727c714902fa6f8f5deebf1c30dc1956520" ]
[ "mpu/layers.py" ]
[ "# coding=utf-8\n# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# Parts of the code here are adapted from PyTorch\n# repo: https://github.com/pytorch/pytorch\n\n\nimport math\n\nimport torch\nimport torch.nn.functional as F\nimport torch.nn.init as init\nfrom torch.nn.parameter import Parameter\n\nfrom apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm\n\nfrom .initialize import get_model_parallel_rank\nfrom .initialize import get_model_parallel_world_size\nfrom .mappings import copy_to_model_parallel_region\nfrom .mappings import gather_from_model_parallel_region\nfrom .mappings import reduce_from_model_parallel_region\nfrom .mappings import scatter_to_model_parallel_region\nfrom .random import get_cuda_rng_tracker\nfrom .utils import divide\nfrom .utils import split_tensor_along_last_dim\nfrom .utils import VocabUtility\n\n\ndef _initialize_affine_weight(weight, output_size, input_size,\n per_partition_size, partition_dim, init_method,\n stride=1, return_master_weight=False):\n \"\"\"Initialize affine weight for model parallel.\n\n Build the master weight on all processes and scatter\n the relevant chunk.\"\"\"\n # If we only use 1 process for model parallelism, bypass scatter.\n world_size = get_model_parallel_world_size()\n if world_size == 1:\n init_method(weight)\n if return_master_weight:\n return weight\n return None\n\n # Initialize master weight\n master_weight = torch.empty(output_size, input_size,\n dtype=weight.dtype,\n requires_grad=False)\n init_method(master_weight)\n\n # Split and copy\n per_partition_per_stride_size = divide(per_partition_size, stride)\n weight_list = torch.split(master_weight, per_partition_per_stride_size,\n dim=partition_dim)\n rank = get_model_parallel_rank()\n my_weight_list = weight_list[rank::world_size]\n\n with torch.no_grad():\n torch.cat(my_weight_list, dim=partition_dim, out=weight)\n if return_master_weight:\n return master_weight\n return None\n\n\nclass VocabParallelEmbedding(torch.nn.Module):\n \"\"\"Embedding parallelized in the vocabulary dimension.\n\n This is mainly adapted from torch.nn.Embedding and all the default\n values are kept.\n Arguments:\n num_embeddings: vocabulary size.\n embedding_dim: size of hidden state.\n init_method: method to initialize weights.\n \"\"\"\n def __init__(self, num_embeddings, embedding_dim,\n init_method=init.xavier_normal_):\n super(VocabParallelEmbedding, self).__init__()\n # Keep the input dimensions.\n self.num_embeddings = num_embeddings\n self.embedding_dim = embedding_dim\n # Set the detauls for compatibility.\n self.padding_idx = None\n self.max_norm = None\n self.norm_type = 2.\n self.scale_grad_by_freq = False\n self.sparse = False\n self._weight = None\n # Divide the weight matrix along the vocaburaly dimension.\n self.vocab_start_index, self.vocab_end_index = \\\n VocabUtility.vocab_range_from_global_vocab_size(\n self.num_embeddings, get_model_parallel_rank(),\n get_model_parallel_world_size())\n 
self.num_embeddings_per_partition = self.vocab_end_index - \\\n self.vocab_start_index\n\n # Allocate weights.\n self.weight = Parameter(torch.Tensor(self.num_embeddings_per_partition,\n self.embedding_dim))\n self.weight.model_parallel = True\n # And initialize.\n _initialize_affine_weight(\n self.weight, self.num_embeddings, self.embedding_dim,\n self.num_embeddings_per_partition, 0, init_method)\n\n def forward(self, input_):\n # Build the mask.\n input_mask = (input_ < self.vocab_start_index) | \\\n (input_ >= self.vocab_end_index)\n # Mask the input.\n masked_input = input_.clone() - self.vocab_start_index\n masked_input[input_mask] = 0\n # Get the embeddings.\n output_parallel = F.embedding(masked_input, self.weight,\n self.padding_idx, self.max_norm,\n self.norm_type, self.scale_grad_by_freq,\n self.sparse)\n # Mask the output embedding.\n output_parallel[input_mask, :] = 0.0\n # Reduce across all the model parallel GPUs.\n output = reduce_from_model_parallel_region(output_parallel)\n return output\n\n\nclass ParallelEmbedding(torch.nn.Module):\n \"\"\"Embedding parallelized in the embedding dimension.\n\n This is mainly adapted from torch.nn.Embedding and all the default\n values are kept.\n Arguments:\n num_embeddings: vocabulary size.\n embedding_dim: size of hidden state.\n init_method: method to initialize weights.\n \"\"\"\n def __init__(self, num_embeddings, embedding_dim,\n init_method=init.xavier_normal_,\n keep_master_weight_for_test=False):\n super(ParallelEmbedding, self).__init__()\n # Keep the input dimensions.\n self.num_embeddings = num_embeddings\n self.embedding_dim = embedding_dim\n # Set some detauls for compatibility.\n self.padding_idx = None\n self.max_norm = None\n self.norm_type = 2.\n self.scale_grad_by_freq = False\n self.sparse = False\n self._weight = None\n # Divide the weight matrix along the embedding dimension.\n world_size = get_model_parallel_world_size()\n self.embedding_dim_per_partition = divide(self.embedding_dim,\n world_size)\n\n # Allocate weights.\n self.weight = Parameter(torch.Tensor(self.num_embeddings,\n self.embedding_dim_per_partition))\n self.weight.model_parallel = True\n # And initialize. split the weights to different model parallel devices\n _initialize_affine_weight(\n self.weight, self.num_embeddings, self.embedding_dim,\n self.embedding_dim_per_partition, 1, init_method,\n stride=1, return_master_weight=False)\n\n def forward(self, input_):\n input_parallel = copy_to_model_parallel_region(input_)\n output_parallel = F.embedding(input_parallel, self.weight,\n self.padding_idx, self.max_norm,\n self.norm_type, self.scale_grad_by_freq,\n self.sparse)\n output = gather_from_model_parallel_region(output_parallel)\n return output\n\n\nclass ColumnParallelLinear(torch.nn.Module):\n \"\"\"Linear layer with column parallelism.\n\n NOTE: This function will NOT do all-reduce unless gather_output is True\n\n The linear layer is defined as Y = XA + b. A is parallelized along\n its second dimension as A = [A_1, ..., A_p].\n\n Arguments:\n input_size: first dimension of matrix A.\n output_size: second dimension of matrix A.\n bias: If true, add bias\n gather_output: If true, call all-gether on output and make Y avaiable\n to all GPUs, otherwise, every GPU will have its output\n which is Y_i = XA_i\n init_method: method to initialize weights. Note that bias is always set\n to zero.\n stride: For the strided linear layers.\n keep_master_weight_for_test: This was added for testing and should be\n set to False. 
It returns the master weights\n used for initialization.\n \"\"\"\n def __init__(self, input_size, output_size, bias=True, gather_output=True,\n init_method=init.xavier_normal_, stride=1,\n keep_master_weight_for_test=False):\n super(ColumnParallelLinear, self).__init__()\n\n # Keep input parameters\n self.input_size = input_size\n self.output_size = output_size\n self.gather_output = gather_output\n # Divide the weight matrix along the last dimension.\n world_size = get_model_parallel_world_size()\n self.output_size_per_partition = divide(output_size, world_size)\n\n # Parameters.\n # Note: torch.nn.functional.linear performs XA^T + b and as a result\n # we allocate the transpose.\n self.weight = Parameter(torch.Tensor(self.output_size_per_partition,\n self.input_size))\n self.weight.model_parallel = True\n if bias:\n self.bias = Parameter(torch.Tensor(self.output_size_per_partition))\n self.bias.model_parallel = True\n # Always initialize bias to zero.\n with torch.no_grad():\n self.bias.zero_()\n else:\n self.register_parameter('bias', None)\n\n # Initialize weight.\n self.master_weight = _initialize_affine_weight(\n self.weight, self.output_size, self.input_size,\n self.output_size_per_partition, 0, init_method,\n stride=stride, return_master_weight=keep_master_weight_for_test)\n\n def forward(self, input_):\n # Set up backprop all-reduce.\n input_parallel = copy_to_model_parallel_region(input_)\n # Matrix multiply.\n output_parallel = F.linear(input_parallel, self.weight, self.bias)\n if self.gather_output:\n # All-gather across the partitions.\n output = gather_from_model_parallel_region(output_parallel)\n else:\n output = output_parallel\n return output\n\n\nclass RowParallelLinear(torch.nn.Module):\n \"\"\"Linear layer with row parallelism.\n\n NOTE: This function will do all-reduce\n\n The linear layer is defined as Y = XA + b. A is parallelized along\n its first dimension and X along its second dimension as:\n - -\n | A_1 |\n | . |\n A = | . | X = [X_1, ..., X_p]\n | . |\n | A_p |\n - -\n Arguments:\n input_size: first dimension of matrix A.\n output_size: second dimension of matrix A.\n bias: If true, add bias. Note that bias is not parallelized.\n input_is_parallel: If true, we assume that the input is already\n split across the GPUs and we do not split\n again.\n init_method: method to initialize weights. Note that bias is always set\n to zero.\n stride: For the strided linear layers.\n keep_master_weight_for_test: This was added for testing and should be\n set to False. 
It returns the master weights\n used for initialization.\n \"\"\"\n def __init__(self, input_size, output_size, bias=True,\n input_is_parallel=False,\n init_method=init.xavier_normal_, stride=1,\n keep_master_weight_for_test=False):\n super(RowParallelLinear, self).__init__()\n\n # Keep input parameters\n self.input_size = input_size\n self.output_size = output_size\n self.input_is_parallel = input_is_parallel\n # Divide the weight matrix along the last dimension.\n world_size = get_model_parallel_world_size()\n self.input_size_per_partition = divide(input_size, world_size)\n\n # Parameters.\n # Note: torch.nn.functional.linear performs XA^T + b and as a result\n # we allocate the transpose.\n self.weight = Parameter(torch.Tensor(self.output_size,\n self.input_size_per_partition))\n self.weight.model_parallel = True\n if bias:\n self.bias = Parameter(torch.Tensor(self.output_size))\n # Always initialize bias to zero.\n with torch.no_grad():\n self.bias.zero_()\n else:\n self.register_parameter('bias', None)\n\n # Initialize weight.\n self.master_weight = _initialize_affine_weight(\n self.weight, self.output_size, self.input_size,\n self.input_size_per_partition, 1, init_method,\n stride=stride, return_master_weight=keep_master_weight_for_test)\n\n def forward(self, input_):\n # Set up backprop all-reduce.\n if self.input_is_parallel:\n input_parallel = input_\n else:\n input_parallel = scatter_to_model_parallel_region(input_)\n # Matrix multiply.\n output_parallel = F.linear(input_parallel, self.weight)\n # All-reduce across all the partitions.\n output_ = reduce_from_model_parallel_region(output_parallel)\n if self.bias is not None:\n output = output_ + self.bias\n else:\n output = output_\n return output\n" ]
[ [ "torch.nn.functional.embedding", "torch.empty", "torch.Tensor", "torch.cat", "torch.no_grad", "torch.split", "torch.nn.functional.linear" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wjj19950828/tvm
[ "9c63f4fc318652f6fff68342da2d11b26592a3e0", "9c63f4fc318652f6fff68342da2d11b26592a3e0", "9c63f4fc318652f6fff68342da2d11b26592a3e0", "9c63f4fc318652f6fff68342da2d11b26592a3e0" ]
[ "tests/python/frontend/pytorch/test_forward.py", "python/tvm/testing.py", "tests/python/unittest/test_tir_ir_builder.py", "tests/python/contrib/test_onnx.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=import-self, invalid-name, unused-argument\n\"\"\"Unit tests for various models and operators\"\"\"\nimport os\nimport sys\nfrom time import time\n\nimport numpy as np\nimport torch\nimport torchvision\nimport tvm\nimport tvm.testing\nfrom packaging import version as package_version\nfrom scipy.stats import t as tdistr\nfrom torch.nn import Module\nfrom torch.nn import functional as F\nfrom tvm import relay\nfrom tvm.contrib import graph_executor\nfrom tvm.contrib.nvcc import have_fp16\nimport pytest\n\nsys.setrecursionlimit(10000)\n\n\ndef list_ops(expr):\n class OpLister(tvm.relay.ExprVisitor):\n def visit_op(self, expr):\n if expr not in self.node_set:\n self.node_list.append(expr)\n return super().visit_op(expr)\n\n def list_nodes(self, expr):\n self.node_set = {}\n self.node_list = []\n self.visit(expr)\n return self.node_list\n\n return OpLister().list_nodes(expr)\n\n\ndef assert_shapes_match(tru, est):\n if tru.shape != est.shape:\n msg = \"Output shapes {} and {} don't match\"\n raise AssertionError(msg.format(tru.shape, est.shape))\n\n\ndef load_torchvision(model_name):\n \"\"\"Given a model name, returns a Torchvision model in eval mode as well\n as an example input.\"\"\"\n with torch.no_grad():\n if model_name.startswith(\"inception\"):\n height = width = 299\n mean = [0.5, 0.5, 0.5]\n std = [0.5, 0.5, 0.5]\n else:\n height = width = 224\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n input_shape = [1, 3, height, width]\n input_data = torch.randn(input_shape).float()\n for channel in range(3):\n input_data[:, channel] -= mean[channel]\n input_data[:, channel] /= std[channel]\n\n if model_name.startswith(\"googlenet\"):\n model = getattr(torchvision.models, model_name)(pretrained=True, aux_logits=True)\n else:\n model = getattr(torchvision.models, model_name)(pretrained=True)\n model = model.float().eval()\n return model, [input_data]\n\n\ndef load_pretrainedmodels(model_name):\n \"\"\"Given a model name, returns a pretrainedmodels.pytorch model in eval\n mode as well as an example input.\"\"\"\n import pretrainedmodels # https://github.com/Cadene/pretrained-models.pytorch\n\n model = getattr(pretrainedmodels, model_name)().float().eval()\n input_shape = [1, *model.input_size]\n input_data = torch.rand(input_shape).float() * 256\n for channel in range(3):\n input_data[:, channel] -= model.mean[channel]\n input_data[:, channel] /= model.std[channel]\n return model, [input_data]\n\n\ndef load_model(model_name):\n \"\"\"Given a model name, returns a model as well as an example input.\"\"\"\n if hasattr(torchvision.models, model_name):\n return load_torchvision(model_name)\n try:\n import pretrainedmodels\n\n if hasattr(pretrainedmodels, model_name):\n return 
load_pretrainedmodels(model_name)\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\"Please install pretrainedmodels.pytorch\")\n raise RuntimeError(\"Model not supported\")\n\n\ndef confidence_interval(mean, stdev, count, alpha=0.01):\n \"\"\"Returns the lower and upper bounds of the confidence interval of a random\n variable. Confidence is 1 - alpha (default confidence is 99%).\"\"\"\n stdval = tdistr.ppf(1 - alpha / 2, count - 1)\n lower, upper = mean + np.array([-1, 1]) * stdval * stdev / np.sqrt(count)\n return lower, upper\n\n\ndef measure_latency(model, input_shapes, output_shapes, thresh, dryruns=40):\n \"\"\"Compute the latency of the given model\"\"\"\n latencies = []\n count = 0\n while True:\n if isinstance(model, Module):\n input_data = [torch.rand(shape).float() for shape in input_shapes]\n if torch.cuda.is_available():\n input_data = list(map(lambda x: x.cuda(), input_data))\n model = model.cuda()\n t_start = time()\n with torch.no_grad():\n model(*input_data)\n t_end = time()\n latencies.append(t_end - t_start)\n else:\n input_data = {}\n for i, shape in enumerate(input_shapes):\n name = \"input\" + str(i)\n arr = np.random.random(shape).astype(\"float32\")\n input_data[name] = tvm.nd.array(arr)\n t_start = time()\n model.set_input(**input_data)\n model.run()\n for i, shape in enumerate(output_shapes):\n arr = np.zeros(shape).astype(\"float32\")\n model.get_output(i, tvm.nd.array(arr))\n t_end = time()\n count += 1\n if count < dryruns:\n continue\n latencies.append(t_end - t_start)\n mean = np.mean(latencies)\n stdev = np.std(latencies)\n sample_size = len(latencies)\n if sample_size > dryruns:\n lower, upper = confidence_interval(mean, stdev, sample_size)\n est = (upper + lower) / 2\n err = (upper - lower) / 2\n if err < thresh:\n return est\n\n\ndef verify_model(\n model_name, input_data=[], custom_convert_map={}, rtol=1e-5, atol=1e-5, expected_ops=[]\n):\n \"\"\"Assert that the output of a compiled model matches with that of its\n baseline.\"\"\"\n if isinstance(model_name, str):\n baseline_model, baseline_input = load_model(model_name)\n elif isinstance(input_data, list):\n baseline_model = model_name\n baseline_input = input_data\n elif isinstance(input_data, torch.Tensor) or len(input_data.shape) == 0:\n baseline_model = model_name\n baseline_input = [input_data]\n else:\n assert False, \"Unexpected input format\"\n\n if torch.cuda.is_available():\n if isinstance(baseline_model, torch.nn.Module):\n baseline_model = baseline_model.cuda()\n baseline_input = [inp.cuda() for inp in baseline_input]\n\n with torch.no_grad():\n baseline_outputs = baseline_model(*[input.clone() for input in baseline_input])\n\n if isinstance(baseline_outputs, tuple):\n baseline_outputs = tuple(out.cpu().numpy() for out in baseline_outputs)\n else:\n baseline_outputs = (baseline_outputs.cpu().numpy(),)\n\n trace = torch.jit.trace(baseline_model, [input.clone() for input in baseline_input])\n if isinstance(baseline_model, torch.nn.Module):\n trace = trace.float().eval()\n\n if torch.cuda.is_available():\n trace = trace.cuda()\n else:\n trace = trace.cpu()\n\n input_names = [\"input{}\".format(idx) for idx, inp in enumerate(baseline_input)]\n input_shapes = list(zip(input_names, [inp.shape for inp in baseline_input]))\n mod, params = relay.frontend.from_pytorch(trace, input_shapes, custom_convert_map)\n for arg in mod[\"main\"].params[: len(input_names)]:\n assert arg.name_hint in input_names\n compiled_input = dict(zip(input_names, [inp.clone().cpu().numpy() for inp in 
baseline_input]))\n\n with tvm.transform.PassContext(opt_level=3):\n for target, dev in tvm.testing.enabled_targets():\n relay_graph, relay_lib, relay_params = relay.build(mod, target=target, params=params)\n relay_model = graph_executor.create(relay_graph, relay_lib, dev)\n relay_model.set_input(**relay_params)\n for name, inp in compiled_input.items():\n relay_model.set_input(name, inp)\n relay_model.run()\n\n for i, baseline_output in enumerate(baseline_outputs):\n compiled_output = relay_model.get_output(i).numpy()\n\n assert_shapes_match(baseline_output, compiled_output)\n tvm.testing.assert_allclose(baseline_output, compiled_output, rtol=rtol, atol=atol)\n\n if expected_ops:\n\n def visit(op):\n if isinstance(op, tvm.ir.op.Op):\n if op.name in expected_ops:\n expected_ops.remove(op.name)\n\n tvm.relay.analysis.post_order_visit(mod[\"main\"].body, visit)\n\n if expected_ops:\n msg = \"TVM Relay do not contain expected ops {}\"\n raise AssertionError(msg.format(expected_ops))\n\n del model_name\n del baseline_model\n torch.cuda.empty_cache()\n\n\n# Single operator tests\[email protected]_gpu\ndef test_forward_pixel_shuffle():\n torch.set_grad_enabled(False)\n input_shape = [1, 144, 16, 16]\n\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.PixelShuffle(2).float().eval(), input_data=input_data)\n verify_model(torch.nn.PixelShuffle(3).float().eval(), input_data=input_data)\n verify_model(torch.nn.PixelShuffle(4).float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_add():\n torch.set_grad_enabled(False)\n input_shape = [10]\n\n class Add1(Module):\n def forward(self, *args):\n return args[0] + args[0]\n\n class Add2(Module):\n def forward(self, *args):\n return args[0] + 1\n\n class Add3(Module):\n def forward(self, *args):\n ones = torch.ones(input_shape, dtype=torch.float)\n if torch.cuda.is_available():\n ones = ones.cuda()\n return args[0] + ones\n\n class Add4(Module):\n def forward(self, *args):\n ones = torch.ones([], dtype=torch.float)\n if torch.cuda.is_available():\n ones = ones.cuda()\n return args[0] + ones\n\n input_data = torch.rand(input_shape).float()\n verify_model(Add1().float().eval(), input_data=input_data)\n verify_model(Add2().float().eval(), input_data=input_data)\n verify_model(Add3().float().eval(), input_data=input_data)\n verify_model(Add4().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_subtract():\n torch.set_grad_enabled(False)\n input_shape = [10]\n\n class Subtract1(Module):\n def forward(self, *args):\n return args[0] - args[0]\n\n class Subtract2(Module):\n def forward(self, *args):\n return args[0] - 1\n\n class Subtract3(Module):\n def forward(self, *args):\n ones = torch.ones(input_shape)\n if torch.cuda.is_available():\n ones = ones.cuda()\n return args[0] - ones\n\n class Subtract4(Module):\n def forward(self, *args):\n ones = torch.ones([])\n if torch.cuda.is_available():\n ones = ones.cuda()\n return args[0] - ones\n\n input_data = torch.rand(input_shape).float()\n verify_model(Subtract1().float().eval(), input_data=input_data)\n verify_model(Subtract2().float().eval(), input_data=input_data)\n verify_model(Subtract3().float().eval(), input_data=input_data)\n verify_model(Subtract4().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_multiply():\n torch.set_grad_enabled(False)\n input_shape = [10]\n\n class Multiply1(Module):\n def forward(self, *args):\n return args[0] * args[0]\n\n class Multiply2(Module):\n def forward(self, 
*args):\n return args[0] * 1.0\n\n class Multiply3(Module):\n def forward(self, *args):\n ones = torch.ones(input_shape)\n if torch.cuda.is_available():\n ones = ones.cuda()\n return args[0] * ones\n\n class Multiply4(Module):\n def forward(self, *args):\n ones = torch.ones([])\n if torch.cuda.is_available():\n ones = ones.cuda()\n return args[0] * ones\n\n input_data = torch.rand(input_shape).float()\n verify_model(Multiply1().float().eval(), input_data=input_data)\n verify_model(Multiply2().float().eval(), input_data=input_data)\n verify_model(Multiply3().float().eval(), input_data=input_data)\n verify_model(Multiply4().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_min_max():\n class Max(Module):\n def forward(self, inp):\n return torch.max(inp)\n\n class Min(Module):\n def forward(self, inp):\n return torch.min(inp)\n\n class Max2(Module):\n def forward(self, inp):\n out, _ = torch.max(inp, 1, keepdim=True)\n return out\n\n class Min2(Module):\n def forward(self, inp):\n out, _ = torch.min(inp, 0, keepdim=False)\n return out\n\n class Max3(Module):\n def forward(self, lhs, rhs):\n return torch.max(lhs, rhs)\n\n class Min3(Module):\n def forward(self, lhs, rhs):\n return torch.min(lhs, rhs)\n\n input_data = [torch.rand((10, 10)), torch.rand((10, 10))]\n\n verify_model(Max(), input_data=input_data[0])\n verify_model(Min(), input_data=input_data[0])\n verify_model(Max2(), input_data=input_data[0])\n verify_model(Min2(), input_data=input_data[0])\n verify_model(Max3(), input_data=input_data)\n verify_model(Min3(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_reciprocal():\n torch.set_grad_enabled(False)\n input_shape = [2, 1, 10, 1, 10]\n\n class Reciprocal1(Module):\n def forward(self, *args):\n return args[0].reciprocal()\n\n input_data = torch.rand(input_shape).float()\n verify_model(Reciprocal1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_repeat():\n torch.set_grad_enabled(False)\n input_shape = [1, 3]\n\n class Repeat1(Module):\n def forward(self, *args):\n return args[0].repeat(1, 1)\n\n class Repeat2(Module):\n def forward(self, *args):\n return args[0].repeat(4, 2)\n\n class Repeat3(Module):\n def forward(self, *args):\n return args[0].repeat(4, 2, 1)\n\n input_data = torch.rand(input_shape).float()\n verify_model(Repeat1().float().eval(), input_data=input_data)\n verify_model(Repeat2().float().eval(), input_data=input_data)\n verify_model(Repeat3().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_repeat_interleave():\n torch.set_grad_enabled(False)\n input_shape = [2, 2, 3]\n\n class RepeatInterleave1(Module):\n def forward(self, *args):\n return args[0].repeat_interleave(2)\n\n class RepeatInterleave2(Module):\n def forward(self, *args):\n return args[0].repeat_interleave(3, dim=0)\n\n class RepeatInterleave3(Module):\n def forward(self, *args):\n return args[0].repeat_interleave(2, dim=1)\n\n class RepeatInterleave4(Module):\n def forward(self, *args):\n return args[0].repeat_interleave(4, dim=2)\n\n input_data = torch.rand(input_shape).float()\n verify_model(RepeatInterleave1().float().eval(), input_data=input_data)\n verify_model(RepeatInterleave2().float().eval(), input_data=input_data)\n verify_model(RepeatInterleave3().float().eval(), input_data=input_data)\n verify_model(RepeatInterleave4().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_unsqueeze():\n torch.set_grad_enabled(False)\n input_shape = [10, 10]\n\n class 
Unsqueeze1(Module):\n def forward(self, *args):\n return args[0].unsqueeze(2)\n\n class Unsqueeze2(Module):\n def forward(self, *args):\n _ = args[0].unsqueeze_(2)\n # Check whether operations after inplace unsqueeze works as expected\n y = args[0].squeeze(2)\n return torch.add(y, y)\n\n input_data = torch.rand(input_shape).float()\n verify_model(Unsqueeze1().float().eval(), input_data=input_data)\n verify_model(Unsqueeze2().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_squeeze():\n torch.set_grad_enabled(False)\n input_shape = [2, 1, 10, 1, 10]\n\n class Squeeze1(Module):\n def forward(self, *args):\n return args[0].squeeze()\n\n class Squeeze2(Module):\n def forward(self, *args):\n return args[0].squeeze(1)\n\n input_data = torch.rand(input_shape).float()\n verify_model(Squeeze1().float().eval(), input_data=input_data)\n verify_model(Squeeze2().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_arange():\n torch.set_grad_enabled(False)\n\n class Arange1(Module):\n def forward(self, *args):\n return torch.arange(5)\n\n class Arange2(Module):\n def forward(self, *args):\n return torch.arange(2.5)\n\n class Arange3(Module):\n def forward(self, *args):\n return torch.arange(1, 4)\n\n class Arange4(Module):\n def forward(self, *args):\n return torch.arange(1, 2.5, 0.5)\n\n class Arange5(Module):\n def forward(self, *args):\n return torch.arange(1, 2, 1, dtype=torch.int32)\n\n class Arange6(Module):\n def forward(self, *args):\n return torch.arange(start=1, end=6, step=2)\n\n class Arange7(Module):\n def forward(self, *args):\n return torch.arange(1, 4, dtype=torch.float32)\n\n class Arange8(Module):\n def forward(self, *args):\n return torch.arange(1, 2, 1, dtype=torch.int16)\n\n class Arange9(Module):\n def forward(self, *args):\n end = torch.add(torch.tensor(4), 1)\n return torch.arange(end) + torch.ones((5,), dtype=torch.int64)\n\n class Arange10(Module):\n def forward(self, *args):\n end = torch.add(torch.tensor(4.0), torch.tensor(1.0))\n return torch.arange(end) + torch.ones((5,), dtype=torch.float)\n\n class Arange11(Module):\n def forward(self, *args):\n start = torch.add(torch.tensor(1), 1)\n end = torch.add(torch.tensor(4), 1)\n step = torch.add(torch.tensor(2), 1)\n out = torch.arange(start, end, step)\n return out + torch.ones((3,), dtype=torch.int64)\n\n class Arange12(Module):\n def forward(self, *args):\n start = torch.add(torch.tensor(1), 1)\n end = torch.add(torch.tensor(4), 1)\n step = torch.add(torch.tensor(2.5), torch.tensor(4.1))\n out = torch.arange(start, end, step)\n return out + torch.ones((3,), dtype=torch.float)\n\n verify_model(Arange1().float().eval())\n verify_model(Arange2().float().eval())\n verify_model(Arange3().float().eval())\n verify_model(Arange4().float().eval())\n verify_model(Arange5().float().eval())\n verify_model(Arange6().float().eval())\n verify_model(Arange7().float().eval())\n verify_model(Arange8().float().eval())\n verify_model(Arange9().float().eval())\n verify_model(Arange10().float().eval())\n verify_model(Arange11().float().eval())\n verify_model(Arange12().float().eval())\n\n\[email protected]_gpu\ndef test_forward_mesh_grid():\n torch.set_grad_enabled(False)\n\n class MeshGrid1(Module):\n def forward(self, *args):\n x = torch.tensor([1, 2, 3])\n y = torch.tensor([4, 5, 6])\n grid_x, grid_y = torch.meshgrid([x, y])\n return grid_x, grid_y\n\n class MeshGrid2(Module):\n def forward(self, *args):\n x = torch.tensor([1, 2, 3], dtype=torch.float32)\n y = 
torch.add(torch.tensor(5, dtype=torch.float32), 1)\n grid_x, grid_y = torch.meshgrid([x, y])\n return grid_x, grid_y\n\n verify_model(MeshGrid1().float().eval())\n verify_model(MeshGrid2().float().eval())\n\n\[email protected]_gpu\ndef test_forward_abs():\n torch.set_grad_enabled(False)\n input_shape = [2, 1, 10, 1, 10]\n\n class Abs1(Module):\n def forward(self, *args):\n return args[0].abs()\n\n input_data = torch.rand(input_shape).float()\n verify_model(Abs1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_concatenate():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class Concatenate1(Module):\n def forward(self, *args):\n return torch.cat([args[0][:, 0].unsqueeze(1), args[0][:, 1].unsqueeze(1)], 1)\n\n class Concatenate2(Module):\n def forward(self, *args):\n a = (args[0][:, :, 0] + 2) * 7\n b = (args[0][:, :, 1] + 3) * 11\n c = (args[0][:, :, 2] + 5) * 13\n return torch.cat([t.unsqueeze(2) for t in [a, b, c]], 2)\n\n input_data = torch.rand(input_shape).float()\n verify_model(Concatenate1().float().eval(), input_data=input_data)\n verify_model(Concatenate2().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_relu():\n torch.set_grad_enabled(False)\n input_shape = [10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.ReLU().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_prelu():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.PReLU(num_parameters=3).eval(), input_data=input_data)\n # Test when input channel > 1 and num parameters = 1\n verify_model(torch.nn.PReLU(num_parameters=1).eval(), input_data=input_data)\n # Test when input dims < 2\n verify_model(torch.nn.PReLU(num_parameters=1).eval(), input_data=torch.randn(2))\n\n\[email protected]_gpu\ndef test_forward_leakyrelu():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.LeakyReLU().eval(), input_data=input_data)\n verify_model(torch.nn.LeakyReLU(negative_slope=0.05).eval(), input_data=input_data)\n verify_model(torch.nn.LeakyReLU(negative_slope=1.0, inplace=True).eval(), input_data=input_data)\n verify_model(\n torch.nn.LeakyReLU(negative_slope=1.25, inplace=True).eval(), input_data=input_data\n )\n\n\[email protected]_gpu\ndef test_forward_elu():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.ELU().eval(), input_data=input_data)\n verify_model(torch.nn.ELU(alpha=0.3).eval(), input_data=input_data)\n verify_model(torch.nn.ELU(alpha=1.0).eval(), input_data=input_data)\n verify_model(torch.nn.ELU(alpha=1.3).eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_celu():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.CELU().eval(), input_data=input_data)\n verify_model(torch.nn.CELU(alpha=0.3).eval(), input_data=input_data)\n verify_model(torch.nn.CELU(alpha=1.0).eval(), input_data=input_data)\n verify_model(torch.nn.CELU(alpha=1.3).eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_gelu():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.GELU().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_selu():\n 
torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.SELU().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_softplus():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.Softplus().eval(), input_data=input_data)\n verify_model(torch.nn.Softplus(beta=1.5, threshold=20).eval(), input_data=input_data)\n verify_model(torch.nn.Softplus(beta=5, threshold=10).eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_softsign():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.Softsign().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_log_sigmoid():\n torch.set_grad_enabled(False)\n input_shape = [10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.LogSigmoid().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_adaptiveavgpool():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.AdaptiveAvgPool2d([1, 1]).eval(), input_data=input_data)\n verify_model(torch.nn.AdaptiveAvgPool2d([10, 10]).eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_maxpool2d():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n\n verify_model(torch.nn.MaxPool2d(kernel_size=[1, 1]).eval(), input_data)\n verify_model(torch.nn.MaxPool2d(kernel_size=[2, 2], dilation=[2, 3]).eval(), input_data)\n verify_model(torch.nn.MaxPool2d(kernel_size=[10, 10]).eval(), input_data)\n verify_model(torch.nn.MaxPool2d(kernel_size=[4, 4], padding=2, stride=2).eval(), input_data)\n\n # A functional variant (default strides = None case)\n class MaxPool2D(Module):\n def forward(self, *args):\n return torch.nn.functional.max_pool2d(args[0], kernel_size=[10, 10])\n\n verify_model(MaxPool2D(), input_data=input_data)\n\n class MaxPool2DWithIndices(Module):\n def __init__(self):\n super(MaxPool2DWithIndices, self).__init__()\n self.pool = torch.nn.MaxPool2d(kernel_size=[1, 1], return_indices=True)\n\n def forward(self, *args):\n output, indices = self.pool(args[0])\n return output\n\n class MaxPool2DWithIntStrides(Module):\n def forward(self, *args):\n # Makes kernel_size and strides a Relay expr to test converting back to int\n x_shape = args[0].shape\n kernel_size = [torch.tensor(x_shape[1]).int(), torch.tensor(x_shape[1]).int()]\n strides = [torch.tensor(x_shape[0]).int(), torch.tensor(x_shape[0]).int()]\n return torch.nn.functional.max_pool2d(args[0], kernel_size=[4, 4], stride=strides)\n\n verify_model(MaxPool2DWithIndices().float().eval(), input_data=input_data)\n verify_model(MaxPool2DWithIntStrides().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_maxpool1d():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10]\n input_data = torch.rand(input_shape).float()\n\n verify_model(torch.nn.MaxPool1d(kernel_size=1).eval(), input_data)\n verify_model(torch.nn.MaxPool1d(kernel_size=2, dilation=[1]).eval(), input_data)\n verify_model(torch.nn.MaxPool1d(kernel_size=10).eval(), input_data)\n verify_model(torch.nn.MaxPool1d(kernel_size=4, padding=2, stride=2).eval(), input_data)\n\n # A functional variant (default strides = None case)\n class MaxPool1D(Module):\n def forward(self, *args):\n 
return torch.nn.functional.max_pool1d(args[0], kernel_size=10)\n\n verify_model(MaxPool1D(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_maxpool3d():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10, 10]\n input_data = torch.rand(input_shape).float()\n\n verify_model(torch.nn.MaxPool3d(kernel_size=[1, 1, 1]).eval(), input_data)\n verify_model(torch.nn.MaxPool3d(kernel_size=[2, 2, 2], dilation=[1, 2, 3]).eval(), input_data)\n verify_model(torch.nn.MaxPool3d(kernel_size=[10, 10, 10]).eval(), input_data)\n verify_model(torch.nn.MaxPool3d(kernel_size=[4, 4, 4], padding=2, stride=2).eval(), input_data)\n\n # A functional variant (default strides = None case)\n class MaxPool3D(Module):\n def forward(self, *args):\n return torch.nn.functional.max_pool3d(args[0], kernel_size=[10, 10, 10])\n\n verify_model(MaxPool3D(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_split():\n torch.set_grad_enabled(False)\n input_shape = [4, 10]\n\n class Split(Module):\n def __init__(self, split_size_or_sections, dim):\n super(Split, self).__init__()\n self.split_size_or_sections = split_size_or_sections\n self.dim = dim\n\n def forward(self, *args):\n return torch.split(args[0], self.split_size_or_sections, self.dim)\n\n input_data = torch.rand(input_shape).float()\n verify_model(Split(2, 0).float().eval(), input_data=input_data)\n verify_model(Split(3, 1).float().eval(), input_data=input_data)\n verify_model(Split(4, 1).float().eval(), input_data=input_data)\n verify_model(Split([2, 3, 5], 1).float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_avgpool1d():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10]\n\n class AvgPool1D2(Module):\n def forward(self, *args):\n return torch.nn.functional.avg_pool1d(args[0], kernel_size=[10])\n\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.AvgPool1d(kernel_size=[10]).eval(), input_data=input_data)\n verify_model(AvgPool1D2().float().eval(), input_data=input_data)\n verify_model(\n torch.nn.AvgPool1d(kernel_size=[5], stride=2, padding=2).eval(), input_data=input_data\n )\n\n\[email protected]_gpu\ndef test_forward_avgpool2d():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class AvgPool2D2(Module):\n def forward(self, *args):\n return torch.nn.functional.avg_pool2d(args[0], kernel_size=[10, 10])\n\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.AvgPool2d(kernel_size=[10, 10]).eval(), input_data=input_data)\n verify_model(AvgPool2D2().float().eval(), input_data=input_data)\n verify_model(\n torch.nn.AvgPool2d(kernel_size=5, stride=2, padding=2).eval(), input_data=input_data\n )\n\n\[email protected]_gpu\ndef test_forward_avgpool3d():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10, 10]\n\n class AvgPool3D1(Module):\n def forward(self, *args):\n return torch.nn.functional.avg_pool3d(args[0], kernel_size=[10, 10, 10])\n\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.AvgPool3d(kernel_size=[10, 10, 10]).eval(), input_data=input_data)\n verify_model(AvgPool3D1().float().eval(), input_data=input_data)\n verify_model(\n torch.nn.AvgPool3d(kernel_size=5, stride=2, padding=2).eval(), input_data=input_data\n )\n\n\[email protected]_gpu\ndef test_forward_hardtanh():\n torch.set_grad_enabled(False)\n input_shape = [10]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.Hardtanh().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_conv():\n 
torch.set_grad_enabled(False)\n conv1d_input_shape = [1, 3, 10]\n conv2d_input_shape = [1, 3, 10, 10]\n\n class Conv2D1(Module):\n def __init__(self):\n super(Conv2D1, self).__init__()\n self.conv = torch.nn.Conv2d(3, 6, 7, bias=True)\n self.softmax = torch.nn.Softmax()\n\n def forward(self, *args):\n return self.softmax(self.conv(args[0]))\n\n class Conv2D2(Module):\n def __init__(self):\n super(Conv2D2, self).__init__()\n self.conv = torch.nn.Conv2d(3, 6, 7, bias=False)\n self.softmax = torch.nn.Softmax()\n\n def forward(self, *args):\n return self.softmax(self.conv(args[0]))\n\n class Conv2D3(Module):\n def __init__(self):\n super(Conv2D3, self).__init__()\n self.conv = torch.nn.Conv2d(3, 6, 7, groups=3, bias=False)\n self.softmax = torch.nn.Softmax()\n\n def forward(self, *args):\n return self.softmax(self.conv(args[0]))\n\n class Conv1D1(Module):\n def __init__(self):\n super(Conv1D1, self).__init__()\n self.conv = torch.nn.Conv1d(3, 6, 7)\n self.softmax = torch.nn.Softmax()\n\n def forward(self, *args):\n return self.softmax(self.conv(args[0]))\n\n class Conv1D2(Module):\n def __init__(self):\n super(Conv1D2, self).__init__()\n self.conv = torch.nn.Conv1d(3, 6, 7, bias=False)\n self.softmax = torch.nn.Softmax()\n\n def forward(self, *args):\n return self.softmax(self.conv(args[0]))\n\n class Conv1D3(Module):\n def __init__(self):\n super(Conv1D3, self).__init__()\n self.conv = torch.nn.Conv1d(3, 6, 7, groups=3, bias=False)\n self.softmax = torch.nn.Softmax()\n\n def forward(self, *args):\n return self.softmax(self.conv(args[0]))\n\n conv2d_input_data = torch.rand(conv2d_input_shape).float()\n verify_model(Conv2D1().float().eval(), input_data=conv2d_input_data)\n verify_model(Conv2D2().float().eval(), input_data=conv2d_input_data)\n # depth wise conv with channel mult 2\n verify_model(Conv2D3().float().eval(), input_data=conv2d_input_data)\n # group conv\n verify_model(\n torch.nn.Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), groups=2).eval(),\n input_data=torch.randn((1, 8, 16, 16)),\n )\n\n conv1d_input_data = torch.rand(conv1d_input_shape).float()\n verify_model(Conv1D1().float().eval(), input_data=conv1d_input_data)\n verify_model(Conv1D2().float().eval(), input_data=conv1d_input_data)\n verify_model(Conv1D3().float().eval(), input_data=conv1d_input_data)\n\n\[email protected]_gpu\[email protected](\"in_channels\", [3], ids=lambda x: \"in_channels=\" + str(x))\[email protected](\"out_channels\", [5], ids=lambda x: \"out_channels=\" + str(x))\[email protected](\"kernel_size\", [3], ids=lambda x: \"kernel_size=\" + str(x))\[email protected](\"output_padding\", [0, 1, 2], ids=lambda x: \"output_padding=\" + str(x))\[email protected](\"groups\", [1], ids=lambda x: \"groups=\" + str(x))\[email protected](\"bias\", [True, False], ids=lambda x: \"bias=\" + str(x))\ndef test_forward_conv_transpose(\n in_channels, out_channels, kernel_size, output_padding, bias, groups\n):\n # Note we do not test with groups > 1 because that is not supported\n # in tvm for conv transpose operations\n\n # Output padding must be smaller than either stride or dilation so we\n # opt to make the stride 1 + output padding\n stride = output_padding + 1\n\n # Conv 3D Transpose Tests\n conv3d_input_shape = [1, in_channels, 16, 16, 16]\n conv3d_input_data = torch.rand(conv3d_input_shape).float()\n conv3d_transpose = torch.nn.ConvTranspose3d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride,\n output_padding=output_padding,\n groups=groups,\n bias=bias,\n ).eval()\n 
verify_model(conv3d_transpose, conv3d_input_data)\n\n # Conv 2D Transpose Tests\n conv2d_input_shape = [1, in_channels, 128, 256]\n conv2d_input_data = torch.rand(conv2d_input_shape).float()\n conv2d_transpose = torch.nn.ConvTranspose2d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride,\n output_padding=output_padding,\n groups=groups,\n bias=bias,\n ).eval()\n verify_model(conv2d_transpose, conv2d_input_data)\n\n # # Conv 1D Transpose Tests\n conv1d_input_shape = [1, in_channels, 10]\n conv1d_input_data = torch.rand(conv1d_input_shape).float()\n conv1d_transpose = torch.nn.ConvTranspose1d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride,\n output_padding=output_padding,\n groups=groups,\n bias=bias,\n ).eval()\n verify_model(conv1d_transpose, conv1d_input_data)\n\n\ndef test_forward_deform_conv():\n torch.set_grad_enabled(False)\n\n def test_run(\n batch_size,\n in_channels,\n out_channels,\n in_height,\n in_width,\n out_height,\n out_width,\n offset_groups,\n kh,\n kw,\n groups,\n ):\n input_shape = [batch_size, in_channels, in_height, in_width]\n offset_shape = [batch_size, 2 * offset_groups * kh * kw, out_height, out_width]\n weight_shape = [out_channels, in_channels // groups, kh, kw]\n input_data = torch.rand(input_shape)\n offset_data = torch.rand(offset_shape)\n weight_data = torch.rand(weight_shape)\n\n class DeformConv2D(Module):\n def forward(self, *args):\n return torchvision.ops.deform_conv2d(args[0], args[1], args[2])\n\n verify_model(\n DeformConv2D().float().eval(),\n input_data=[input_data, offset_data, weight_data],\n rtol=1e-4,\n atol=1e-4,\n )\n\n batch_size = 4\n in_channels, out_channels = 4, 6\n in_height, in_width = 10, 10\n out_height, out_width = 8, 8\n offset_groups = 2\n kh, kw = 3, 3\n groups = 1\n\n test_run(\n batch_size,\n in_channels,\n out_channels,\n in_height,\n in_width,\n out_height,\n out_width,\n offset_groups,\n kh,\n kw,\n groups,\n )\n\n batch_size = 5\n in_channels, out_channels = 4, 6\n in_height, in_width = 10, 10\n out_height, out_width = 8, 8\n offset_groups = 1\n kh, kw = 3, 3\n groups = 1\n\n test_run(\n batch_size,\n in_channels,\n out_channels,\n in_height,\n in_width,\n out_height,\n out_width,\n offset_groups,\n kh,\n kw,\n groups,\n )\n\n\[email protected]_gpu\ndef test_forward_threshold():\n torch.set_grad_enabled(False)\n input_shape = [1, 3]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.Threshold(0, 0).float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_contiguous():\n torch.set_grad_enabled(False)\n input_shape = [10]\n\n class Contiguous1(Module):\n def forward(self, *args):\n return args[0].contiguous()\n\n input_data = torch.rand(input_shape).float()\n verify_model(Contiguous1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_batchnorm():\n def init_weight(m):\n torch.nn.init.normal_(m.weight, 0, 0.01)\n torch.nn.init.normal_(m.bias)\n\n inp_2d = torch.rand((1, 16, 10, 10))\n inp_3d = torch.rand((1, 16, 10, 10, 10))\n\n for bn, inp in [(torch.nn.BatchNorm2d(16), inp_2d), (torch.nn.BatchNorm3d(16), inp_3d)]:\n init_weight(bn.eval())\n verify_model(bn.eval(), input_data=inp)\n\n\[email protected]_gpu\ndef test_forward_instancenorm():\n inp_2d = torch.rand((1, 16, 10, 10))\n inp_3d = torch.rand((1, 16, 10, 10, 10))\n\n for ins_norm, inp in [\n (torch.nn.InstanceNorm2d(16), inp_2d),\n (torch.nn.InstanceNorm3d(16), inp_3d),\n ]:\n 


@tvm.testing.uses_gpu
def test_forward_instancenorm():
    inp_2d = torch.rand((1, 16, 10, 10))
    inp_3d = torch.rand((1, 16, 10, 10, 10))

    for ins_norm, inp in [
        (torch.nn.InstanceNorm2d(16), inp_2d),
        (torch.nn.InstanceNorm3d(16), inp_3d),
    ]:
        verify_model(ins_norm.eval(), input_data=inp)


@tvm.testing.uses_gpu
def test_forward_layernorm():
    def init_weight(m):
        torch.nn.init.normal_(m.weight, 0, 0.01)
        torch.nn.init.normal_(m.bias, 0.02)

    inp_2d = torch.rand((1, 16, 10, 10))
    inp_3d = torch.rand((1, 16, 10, 10, 10))
    for ln, inp in [(torch.nn.LayerNorm(10), inp_2d), (torch.nn.LayerNorm(10), inp_3d)]:
        init_weight(ln.eval())
        verify_model(ln.eval(), input_data=inp)


@tvm.testing.uses_gpu
def test_forward_groupnorm():
    input_shape = [10, 6, 5, 5]
    input_data = torch.rand(input_shape).float()

    # Separate 6 channels into 3 groups
    verify_model(torch.nn.GroupNorm(3, 6).eval(), input_data=input_data)

    # Put all 6 channels into a single group (equivalent to LayerNorm)
    verify_model(torch.nn.GroupNorm(1, 6).eval(), input_data=input_data)

    # Separate 6 channels into 6 groups (equivalent to InstanceNorm)
    verify_model(torch.nn.GroupNorm(6, 6).eval(), input_data=input_data)

    input_shape = [1, 10, 4, 7]
    input_data = torch.rand(input_shape).float()
    verify_model(torch.nn.GroupNorm(1, 10).eval(), input_data=input_data)
    verify_model(torch.nn.GroupNorm(2, 10).eval(), input_data=input_data)
    verify_model(torch.nn.GroupNorm(5, 10).eval(), input_data=input_data)
    verify_model(torch.nn.GroupNorm(10, 10).eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_reshape():
    torch.set_grad_enabled(False)
    input_shape = [2, 1, 10, 1, 10]
    new_shape = [2, 1, 10, 10]

    class Reshape1(Module):
        def forward(self, *args):
            return args[0].reshape(new_shape)

    class Reshape2(Module):
        def forward(self, *args):
            return args[0].reshape([-1])

    class Reshape3(torch.nn.Module):
        def forward(self, x):
            x_shape = x.shape
            return x.reshape((x_shape[0] * x_shape[1], x_shape[2]))

    input_data = torch.rand(input_shape).float()
    verify_model(Reshape1(), input_data=input_data)
    verify_model(Reshape2(), input_data=input_data)
    verify_model(Reshape3(), input_data=torch.randn(2, 3, 4))


@tvm.testing.uses_gpu
def test_flatten():
    class Flatten(Module):
        def forward(self, x):
            return torch.flatten(x)

    class BatchFlatten(Module):
        def forward(self, x):
            return torch.flatten(x, start_dim=1)

    inp = torch.rand((5, 2, 2))
    verify_model(Flatten(), input_data=inp)
    verify_model(BatchFlatten(), input_data=inp)


@tvm.testing.uses_gpu
def test_forward_transpose():
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]

    class Transpose1(Module):
        def forward(self, *args):
            return args[0].transpose(2, 3)

    class Transpose2(Module):
        def forward(self, *args):
            return args[0].transpose(-2, -1)

    class Transpose3(Module):
        def forward(self, *args):
            return args[0].permute(0, 2, 3, 1)

    input_data = torch.rand(input_shape).float()
    verify_model(Transpose1().float().eval(), input_data=input_data)
    verify_model(Transpose2().float().eval(), input_data=input_data)
    verify_model(Transpose3().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_size():
    torch.set_grad_enabled(False)
    input_shape = [1, 3]

    class Size1(Module):
        def forward(self, *args):
            return float(args[0].size(0)) * args[0]

    input_data = torch.rand(input_shape).float()
    verify_model(Size1().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_type_as():
    torch.set_grad_enabled(False)
    input_shape = [1, 3]

    def _create_module(dtype):
        class TypeAs(Module):
            def forward(self, *args):
                expected_type_tensor = torch.zeros(1, 3, dtype=dtype)
                return args[0].type_as(expected_type_tensor)

        return TypeAs()

    input_data = torch.randn(input_shape).float()
    verify_model(_create_module(torch.float64), input_data=input_data)
    verify_model(_create_module(torch.float32), input_data=input_data)
    verify_model(_create_module(torch.int64), input_data=input_data)
    verify_model(_create_module(torch.int32), input_data=input_data)
    verify_model(_create_module(torch.int16), input_data=input_data)
    verify_model(_create_module(torch.int8), input_data=input_data)

    if torch.cuda.is_available():
        check_fp16 = False
        try:
            # Only check half precision on supported hardware.
            if have_fp16(tvm.cuda(0).compute_version):
                check_fp16 = True
        except Exception:
            # If GPU is not enabled in TVM, skip the fp16 test.
            pass

        # Temporarily disable the fp16 test
        check_fp16 = False

        if check_fp16:
            verify_model(_create_module(torch.float16), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_view():
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]

    class View1(Module):
        def forward(self, *args):
            return args[0].view((1, 3 * 10 * 10))

    class View2(Module):
        def forward(self, *args):
            return args[0].view(args[0].shape[0], -1)

    class View3(Module):
        def forward(self, *args):
            d1 = torch.tensor(3) * torch.tensor(10) * torch.tensor(10)
            return args[0].view(args[0].shape[0], d1)

    input_data = torch.rand(input_shape).float()
    verify_model(View1().float().eval(), input_data=input_data)
    verify_model(View2().float().eval(), input_data=input_data)
    verify_model(View3().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_select():
    torch.set_grad_enabled(False)
    input_shape = [5, 3, 10, 10]

    class Select1(Module):
        def forward(self, *args):
            return args[0].select(1, 1)

    class IndexedSelect(Module):
        def __init__(self, inp, dim):
            super().__init__()
            self.inp = inp
            self.dim = dim
            if torch.cuda.is_available():
                self.inp = self.inp.cuda()

        def forward(self, index):
            return torch.index_select(self.inp, self.dim, index)

    input_data = torch.rand(input_shape).float()
    verify_model(Select1().float().eval(), input_data=input_data)

    # test negative indexing
    verify_model(lambda x: x[-1], input_data=input_data)

    x = torch.randn(3, 4)
    indices = torch.tensor([0, 2])
    verify_model(IndexedSelect(x, 0).eval(), input_data=indices)
    verify_model(IndexedSelect(x, 1).eval(), input_data=indices)


@tvm.testing.uses_gpu
def test_forward_clone():
    torch.set_grad_enabled(False)
    input_shape = [10]

    class Clone1(Module):
        def forward(self, *args):
            return args[0].clone()

    input_data = torch.rand(input_shape).float()
    verify_model(Clone1().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_gather():
    torch.set_grad_enabled(False)

    class Gather1(Module):
        def forward(self, *args):
            return torch.gather(args[0], 0, args[1])

    class Gather2(Module):
        def forward(self, *args):
            return torch.gather(args[0], 1, args[1])

    class Gather3(Module):
        def forward(self, *args):
            return torch.gather(args[0], 2, args[1])

    input_data = torch.rand((4,)).float()
    index = torch.tensor([1])
    verify_model(Gather1().float().eval(), input_data=[input_data, index])

    input_data = torch.rand((2, 2)).float()
    index = torch.tensor([[1, 0], [0, 1]])
    verify_model(Gather1().float().eval(), input_data=[input_data, index])

    input_data = torch.tensor([[1, 2], [3, 4]])
    index = torch.tensor([[0, 0], [1, 0]])
    verify_model(Gather2().float().eval(), input_data=[input_data, index])

    input_data = torch.rand((2, 2)).float()
    index = torch.tensor([[1, 0], [0, 1]])
    verify_model(Gather2().float().eval(), input_data=[input_data, index])

    input_data = torch.rand((3, 3, 3)).float()
    index = torch.tensor(
        [
            [[1, 0, 0], [1, 0, 1], [0, 1, 1]],
            [[1, 1, 1], [1, 2, 1], [1, 0, 1]],
            [[1, 2, 1], [1, 2, 1], [1, 2, 1]],
        ]
    )
    verify_model(Gather3().float().eval(), input_data=[input_data, index])


@tvm.testing.uses_gpu
def test_forward_logsoftmax():
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]

    class LogSoftmax1(Module):
        def forward(self, *args):
            return torch.nn.LogSoftmax(dim=1)(args[0][0, 0])

    input_data = torch.rand(input_shape).float()
    verify_model(LogSoftmax1().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_norm():
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]

    class Norm1(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float("inf"), dim=None, keepdim=False)

    class Norm2(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float("-inf"), dim=None, keepdim=False)

    class Norm3(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float("-inf"), dim=None, keepdim=True)

    class Norm4(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float("inf"), dim=(1, 2), keepdim=False)

    class Norm5(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float("inf"), dim=(1), keepdim=True)

    class Norm6(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float(0.5), dim=(1), keepdim=True)

    class Norm7(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float(1), dim=None, keepdim=False)

    class Norm8(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float(2.0), dim=(1), keepdim=True)

    class Norm9(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float(-0.5), dim=(1, 2), keepdim=True)

    class Norm10(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float(-2), dim=(1), keepdim=False)

    input_data = torch.rand(input_shape).float()
    verify_model(Norm1().float().eval(), input_data=input_data)
    verify_model(Norm2().float().eval(), input_data=input_data)
    verify_model(Norm3().float().eval(), input_data=input_data)
    verify_model(Norm4().float().eval(), input_data=input_data)
    verify_model(Norm5().float().eval(), input_data=input_data)
    verify_model(Norm6().float().eval(), input_data=input_data)
    verify_model(Norm7().float().eval(), input_data=input_data)
    verify_model(Norm8().float().eval(), input_data=input_data)
    verify_model(Norm9().float().eval(), input_data=input_data)
    verify_model(Norm10().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_frobenius_norm():
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]

    class FroNorm1(Module):
        def forward(self, *args):
            return torch.norm(args[0])

    class FroNorm2(Module):
        def forward(self, *args):
            return torch.norm(args[0], p="fro", dim=None, keepdim=True)

    class FroNorm3(Module):
        def forward(self, *args):
            return torch.norm(args[0], p="fro", dim=(1), keepdim=True)

    class FroNorm4(Module):
        def forward(self, *args):
            return torch.norm(args[0], dim=None, keepdim=False)

    input_data = torch.rand(input_shape).float()
    verify_model(FroNorm1().float().eval(), input_data=input_data)
    verify_model(FroNorm2().float().eval(), input_data=input_data)
    verify_model(FroNorm3().float().eval(), input_data=input_data)
    verify_model(FroNorm4().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_sigmoid():
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    input_data = torch.rand(input_shape).float()
    verify_model(torch.nn.Sigmoid().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_dense():
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]

    class Dense1(Module):
        def __init__(self):
            super(Dense1, self).__init__()
            self.linear = torch.nn.Linear(10, 7, bias=True)

        def forward(self, *args):
            return self.linear(args[0][0, 0])

    class Dense2(Module):
        def __init__(self):
            super(Dense2, self).__init__()
            self.linear = torch.nn.Linear(10, 7, bias=False)

        def forward(self, *args):
            return self.linear(args[0][0, 0])

    input_data = torch.rand(input_shape).float()
    verify_model(Dense1().float().eval(), input_data=input_data)
    verify_model(Dense2().float().eval(), input_data=input_data)

    trace = torch.jit.trace(Dense1(), [input_data])
    mod, params = relay.frontend.from_pytorch(
        trace,
        [("input", input_shape)],
    )
    assert not any([op.name == "multiply" for op in list_ops(mod["main"])])


@tvm.testing.uses_gpu
def test_forward_linear():
    torch.set_grad_enabled(False)

    class Linear(Module):
        def forward(self, input, weight, bias):
            return F.linear(input, weight, bias)

    class LinearNoBias(Module):
        def forward(self, input, weight):
            return F.linear(input, weight)

    input2d = torch.rand([2, 2]).float()
    weight1d = torch.rand([2]).float()
    weight2d = torch.rand([2, 2]).float()
    bias1d = torch.rand([2]).float()
    bias2d = torch.rand([2, 2]).float()
    # 2D input, 2D weight, 1D bias
    verify_model(Linear(), input_data=[input2d, weight2d, bias1d])
    # 2D input, 2D weight, 2D bias
    verify_model(Linear(), input_data=[input2d, weight2d, bias2d])
    # 2D input, 2D weight, no bias
    verify_model(LinearNoBias(), input_data=[input2d, weight2d])
    # 2D input, 1D weight, 1D bias is not supported by torch.linear()
    # 2D input, 1D weight, no bias
    verify_model(LinearNoBias(), input_data=[input2d, weight1d])
    # TODO: Add the following cases when matmul(1D, _) is supported by TVM
    # 1D input, 2D weight, 1D bias
    # 1D input, 2D weight, no bias
    # 1D input, 1D weight, scalar bias
    # 1D input, 1D weight, no bias


@tvm.testing.uses_gpu
def test_forward_dropout():
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    input_data = torch.rand(input_shape).float()
    verify_model(torch.nn.Dropout(p=0.5).eval(), input_data=input_data[0, 0])
    verify_model(torch.nn.Dropout2d(p=0.5).eval(), input_data=input_data[0])
    verify_model(torch.nn.Dropout3d(p=0.5).eval(), input_data=input_data)
    verify_model(torch.nn.AlphaDropout(p=0.5).eval(), input_data=input_data[0, 0])


@tvm.testing.uses_gpu
def test_forward_slice():
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]

    class Slice1(Module):
        def forward(self, *args):
            return args[0][:, :, :, :3]

    class Slice2(Module):
        def forward(self, *args):
            return args[0][0, :, :-3, :]

    class Slice3(Module):
        def forward(self, *args):
            x0 = torch.tensor(2) - torch.tensor(1)
            x1 = torch.tensor(3) + torch.tensor(1)
            return args[0][:, x0:, 1:x1, :]

    class SliceWithStride(torch.nn.Module):
        def forward(self, x):
            return x[..., 0::2] + x[..., 1::2]

    class SliceWithStride2(torch.nn.Module):
        def forward(self, x):
            return x[0::2, 0::2] + x[1::2, 1::2]

    class DynamicLengthSlice(torch.nn.Module):
        def forward(self, values, length):
            return values[0:length]

    input_data = torch.rand(input_shape).float()
    verify_model(Slice1(), input_data=input_data)
    verify_model(Slice2(), input_data=input_data)
    verify_model(Slice3(), input_data=input_data)
    verify_model(SliceWithStride(), input_data=torch.randn(1, 4))
    verify_model(SliceWithStride2(), input_data=torch.randn(4, 4))

    inp = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    slice_len = torch.tensor(2)
    targets = ["llvm", "cuda"]
    verify_trace_model(DynamicLengthSlice(), [inp, slice_len], targets)
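

# Note: DynamicLengthSlice above has a data-dependent output shape, so it is
# checked through verify_trace_model (the Relay VM path in verify_model_vm
# below) rather than plain verify_model. A minimal illustrative sketch of the
# tracing step (the helper name is ours, not part of the test suite):
def _example_trace_dynamic_slice():
    class DynSlice(torch.nn.Module):
        def forward(self, values, length):
            return values[0:length]

    values = torch.tensor([1, 2, 3, 4, 5])
    length = torch.tensor(3)
    # Tracing keeps `length` as a graph input, so the slice end stays dynamic
    # for the converter instead of being folded into a constant shape.
    return torch.jit.trace(DynSlice(), (values, length))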


def test_forward_narrow():
    torch.set_grad_enabled(False)
    input_shape = [3, 3]

    class Narrow1(Module):
        def forward(self, *args):
            return torch.narrow(args[0], 0, 0, 2)

    class Narrow2(Module):
        def forward(self, *args):
            return torch.narrow(args[0], 1, 1, 2)

    class Narrow3(Module):
        def forward(self, *args):
            begin = torch.tensor(2) - torch.tensor(1)
            length = torch.tensor(1) * torch.tensor(2)
            return torch.narrow(args[0], 1, begin, length)

    input_data = torch.rand(input_shape).float()
    verify_model(Narrow1(), input_data=input_data)
    verify_model(Narrow2(), input_data=input_data)
    verify_model(Narrow3(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_mean():
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]

    class Mean1(Module):
        def forward(self, *args):
            return args[0].mean(2)

    input_data = torch.rand(input_shape).float()
    verify_model(Mean1().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_expand():
    torch.set_grad_enabled(False)

    class Expand1(Module):
        def forward(self, *args):
            return args[0].expand((3, -1, -1, -1))

    input_shape = [1, 3, 10, 10]
    input_data = torch.rand(input_shape).float()
    verify_model(Expand1().float().eval(), input_data=input_data)

    class Expand2(Module):
        def forward(self, *args):
            return args[0].expand((3, 3, 3, 1))

    input_shape = [3, 1]
    input_data = torch.rand(input_shape).float()
    verify_model(Expand2().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_pow():
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]

    class Pow1(Module):
        def forward(self, *args):
            return args[0] ** 2

    input_data = torch.rand(input_shape).float()
    verify_model(Pow1().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_chunk():
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 14, 14]

    class Chunk1(Module):
        def forward(self, *args):
            chunks = args[0].chunk(7, 2)
            return torch.cat(chunks, 2)

    input_data = torch.rand(input_shape).float()
    verify_model(Chunk1().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_upsample():
    class Upsample(Module):
        def __init__(self, size=None, scale=None, mode="nearest", align_corners=None):
            super().__init__()
            self.size = size
            self.scale = scale
            self.mode = mode
            self.align_corners = align_corners

        def forward(self, x):
            return torch.nn.functional.interpolate(
                x,
                size=self.size,
                scale_factor=self.scale,
                mode=self.mode,
                align_corners=self.align_corners,
            )

    inp = torch.rand((1, 3, 32, 32))
    verify_model(Upsample(size=(64, 64), mode="nearest"), inp)
    verify_model(Upsample(scale=2, mode="nearest"), inp)
    verify_model(Upsample(size=(50, 50), mode="nearest"), inp)
    verify_model(Upsample(size=(64, 64), mode="bilinear", align_corners=True), inp)
    verify_model(Upsample(scale=2, mode="bilinear", align_corners=True), inp)
    verify_model(Upsample(size=(50, 50), mode="bilinear", align_corners=True), inp)


@tvm.testing.uses_gpu
def test_to():
    """test for aten::to(...)"""

    class ToCPU(Module):
        def forward(self, x):
            return x.to("cpu")

    class ToFloat(Module):
        def forward(self, x):
            return x.float()

    class ToInt(Module):
        def forward(self, x):
            return x.int()

    class ToLong(Module):
        def forward(self, x):
            return x.long()

    class ToDouble(Module):
        def forward(self, x):
            return x.double()

    class ToFloat16(Module):
        def forward(self, x):
            return x.to(torch.float16)

    verify_model(ToCPU().eval(), torch.rand((1, 3, 32, 32)))
    verify_model(ToFloat().eval(), torch.zeros((1, 3, 32, 32), dtype=torch.int))
    verify_model(ToFloat().eval(), torch.tensor(2, dtype=torch.int))
    verify_model(ToInt().eval(), torch.zeros((1, 3, 32, 32)))
    verify_model(ToInt().eval(), torch.tensor(0.8))
    verify_model(ToLong().eval(), torch.tensor(0.8))
    verify_model(ToDouble().eval(), torch.tensor(0.8))
    verify_model(ToFloat16().eval(), torch.tensor(2, dtype=torch.float32))
    verify_model(ToFloat16().eval(), torch.zeros((1, 3, 32, 32), dtype=torch.int))


@tvm.testing.uses_gpu
def test_adaptive_pool3d():
    for ishape in [(1, 32, 16, 16, 16), (1, 32, 9, 15, 15), (1, 32, 13, 7, 7)]:
        inp = torch.rand(ishape)
        verify_model(torch.nn.AdaptiveMaxPool3d((1, 1, 1)).eval(), inp)
        verify_model(torch.nn.AdaptiveMaxPool3d((2, 2, 2)).eval(), inp)
        verify_model(torch.nn.AdaptiveAvgPool3d((1, 1, 1)).eval(), inp)
        verify_model(torch.nn.AdaptiveAvgPool3d((2, 2, 2)).eval(), inp)
        verify_model(torch.nn.AdaptiveAvgPool3d((4, 8, 8)).eval(), inp)
        verify_model(torch.nn.AdaptiveMaxPool3d((7, 8, 9)).eval(), inp)


@tvm.testing.uses_gpu
def test_forward_functional_pad():
    torch.set_grad_enabled(False)
    pad = (0, 0)

    class Pad1(Module):
        def forward(self, *args):
            return torch.nn.functional.pad(args[0], pad, "constant", 0)

    input_data = torch.rand((3, 3, 4, 2))
    pad = (1, 1)
    verify_model(Pad1().float().eval(), input_data=input_data)

    pad = (1, 1, 2, 2)
    verify_model(Pad1().float().eval(), input_data=input_data)

    pad = (0, 1, 2, 1, 3, 3)
    verify_model(Pad1().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_zero_pad2d():
    inp = torch.rand((1, 1, 3, 3))
    verify_model(torch.nn.ZeroPad2d(2).eval(), inp)
    verify_model(torch.nn.ZeroPad2d((1, 1, 2, 0)).eval(), inp)


@tvm.testing.uses_gpu
def test_forward_constant_pad1d():
    inp = torch.rand((1, 2, 4))
    verify_model(torch.nn.ConstantPad2d(2, 3.5).eval(), inp)

    inp = torch.rand((1, 2, 3))
    verify_model(torch.nn.ConstantPad2d((3, 1), 3.5).eval(), inp)


@tvm.testing.uses_gpu
def test_forward_constant_pad2d():
    inp = torch.rand((1, 2, 2, 2))
    verify_model(torch.nn.ConstantPad2d(2, 3.5).eval(), inp)
    verify_model(torch.nn.ConstantPad2d((3, 0, 2, 1), 3.5).eval(), inp)


@tvm.testing.uses_gpu
def test_forward_constant_pad3d():
    inp = torch.rand((1, 3, 2, 2, 2))
    verify_model(torch.nn.ConstantPad3d(3, 3.5).eval(), inp)
    verify_model(torch.nn.ConstantPad3d((3, 4, 5, 6, 0, 1), 3.5).eval(), inp)


@tvm.testing.uses_gpu
def test_forward_reflection_pad1d():
    inp = torch.rand((1, 2, 4))
    verify_model(torch.nn.ReflectionPad1d(2).eval(), inp)
    verify_model(torch.nn.ReflectionPad1d((3, 1)).eval(), inp)

    inp = torch.rand((2, 4, 5))
    verify_model(torch.nn.ReflectionPad1d((2, 3)).eval(), inp)


@tvm.testing.uses_gpu
def test_forward_reflection_pad2d():
    inp = torch.rand((1, 1, 3, 3))
    verify_model(torch.nn.ReflectionPad2d(2).eval(), inp)
    verify_model(torch.nn.ReflectionPad2d((1, 1, 2, 0)).eval(), inp)

    inp = torch.rand((2, 4, 5, 6))
    verify_model(torch.nn.ReflectionPad2d((1, 3, 2, 4)).eval(), inp)


@tvm.testing.uses_gpu
def test_forward_replication_pad1d():
    inp = torch.rand((1, 2, 4))
    verify_model(torch.nn.ReplicationPad1d(2).eval(), inp)
    verify_model(torch.nn.ReplicationPad1d((3, 1)).eval(), inp)

    inp = torch.rand((2, 4, 5))
    verify_model(torch.nn.ReplicationPad1d((2, 3)).eval(), inp)


@tvm.testing.uses_gpu
def test_forward_replication_pad2d():
    inp = torch.rand((1, 1, 3, 3))
    verify_model(torch.nn.ReplicationPad2d(2).eval(), inp)
    verify_model(torch.nn.ReplicationPad2d((1, 1, 2, 0)).eval(), inp)

    inp = torch.rand((2, 4, 5, 6))
    verify_model(torch.nn.ReplicationPad2d((1, 3, 2, 4)).eval(), inp)


@tvm.testing.uses_gpu
def test_forward_replication_pad3d():
    inp = torch.rand((1, 1, 3, 3, 3))
    verify_model(torch.nn.ReplicationPad3d(3).eval(), inp)
    verify_model(torch.nn.ReplicationPad3d((1, 1, 2, 2, 1, 1)).eval(), inp)

    inp = torch.rand((7, 5, 4, 5, 6))
    verify_model(torch.nn.ReplicationPad3d((2, 3, 2, 5, 1, 4)).eval(), inp)


@tvm.testing.uses_gpu
def test_forward_upsample3d():
    inp = torch.arange(1, 9, dtype=torch.float32).view(1, 1, 2, 2, 2)
    verify_model(torch.nn.Upsample(scale_factor=2, mode="nearest").eval(), inp)
    verify_model(torch.nn.Upsample(scale_factor=2, mode="trilinear").eval(), inp)
    verify_model(
        torch.nn.Upsample(scale_factor=2, mode="trilinear", align_corners=True).eval(), inp
    )


def test_forward_nms():
    """dynamic Non-Maximum Suppression"""
    torch.set_grad_enabled(False)

    class NonMaxSuppression(Module):
        def __init__(self, iou_thres):
            super().__init__()
            self.iou_threshold = iou_thres

        def forward(self, *args):
            return torchvision.ops.nms(args[0], args[1], self.iou_threshold)

    # Generate random input data
    def _gen_rand_inputs(num_boxes):
        box_len = 4
        boxes = torch.rand(num_boxes, box_len, dtype=torch.float) * 0.5
        boxes[:, 2] += boxes[:, 0]
        boxes[:, 3] += boxes[:, 1]
        scores = torch.from_numpy(np.random.uniform(-1, 1, size=(num_boxes,)).astype(np.float32))
        return boxes, scores

    targets = ["llvm", "cuda"]

    for num_boxes, iou_thres in [(10, 0.3), (100, 0.5), (500, 0.9)]:
        in_boxes, in_scores = _gen_rand_inputs(num_boxes)
        verify_trace_model(NonMaxSuppression(iou_thres), [in_boxes, in_scores], targets)


def test_forward_roi_align():
    """ROI align"""
    torch.set_grad_enabled(False)

    class ROIAlign(Module):
        def __init__(self, output_sizes, spatial_scale=1.0, sampling_ratio=-1):
            super().__init__()
            self.spatial_scale = spatial_scale
            self.sampling_ratio = sampling_ratio
            self.output_sizes = output_sizes

        def forward(self, *args):
            return torchvision.ops.roi_align(
                args[0],
                args[1],
                self.output_sizes,
                self.spatial_scale,
                self.sampling_ratio,
            )

    in_data = torch.Tensor(np.random.uniform(size=(1, 8, 100, 100)))
    in_boxes = torch.Tensor(np.random.uniform(0.0, 100.0, size=(35, 4)))
    in_batch = torch.zeros((35, 1), dtype=torch.float)
    in_boxes = torch.cat([in_batch, in_boxes], dim=1)

    verify_model(ROIAlign(7), [in_data, in_boxes])
    verify_model(ROIAlign((10, 10), 0.7, 5), [in_data, in_boxes])
    verify_model(ROIAlign(15, 0.9, 3), [in_data, in_boxes])
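

# Note: when ROIs are passed as a single tensor, torchvision.ops.roi_align
# expects each row as (batch_index, x1, y1, x2, y2), which is why a zero
# batch-index column is concatenated above. A minimal standalone sketch
# (the helper name and shapes below are illustrative):
def _example_roi_align_box_format():
    feat = torch.rand(1, 8, 32, 32)
    rois = torch.tensor([[0.0, 2.0, 2.0, 20.0, 20.0]])  # batch 0, xyxy box
    return torchvision.ops.roi_align(feat, rois, output_size=7)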


@tvm.testing.uses_gpu
def test_conv3d():
    for ishape in [(1, 32, 16, 16, 16), (1, 32, 9, 15, 15), (1, 32, 13, 7, 7)]:
        inp = torch.rand(ishape)
        verify_model(torch.nn.Conv3d(32, 16, (3, 3, 3), padding=(1, 1, 1)).eval(), inp)
        verify_model(torch.nn.Conv3d(32, 16, (5, 5, 5), padding=(2, 2, 2)).eval(), inp)
        verify_model(torch.nn.Conv3d(32, 16, kernel_size=1).eval(), inp)
        # downsample
        verify_model(torch.nn.Conv3d(32, 16, kernel_size=1, stride=2).eval(), inp)


@tvm.testing.uses_gpu
def test_conv3d_transpose():
    for ishape in [(1, 8, 10, 5, 10), (1, 8, 5, 8, 8), (1, 8, 13, 7, 7)]:
        inp = torch.rand(ishape)
        verify_model(
            torch.nn.ConvTranspose3d(
                in_channels=8, out_channels=33, kernel_size=3, stride=2
            ).eval(),
            inp,
        )
        verify_model(
            torch.nn.ConvTranspose3d(
                in_channels=8,
                out_channels=20,
                kernel_size=(3, 5, 2),
                stride=(2, 1, 1),
                padding=(0, 4, 2),
            ).eval(),
            inp,
        )
        verify_model(
            torch.nn.ConvTranspose3d(in_channels=8, out_channels=20, kernel_size=1).eval(), inp
        )
        verify_model(
            torch.nn.ConvTranspose3d(in_channels=8, out_channels=5, kernel_size=1, stride=2).eval(),
            inp,
        )


# Model tests
@tvm.testing.uses_gpu
def test_resnet18():
    torch.set_grad_enabled(False)
    verify_model("resnet18", atol=1e-4, rtol=1e-4)


@tvm.testing.uses_gpu
def test_squeezenet1_0():
    torch.set_grad_enabled(False)
    verify_model("squeezenet1_0", atol=1e-4, rtol=1e-4)


@tvm.testing.uses_gpu
def test_squeezenet1_1():
    torch.set_grad_enabled(False)
    verify_model("squeezenet1_1", atol=1e-4, rtol=1e-4)


@tvm.testing.uses_gpu
def test_densenet121():
    torch.set_grad_enabled(False)
    verify_model("densenet121", atol=1e-4, rtol=1e-4)


@tvm.testing.uses_gpu
def test_inception_v3():
    torch.set_grad_enabled(False)
    verify_model("inception_v3", atol=1e-4, rtol=1e-4)


@tvm.testing.uses_gpu
def test_googlenet():
    torch.set_grad_enabled(False)
    verify_model("googlenet", atol=1e-4, rtol=1e-4)


@tvm.testing.uses_gpu
def test_mnasnet0_5():
    torch.set_grad_enabled(False)
    verify_model("mnasnet0_5", atol=1e-4, rtol=1e-4)


@tvm.testing.uses_gpu
def test_mobilenet_v2():
    torch.set_grad_enabled(False)
    verify_model("mobilenet_v2", atol=1e-4, rtol=1e-4)


"""
# TODO: Fix VGG and AlexNet issues (probably due to pooling)
@tvm.testing.uses_gpu
def test_alexnet():
    torch.set_grad_enabled(False)
    verify_model("alexnet")

@tvm.testing.uses_gpu
def test_vgg11():
    torch.set_grad_enabled(False)
    verify_model("vgg11")

@tvm.testing.uses_gpu
def test_vgg11_bn():
    torch.set_grad_enabled(False)
    verify_model("vgg11_bn")
"""


@tvm.testing.uses_gpu
def test_custom_conversion_map():
    def get_roi_align():
        pool_size = 5
        n_channels = 2 * (pool_size ** 2)
        x = torch.rand(2, n_channels, 10, 10)
        rois = torch.tensor(
            [
                [0, 0, 0, 9, 9],  # format is (xyxy)
                [0, 0, 5, 4, 9],
                [0, 5, 5, 9, 9],
                [1, 0, 0, 9, 9],
            ],
            dtype=torch.float,
        )
        roi_align = torchvision.ops.RoIAlign(pool_size, spatial_scale=1, sampling_ratio=-1)
        return roi_align.eval(), [x, rois]

    def convert_roi_align():
        def _impl(inputs, input_types):
            spatial_scale = inputs[2]
            pooled_size = (inputs[3], inputs[4])
            sampling_ratio = inputs[5]
            return relay.op.vision.roi_align(
                inputs[0], inputs[1], pooled_size, spatial_scale, sampling_ratio
            )

        return _impl

    custom_map = {"torchvision::roi_align": convert_roi_align()}
    model, inputs = get_roi_align()

    verify_model(model, inputs, custom_map)
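

# Note: verify_model passes custom_map above through to the converter; used
# directly, relay.frontend.from_pytorch takes it via its custom_convert_map
# argument. A minimal sketch (the helper name and shapes are illustrative):
def _example_custom_convert_map():
    def _roi_align_impl(inputs, input_types):
        # Same unpacking as convert_roi_align above.
        return relay.op.vision.roi_align(
            inputs[0], inputs[1], (inputs[3], inputs[4]), inputs[2], inputs[5]
        )

    model = torchvision.ops.RoIAlign(5, spatial_scale=1, sampling_ratio=-1).eval()
    x = torch.rand(2, 50, 10, 10)
    rois = torch.tensor([[0, 0, 0, 9, 9]], dtype=torch.float)
    script = torch.jit.trace(model, (x, rois))
    return relay.frontend.from_pytorch(
        script,
        [("x", list(x.shape)), ("rois", list(rois.shape))],
        custom_convert_map={"torchvision::roi_align": _roi_align_impl},
    )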


@tvm.testing.uses_gpu
def test_segmentation_models():
    class SegmentationModelWrapper(Module):
        def __init__(self, model):
            super().__init__()
            self.model = model

        def forward(self, inp):
            out = self.model(inp)
            return out["out"]

    fcn = torchvision.models.segmentation.fcn_resnet101(pretrained=True)
    deeplab = torchvision.models.segmentation.deeplabv3_resnet101(pretrained=True)

    inp = [torch.rand((1, 3, 300, 300), dtype=torch.float)]

    verify_model(SegmentationModelWrapper(fcn.eval()), inp, atol=1e-4, rtol=1e-4)
    verify_model(SegmentationModelWrapper(deeplab.eval()), inp, atol=1e-4, rtol=1e-4)


@tvm.testing.uses_gpu
def test_3d_models():
    input_shape = (1, 3, 4, 56, 56)
    resnet3d = torchvision.models.video.r3d_18(pretrained=True).eval()
    verify_model(resnet3d, [torch.rand(input_shape)], atol=1e-4, rtol=1e-4)


def _get_default_vm_targets():
    return [tgt for (tgt, _) in tvm.testing.enabled_targets()]


def verify_script_model(pt_model, ishapes, targets, idtype=None):
    script_module = torch.jit.script(pt_model)

    verify_model_vm(script_module, ishapes, idtype=idtype, targets=targets)


def verify_trace_model(pt_model, idata, targets):
    traced_model = torch.jit.trace(pt_model, idata)
    ishapes = [data.shape for data in idata]
    verify_model_vm(traced_model, ishapes, idata=idata, targets=targets)


def convert_pt_to_tvm_type(idtype):
    """Accepts a pytorch dtype and returns the corresponding TVM dtype string."""
    # TVM does not support PyTorch complex dtypes
    if idtype == torch.float64:
        curr_dtype = "float64"
    elif idtype == torch.float32:
        curr_dtype = "float32"
    elif idtype == torch.float16:
        curr_dtype = "float16"
    elif idtype == torch.bfloat16:
        curr_dtype = "bfloat16"
    elif idtype == torch.int64:
        curr_dtype = "int64"
    elif idtype == torch.int32:
        curr_dtype = "int32"
    elif idtype == torch.int16:
        curr_dtype = "int16"
    elif idtype == torch.int8:
        curr_dtype = "int8"
    elif idtype == torch.uint8:
        curr_dtype = "uint8"
    elif idtype == torch.bool:
        curr_dtype = "bool"
    else:
        raise NotImplementedError("Unsupported dtype: {}".format(idtype))
    return curr_dtype
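

# Note: a quick illustration of the mapping above (the helper name is ours and
# is not collected by pytest):
def _example_convert_pt_to_tvm_type():
    assert convert_pt_to_tvm_type(torch.float32) == "float32"
    assert convert_pt_to_tvm_type(torch.int64) == "int64"
    assert convert_pt_to_tvm_type(torch.bool) == "bool"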


def verify_model_vm(input_model, ishapes, idtype=None, idata=None, targets=["llvm"]):
    if not idtype:
        idtype = torch.float

    input_names = ["i{}".format(idx) for idx, ish in enumerate(ishapes)]
    tvm_dtype = convert_pt_to_tvm_type(idtype)
    input_dtypes = [tvm_dtype] * len(input_names)
    input_shapes = list(zip(input_names, list(zip(ishapes, input_dtypes))))

    if idata:
        input_data = idata
    # If no input data is provided, generate random data of the specified dtype
    else:
        if idtype == torch.bool:
            input_data = [
                torch.Tensor.bool(torch.randint(low=0, high=2, size=shape)) for shape in ishapes
            ]
        # Torch dtype can be float, complex, int, or bool. Complex is not
        # supported, so if the dtype is not float or bool, it must be int.
        elif not idtype.is_floating_point:
            input_data = [
                torch.randint(low=0, high=10, size=shape, dtype=idtype) for shape in ishapes
            ]
        else:
            input_data = [torch.randn(shape, dtype=idtype) for shape in ishapes]

    # Compile via VM
    mod, params = relay.frontend.from_pytorch(input_model, input_shapes)

    for tgt in targets:
        print("Running on target", tgt)
        dev = tvm.device(tgt, 0)

        executor = relay.create_executor("vm", mod=mod, device=dev, target=tgt)
        evaluator = executor.evaluate()

        # Inference
        for name, inp in zip(input_names, input_data):
            params[name] = inp.numpy()
        vm_res = evaluator(**params)

        # Baseline result
        with torch.no_grad():
            pt_result = input_model(*input_data)

        # Verify the accuracy
        if isinstance(pt_result, tuple):
            # handle multiple outputs
            for i in range(len(pt_result)):
                tvm_res = vm_res[i].numpy()
                tvm.testing.assert_allclose(tvm_res, pt_result[i].numpy(), rtol=1e-5, atol=1e-5)
        elif not isinstance(pt_result, torch.Tensor):
            tvm_res = vm_res.numpy().item()
            assert pt_result == tvm_res
        else:
            tvm.testing.assert_allclose(vm_res.numpy(), pt_result.numpy(), rtol=1e-5, atol=1e-5)


@tvm.testing.uses_gpu
def test_control_flow():
    class SimpleIf(torch.nn.Module):
        def __init__(self, N, M):
            super().__init__()
            self.weight = torch.nn.Parameter(torch.rand(N, M))

        def forward(self, inp):
            if inp.sum() > 0.0:
                output = self.weight + inp
            else:
                output = self.weight - inp
            return output

    class NestedIf(torch.nn.Module):
        def __init__(self, N, M):
            super().__init__()
            self.weight = torch.nn.Parameter(torch.rand(N, M))

        def forward(self, inp):
            if inp.sum() > 0.0:
                if inp.mean() > 0.0:
                    output = self.weight + inp
                else:
                    output = self.weight - inp
            else:
                if inp.mean() >= 0.0:
                    output = self.weight * inp
                else:
                    output = self.weight / inp

            return output

    class ScalarLoop(torch.nn.Module):
        def forward(self, inp):
            a = 0
            for i in range(inp.size(0)):
                b = i * i
                b = b + 1
                a += b
            if a != 0:
                a += 1
            else:
                a += 2
            return a

    class SimpleLoop(torch.nn.Module):
        def forward(self, inp):
            a = inp
            for i in range(inp.size(0)):
                b = a * 2.0
                c = a + b
                a += c
            return a

    class LoopWithIf(torch.nn.Module):
        def forward(self, inp):
            a = inp
            for i in range(inp.size(0)):
                b = a * 2.0
                b = a + b
                if b.sum() > 0.0:
                    a += b
                else:
                    a -= b
            return a

    class NestedLoop(torch.nn.Module):
        def forward(self, inp):
            a = inp
            for i in range(inp.size(0)):
                b = a * float(i)
                for j in range(inp.size(1)):
                    a += b * float(j)
            return a

    class SimpleScalarWhileLoop(torch.nn.Module):
        def forward(self, inp):
            a = 1
            i = 0
            while i <= inp.size(0):
                a += i
                i += 2
            i = 0
            # also test constant init cond
            while i < 10:
                a += i
                i += 3
            return a

    class SimpleWhileLoop(torch.nn.Module):
        def forward(self, inp):
            a = inp
            i = 0
            while i < inp.size(0):
                a += a * float(i) * 2.0
                i += 1
            return a

    models = [
        SimpleIf(10, 20),
        NestedIf(10, 20),
        ScalarLoop(),
        SimpleLoop(),
        LoopWithIf(),
        SimpleScalarWhileLoop(),
        SimpleWhileLoop(),
        NestedLoop(),
    ]

    for pt_model in models:
        verify_script_model(pt_model.eval(), [(10, 20)], _get_default_vm_targets())
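

# Note: the control-flow models above must go through verify_script_model,
# since torch.jit.script preserves data-dependent branches and loops
# (prim::If / prim::Loop), whereas torch.jit.trace would freeze the path taken
# for the example input. A self-contained sketch (the helper name is ours):
def _example_script_keeps_branches():
    class TinyIf(torch.nn.Module):
        def forward(self, x):
            if x.sum() > 0.0:
                return x + 1.0
            return x - 1.0

    # The scripted graph still contains the branch; a trace would not.
    return torch.jit.script(TinyIf().eval())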
DecisionGate(torch.nn.Module):\n def forward(self, x):\n if x.sum() > 0:\n return x\n else:\n return -x\n\n class Cell(torch.nn.Module):\n def __init__(self, dg):\n super(Cell, self).__init__()\n self.dg = dg\n self.linear = torch.nn.Linear(4, 4)\n\n def forward(self, x, h):\n new_h = torch.tanh(self.dg(self.linear(x)) + h)\n return new_h, new_h\n\n class RNNLoop(torch.nn.Module):\n def __init__(self):\n super().__init__()\n x = torch.rand(10, 4, dtype=torch.float)\n h = torch.rand(10, 4, dtype=torch.float)\n self.cell = torch.jit.trace(Cell(DecisionGate()), (x, h))\n\n def forward(self, xs):\n h = torch.zeros(10, 4, dtype=torch.float)\n y = torch.zeros(10, 4, dtype=torch.float)\n for i in range(xs.size(0)):\n y, h = self.cell(xs[i], h)\n return y\n\n verify_script_model(RNNLoop().eval(), [(10, 10, 4)], _get_default_vm_targets())\n\n\[email protected]_gpu\ndef test_forward_reduce_sum():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class ReduceSum1(Module):\n def forward(self, *args):\n return args[0].sum(1)\n\n class ReduceSum2(Module):\n def forward(self, *args):\n return args[0].sum(dim=1, keepdim=False)\n\n class ReduceSum3(Module):\n def forward(self, *args):\n return args[0].sum(dim=2, keepdim=True)\n\n class ReduceSum4(Module):\n def forward(self, *args):\n return args[0].sum(dim=(2, 3), keepdim=True)\n\n class ReduceSum5(Module):\n def forward(self, *args):\n return args[0].sum(dim=(2, 3), keepdim=False)\n\n input_data = torch.rand(input_shape).float()\n verify_model(ReduceSum1().float().eval(), input_data=input_data)\n verify_model(ReduceSum2().float().eval(), input_data=input_data)\n verify_model(ReduceSum3().float().eval(), input_data=input_data)\n verify_model(ReduceSum4().float().eval(), input_data=input_data)\n verify_model(ReduceSum5().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_reduce_prod():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class ReduceProd1(Module):\n def forward(self, *args):\n return args[0].prod(1)\n\n class ReduceProd2(Module):\n def forward(self, *args):\n return args[0].prod(dim=1, keepdim=False)\n\n class ReduceProd3(Module):\n def forward(self, *args):\n return args[0].prod(dim=2, keepdim=True)\n\n input_data = torch.rand(input_shape).float()\n verify_model(ReduceProd1().float().eval(), input_data=input_data)\n verify_model(ReduceProd2().float().eval(), input_data=input_data)\n verify_model(ReduceProd3().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_argmin():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class ArgMin1(Module):\n def forward(self, *args):\n return args[0].argmin(1)\n\n class ArgMin2(Module):\n def forward(self, *args):\n return args[0].argmin(dim=1, keepdim=False)\n\n class ArgMin3(Module):\n def forward(self, *args):\n return args[0].argmin(dim=2, keepdim=True)\n\n input_data = torch.rand(input_shape).float()\n verify_model(ArgMin1().float().eval(), input_data=input_data)\n verify_model(ArgMin2().float().eval(), input_data=input_data)\n verify_model(ArgMin3().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_argmax():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class ArgMax1(Module):\n def forward(self, *args):\n return args[0].argmax(1)\n\n class ArgMax2(Module):\n def forward(self, *args):\n return args[0].argmax(dim=1, keepdim=False)\n\n class ArgMax3(Module):\n def forward(self, *args):\n return args[0].argmax(dim=2, keepdim=True)\n\n 
input_data = torch.rand(input_shape).float()\n verify_model(ArgMax1().float().eval(), input_data=input_data)\n verify_model(ArgMax2().float().eval(), input_data=input_data)\n verify_model(ArgMax3().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_std():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class Std1(Module):\n def forward(self, *args):\n return args[0].std(1, unbiased=False)\n\n class Std2(Module):\n def forward(self, *args):\n return args[0].std(dim=1, keepdim=False, unbiased=False)\n\n class Std3(Module):\n def forward(self, *args):\n return args[0].std(dim=2, keepdim=True, unbiased=False)\n\n class Std4(Module):\n def forward(self, *args):\n return args[0].std(dim=(2, 3), keepdim=True, unbiased=False)\n\n class Std5(Module):\n def forward(self, *args):\n return args[0].std(dim=(2, 3), keepdim=False, unbiased=False)\n\n class Std6(Module):\n def forward(self, *args):\n return args[0].std(unbiased=False)\n\n class Std7(Module):\n def forward(self, *args):\n return args[0].std(dim=1, keepdim=False, unbiased=True)\n\n class Std8(Module):\n def forward(self, *args):\n return args[0].std(dim=(2, 3), keepdim=True, unbiased=True)\n\n class Std9(Module):\n def forward(self, *args):\n return args[0].std(unbiased=True)\n\n input_data = torch.rand(input_shape).float()\n verify_model(Std1().float().eval(), input_data=input_data)\n verify_model(Std2().float().eval(), input_data=input_data)\n verify_model(Std3().float().eval(), input_data=input_data)\n verify_model(Std4().float().eval(), input_data=input_data)\n verify_model(Std5().float().eval(), input_data=input_data)\n verify_model(Std6().float().eval(), input_data=input_data)\n verify_model(Std7().float().eval(), input_data=input_data)\n verify_model(Std8().float().eval(), input_data=input_data)\n verify_model(Std9().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_variance():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class Variance1(Module):\n def forward(self, *args):\n return args[0].var(1, unbiased=False)\n\n class Variance2(Module):\n def forward(self, *args):\n return args[0].var(dim=1, keepdim=False, unbiased=False)\n\n class Variance3(Module):\n def forward(self, *args):\n return args[0].var(dim=2, keepdim=True, unbiased=False)\n\n class Variance4(Module):\n def forward(self, *args):\n return args[0].var(dim=(2, 3), keepdim=True, unbiased=False)\n\n class Variance5(Module):\n def forward(self, *args):\n return args[0].var(dim=(2, 3), keepdim=False, unbiased=False)\n\n class Variance6(Module):\n def forward(self, *args):\n return args[0].var(unbiased=False)\n\n class Variance7(Module):\n def forward(self, *args):\n return args[0].var(dim=1, keepdim=False, unbiased=True)\n\n class Variance8(Module):\n def forward(self, *args):\n return args[0].var(dim=(2, 3), keepdim=True, unbiased=True)\n\n class Variance9(Module):\n def forward(self, *args):\n return args[0].var(unbiased=True)\n\n input_data = torch.rand(input_shape).float()\n verify_model(Variance1().float().eval(), input_data=input_data)\n verify_model(Variance2().float().eval(), input_data=input_data)\n verify_model(Variance3().float().eval(), input_data=input_data)\n verify_model(Variance4().float().eval(), input_data=input_data)\n verify_model(Variance5().float().eval(), input_data=input_data)\n verify_model(Variance6().float().eval(), input_data=input_data)\n verify_model(Variance7().float().eval(), input_data=input_data)\n 
verify_model(Variance8().float().eval(), input_data=input_data)\n verify_model(Variance9().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_rsub():\n torch.set_grad_enabled(False)\n\n class Rsub1(Module):\n def forward(self, *args):\n return torch.rsub(args[0], args[1])\n\n class Rsub2(Module):\n def forward(self, *args):\n return torch.rsub(args[0], args[1], alpha=0.5)\n\n d1 = torch.rand([1, 3]).float()\n d2 = torch.rand([1, 3]).float()\n d3 = torch.rand([1, 3]).int()\n verify_model(Rsub1().float().eval(), input_data=[d1, d2])\n verify_model(Rsub1().float().eval(), input_data=[d1, d3])\n verify_model(Rsub2().float().eval(), input_data=[d1, d2])\n verify_model(Rsub2().float().eval(), input_data=[d1, d3])\n\n\[email protected]_gpu\ndef test_forward_embedding():\n torch.set_grad_enabled(False)\n\n input_data = torch.randint(0, 10, [2, 4]).long()\n verify_model(torch.nn.Embedding(10, 3).float().eval(), input_data=input_data)\n\n input_data = torch.randint(0, 4, [2, 3, 4]).long()\n verify_model(torch.nn.Embedding(4, 5, sparse=False).float().eval(), input_data=input_data)\n\n input_data = torch.randint(0, 4, [2, 3, 4]).long()\n verify_model(torch.nn.Embedding(4, 5, sparse=True).float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_onehot():\n torch.set_grad_enabled(False)\n\n class OneHot1(Module):\n def forward(self, *args):\n return torch.nn.functional.one_hot(args[0], num_classes=3)\n\n class OneHot2(Module):\n def forward(self, *args):\n return torch.nn.functional.one_hot(args[0], num_classes=5)\n\n input_data = torch.arange(0, 5) % 3\n verify_model(OneHot1().float().eval(), input_data=input_data)\n\n input_data = torch.arange(0, 5) % 4\n verify_model(OneHot2().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_isfinite():\n torch.set_grad_enabled(False)\n\n class IsFinite1(Module):\n def forward(self, *args):\n return torch.isfinite(args[0])\n\n input_data = torch.tensor([1, float(\"inf\"), 2, float(\"-inf\"), float(\"nan\")]).float()\n verify_model(IsFinite1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_isnan():\n torch.set_grad_enabled(False)\n\n class IsNan1(Module):\n def forward(self, *args):\n return torch.isnan(args[0])\n\n input_data = torch.tensor([1, float(\"inf\"), 2, float(\"-inf\"), float(\"nan\")]).float()\n verify_model(IsNan1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_isinf():\n torch.set_grad_enabled(False)\n\n class IsInf1(Module):\n def forward(self, *args):\n return torch.isinf(args[0])\n\n input_data = torch.tensor([1, float(\"inf\"), 2, float(\"-inf\"), float(\"nan\")]).float()\n verify_model(IsInf1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_clamp():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class Clamp1(Module):\n def forward(self, *args):\n return torch.clamp(args[0], min=-0.5, max=0.5)\n\n class Clamp2(Module):\n def forward(self, *args):\n return torch.clamp(args[0], min=-0.3)\n\n class Clamp3(Module):\n def forward(self, *args):\n return torch.clamp(args[0], max=1.0)\n\n class Clamp_MinExpr_MaxConstant(Module):\n def forward(self, *args):\n h, w = args[0].shape[2:]\n amin = h / 100.0\n return torch.clamp(args[0], min=amin, max=w)\n\n input_data = torch.rand(input_shape).float()\n verify_model(Clamp1().float().eval(), input_data=input_data)\n verify_model(Clamp2().float().eval(), input_data=input_data)\n 
verify_model(Clamp3().float().eval(), input_data=input_data)\n verify_model(Clamp_MinExpr_MaxConstant().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_clamp_():\n torch.set_grad_enabled(False)\n\n class ClampInPlace(Module):\n def __init__(self, min, max):\n super(ClampInPlace, self).__init__()\n self.min = min\n self.max = max\n\n def forward(self, *args):\n return torch.clamp_(args[0], self.min, self.max)\n\n for ishape, min, max in (([4, 8], 0.1, 0.9), ([7, 6], 0.2, 0.5)):\n input_data = torch.rand(ishape).float()\n verify_model(ClampInPlace(min, max).float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_ones():\n torch.set_grad_enabled(False)\n\n class Ones1(Module):\n def forward(self, *args):\n return torch.ones(2, 3)\n\n verify_model(Ones1().float().eval(), input_data=[])\n\n\[email protected]_gpu\ndef test_forward_ones_like():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class OnesLike1(Module):\n def forward(self, *args):\n return torch.ones_like(args[0])\n\n class OnesLike2(Module):\n def forward(self, *args):\n return torch.ones_like(args[0], dtype=torch.int8)\n\n class OnesLike3(Module):\n def forward(self, *args):\n return torch.ones_like(args[0], dtype=torch.float)\n\n input_data = torch.rand(input_shape).float()\n verify_model(OnesLike1().float().eval(), input_data=input_data)\n verify_model(OnesLike2().float().eval(), input_data=input_data)\n verify_model(OnesLike3().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_zeros():\n torch.set_grad_enabled(False)\n\n class Zeros1(Module):\n def forward(self, *args):\n return torch.zeros(2, 3)\n\n verify_model(Zeros1().float().eval(), input_data=[])\n\n\[email protected]_gpu\ndef test_forward_zeros_like():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class ZerosLike1(Module):\n def forward(self, *args):\n return torch.zeros_like(args[0])\n\n class ZerosLike2(Module):\n def forward(self, *args):\n return torch.zeros_like(args[0], dtype=torch.int32)\n\n class ZerosLike3(Module):\n def forward(self, *args):\n return torch.zeros_like(args[0], dtype=torch.float)\n\n input_data = torch.rand(input_shape).float()\n verify_model(ZerosLike1().float().eval(), input_data=input_data)\n verify_model(ZerosLike2().float().eval(), input_data=input_data)\n verify_model(ZerosLike3().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_full():\n torch.set_grad_enabled(False)\n\n class Full1(Module):\n def forward(self, *args):\n return torch.full((2, 3), 3.14)\n\n class Full2(Module):\n def forward(self, *args):\n return torch.full((1, 2, 3), 1.0, dtype=torch.int32)\n\n verify_model(Full1().float().eval(), input_data=[])\n verify_model(Full2().float().eval(), input_data=[])\n\n\[email protected]_gpu\ndef test_forward_full_like():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class FullLike1(Module):\n def forward(self, *args):\n return torch.full_like(args[0], 3.14)\n\n class FullLike2(Module):\n def forward(self, *args):\n return torch.full_like(args[0], 22.22, dtype=torch.int32)\n\n class FullLike3(Module):\n def forward(self, *args):\n return torch.full_like(args[0], 1.4, dtype=torch.float)\n\n input_data = torch.rand(input_shape).float()\n verify_model(FullLike1().float().eval(), input_data=input_data)\n verify_model(FullLike2().float().eval(), input_data=input_data)\n verify_model(FullLike3().float().eval(), input_data=input_data)\n\n\[email 
protected]_gpu\ndef test_forward_linspace():\n torch.set_grad_enabled(False)\n\n class Linspace1(Module):\n def forward(self, *args):\n return torch.linspace(5, 10, steps=100)\n\n class Linspace2(Module):\n def forward(self, *args):\n return torch.linspace(-10, 10, steps=5)\n\n class Linspace3(Module):\n def forward(self, *args):\n return torch.linspace(start=-10, end=10, steps=5)\n\n class Linspace4(Module):\n def forward(self, *args):\n return torch.linspace(start=-10, end=10, steps=1)\n\n class Linspace5(Module):\n def forward(self, *args):\n return torch.linspace(1, 2, 1, dtype=torch.int32)\n\n class Linspace6(Module):\n def forward(self, *args):\n return torch.linspace(start=1, end=6, steps=2)\n\n class Linspace7(Module):\n def forward(self, *args):\n return torch.linspace(1, 4, steps=100, dtype=torch.float32)\n\n class Linspace8(Module):\n def forward(self, *args):\n return torch.linspace(1, 2, 1, dtype=torch.int16)\n\n verify_model(Linspace1().float().eval())\n verify_model(Linspace2().float().eval())\n verify_model(Linspace3().float().eval())\n verify_model(Linspace4().float().eval())\n verify_model(Linspace5().float().eval())\n verify_model(Linspace6().float().eval())\n verify_model(Linspace7().float().eval())\n verify_model(Linspace8().float().eval())\n\n\[email protected]_gpu\ndef test_forward_take():\n torch.set_grad_enabled(False)\n\n class Take1(Module):\n def forward(self, *args):\n indices = torch.tensor([[0, 0], [1, 0]])\n if torch.cuda.is_available():\n indices = indices.cuda()\n return torch.take(args[0], indices)\n\n class Take2(Module):\n def forward(self, *args):\n return torch.take(args[0], args[1])\n\n input_data = torch.tensor([[1, 2], [3, 4]])\n verify_model(Take1().float().eval(), input_data=input_data)\n indices = torch.tensor([[0, 0], [1, 0]])\n verify_model(Take2().float().eval(), input_data=[input_data, indices])\n indices = torch.tensor([0, -1])\n verify_model(Take2().float().eval(), input_data=[input_data, indices])\n\n\[email protected]_gpu\ndef test_forward_topk():\n torch.set_grad_enabled(False)\n\n class Topk1(Module):\n def forward(self, *args):\n return torch.topk(args[0], k=3)\n\n class Topk2(Module):\n def forward(self, *args):\n return torch.topk(args[0], k=3, dim=-2)\n\n class Topk3(Module):\n def forward(self, *args):\n return torch.topk(args[0], k=3, dim=3)\n\n class Topk4(Module):\n def forward(self, *args):\n return torch.topk(args[0], k=3, largest=True)\n\n class Topk5(Module):\n def forward(self, *args):\n return torch.topk(args[0], k=3, largest=False)\n\n class Topk6(Module):\n def forward(self, *args):\n return torch.topk(args[0], k=3, sorted=True)\n\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(Topk1().float().eval(), input_data=input_data)\n verify_model(Topk2().float().eval(), input_data=input_data)\n verify_model(Topk3().float().eval(), input_data=input_data)\n verify_model(Topk4().float().eval(), input_data=input_data)\n verify_model(Topk5().float().eval(), input_data=input_data)\n verify_model(Topk6().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_logical_not():\n torch.set_grad_enabled(False)\n\n class LogicalNot1(Module):\n def forward(self, *args):\n return torch.logical_not(args[0])\n\n input_data = torch.tensor([True, False])\n verify_model(LogicalNot1().float().eval(), input_data=input_data)\n\n input_data = torch.tensor([0, 1, -10], dtype=torch.int8)\n verify_model(LogicalNot1().float().eval(), input_data=input_data)\n\n input_data = 
torch.tensor([0.0, 1.5, -10.0], dtype=torch.double)\n verify_model(LogicalNot1().float().eval(), input_data=input_data)\n\n input_data = torch.tensor([0.0, 1.0, -10.0], dtype=torch.int32)\n verify_model(LogicalNot1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_bitwise_not():\n torch.set_grad_enabled(False)\n\n class BitwiseNot1(Module):\n def forward(self, *args):\n return torch.bitwise_not(args[0])\n\n input_data = torch.tensor([0, 1, -10], dtype=torch.int8)\n verify_model(BitwiseNot1().float().eval(), input_data=input_data)\n\n input_data = torch.tensor([0.0, 1.0, -10.0], dtype=torch.int32)\n verify_model(BitwiseNot1().float().eval(), input_data=input_data)\n\n input_data = torch.tensor([True, False])\n verify_model(BitwiseNot1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_bitwise_xor():\n torch.set_grad_enabled(False)\n\n class BitwiseXor1(Module):\n def forward(self, *args):\n return torch.bitwise_xor(args[0], args[1])\n\n class BitwiseXor2(Module):\n def forward(self, *args):\n rhs = torch.tensor([1, 0, 3], dtype=torch.int8)\n if torch.cuda.is_available():\n rhs = rhs.cuda()\n return torch.bitwise_xor(args[0], rhs)\n\n lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)\n rhs = torch.tensor([1, 0, 3], dtype=torch.int8)\n verify_model(BitwiseXor1().float().eval(), input_data=[lhs, rhs])\n\n lhs = torch.tensor([True, True, False])\n rhs = torch.tensor([False, True, False])\n verify_model(BitwiseXor1().float().eval(), input_data=[lhs, rhs])\n\n lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)\n verify_model(BitwiseXor2().float().eval(), input_data=[lhs])\n\n\[email protected]_gpu\ndef test_forward_logical_xor():\n torch.set_grad_enabled(False)\n\n class LogicalXor1(Module):\n def forward(self, *args):\n return torch.logical_xor(args[0], args[1])\n\n class LogicalXor2(Module):\n def forward(self, *args):\n rhs = torch.tensor([1, 0, 3], dtype=torch.int8)\n if torch.cuda.is_available():\n rhs = rhs.cuda()\n return torch.logical_xor(args[0], rhs)\n\n lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)\n rhs = torch.tensor([1, 0, 3], dtype=torch.int8)\n verify_model(LogicalXor1().float().eval(), input_data=[lhs, rhs])\n\n lhs = torch.tensor([True, True, False])\n rhs = torch.tensor([False, True, False])\n verify_model(LogicalXor1().float().eval(), input_data=[lhs, rhs])\n\n lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)\n verify_model(LogicalXor2().float().eval(), input_data=[lhs])\n\n\[email protected]_gpu\ndef test_forward_unary():\n torch.set_grad_enabled(False)\n\n class Sqrt1(Module):\n def forward(self, *args):\n return torch.sqrt(args[0])\n\n class RSqrt1(Module):\n def forward(self, *args):\n return torch.rsqrt(args[0])\n\n class Ceil1(Module):\n def forward(self, *args):\n return torch.ceil(args[0])\n\n class Floor1(Module):\n def forward(self, *args):\n return torch.floor(args[0])\n\n class Round1(Module):\n def forward(self, *args):\n return torch.round(args[0])\n\n class Cos1(Module):\n def forward(self, *args):\n return torch.cos(args[0])\n\n class Sin1(Module):\n def forward(self, *args):\n return torch.sin(args[0])\n\n class Tan1(Module):\n def forward(self, *args):\n return torch.tan(args[0])\n\n class Tanh1(Module):\n def forward(self, *args):\n return torch.tanh(args[0])\n\n class Acos1(Module):\n def forward(self, *args):\n return torch.acos(args[0])\n\n class Asin1(Module):\n def forward(self, *args):\n return torch.asin(args[0])\n\n class Atan1(Module):\n def forward(self, *args):\n return 
torch.atan(args[0])\n\n class Log1(Module):\n def forward(self, *args):\n return torch.log(args[0])\n\n class Exp1(Module):\n def forward(self, *args):\n return torch.exp(args[0])\n\n class Erf1(Module):\n def forward(self, *args):\n return torch.erf(args[0])\n\n class Trunc1(Module):\n def forward(self, *args):\n return torch.trunc(args[0])\n\n class Sign1(Module):\n def forward(self, *args):\n return torch.sign(args[0])\n\n class Neg1(Module):\n def forward(self, *args):\n return torch.neg(args[0])\n\n class Sinh1(Module):\n def forward(self, *args):\n return torch.sinh(args[0])\n\n class Cosh1(Module):\n def forward(self, *args):\n return torch.cosh(args[0])\n\n class Log2_1(Module):\n def forward(self, *args):\n return torch.log2(args[0])\n\n class Log10_1(Module):\n def forward(self, *args):\n return torch.log10(args[0])\n\n class Log1p_1(Module):\n def forward(self, *args):\n return torch.log1p(args[0])\n\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(Sqrt1().float().eval(), input_data=input_data)\n verify_model(RSqrt1().float().eval(), input_data=input_data)\n verify_model(Ceil1().float().eval(), input_data=input_data)\n verify_model(Floor1().float().eval(), input_data=input_data)\n verify_model(Round1().float().eval(), input_data=input_data)\n verify_model(Cos1().float().eval(), input_data=input_data)\n verify_model(Cosh1().float().eval(), input_data=input_data)\n verify_model(Sin1().float().eval(), input_data=input_data)\n verify_model(Sinh1().float().eval(), input_data=input_data)\n verify_model(Tan1().float().eval(), input_data=input_data)\n verify_model(Tanh1().float().eval(), input_data=input_data)\n verify_model(Acos1().float().eval(), input_data=input_data)\n verify_model(Asin1().float().eval(), input_data=input_data)\n verify_model(Atan1().float().eval(), input_data=input_data)\n verify_model(Log1().float().eval(), input_data=input_data)\n verify_model(Log2_1().float().eval(), input_data=input_data)\n verify_model(Log10_1().float().eval(), input_data=input_data)\n verify_model(Log1p_1().float().eval(), input_data=input_data)\n verify_model(Exp1().float().eval(), input_data=input_data)\n verify_model(Erf1().float().eval(), input_data=input_data)\n verify_model(Trunc1().float().eval(), input_data=input_data)\n verify_model(Sign1().float().eval(), input_data=input_data)\n verify_model(Neg1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_where():\n torch.set_grad_enabled(False)\n\n class Where1(Module):\n def forward(self, *args):\n y = torch.ones([3, 2])\n if torch.cuda.is_available():\n y = y.cuda()\n return torch.where(args[0] > 0, args[0], y)\n\n class Where2(Module):\n def forward(self, *args):\n return torch.where(args[0] > 0, args[0], args[1])\n\n class Where3(Module):\n def forward(self, *args):\n return torch.where(args[0])[0]\n\n x = torch.rand([3, 2]).float()\n verify_model(Where1(), input_data=[x])\n y = torch.rand([3, 2])\n verify_model(Where2(), input_data=[x, y])\n\n # a single argument variant, equivalent to torch.nonzero(..., as_tuple=True)\n inp = torch.rand([10])\n inp[3:8] = 0\n verify_trace_model(Where3(), [inp], [\"llvm\"])\n\n\[email protected]_gpu\ndef test_forward_addcdiv():\n torch.set_grad_enabled(False)\n\n class Addcdiv1(Module):\n def forward(self, *args):\n t1 = torch.ones([3, 1])\n t2 = torch.ones([1, 3])\n if torch.cuda.is_available():\n t1 = t1.cuda()\n t2 = t2.cuda()\n return torch.addcdiv(args[0], 0.1, t1, t2)\n\n class Addcdiv2(Module):\n def forward(self, 
*args):\n return torch.addcdiv(args[0], 0.5, args[1], args[2])\n\n input_data = torch.rand([1, 3]).float()\n verify_model(Addcdiv1().float().eval(), input_data=input_data)\n t1 = torch.rand([3, 1]).float()\n t2 = torch.rand([1, 3]).float()\n verify_model(Addcdiv2().float().eval(), input_data=[input_data, t1, t2])\n\n\[email protected]_gpu\ndef test_forward_addcmul():\n torch.set_grad_enabled(False)\n\n class Addcmul1(Module):\n def forward(self, *args):\n t1 = torch.ones([3, 1])\n t2 = torch.ones([1, 3])\n if torch.cuda.is_available():\n t1 = t1.cuda()\n t2 = t2.cuda()\n return torch.addcmul(args[0], 0.1, t1, t2)\n\n class Addcmul2(Module):\n def forward(self, *args):\n return torch.addcmul(args[0], 0.5, args[1], args[2])\n\n input_data = torch.rand([1, 3]).float()\n verify_model(Addcmul1().float().eval(), input_data=input_data)\n t1 = torch.rand([3, 1]).float()\n t2 = torch.rand([1, 3]).float()\n verify_model(Addcmul2().float().eval(), input_data=[input_data, t1, t2])\n\n\[email protected]_gpu\ndef test_forward_true_divide():\n if package_version.parse(torch.__version__) < package_version.parse(\"1.5.0\"):\n return\n torch.set_grad_enabled(False)\n\n class TrueDivide(Module):\n def forward(self, *args):\n return torch.true_divide(args[0], args[1])\n\n dividend = torch.rand([5, 3]).float()\n # divisor could be either tensor or scalar\n divisor_tensor = torch.rand([5, 3]).float() + 0.5\n divisor_scalar = torch.tensor(1.0, dtype=torch.float32)\n verify_model(\n TrueDivide().float().eval(), input_data=[dividend, divisor_tensor], atol=1e-4, rtol=1e-4\n )\n verify_model(\n TrueDivide().float().eval(), input_data=[dividend, divisor_scalar], atol=1e-4, rtol=1e-4\n )\n\n\[email protected]_gpu\ndef test_forward_is_floating_point():\n torch.set_grad_enabled(False)\n\n class IsFloatingPoint(Module):\n def forward(self, arg):\n # `torch.jit.trace` cannot accept something that outputs\n # a Bool, so `torch.jit.script` will be used instead\n return torch.is_floating_point(arg)\n\n targets = _get_default_vm_targets()\n verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.float64)\n verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.float32)\n verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.float16)\n # todo(dvisnty): Run the test for bfloat16 when full bfloat16 support is implemented\n # verify_script_model(IsFloatingPoint(), [(1,1)], targets, idtype=torch.bfloat16)\n verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.int64)\n verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.int32)\n verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.int16)\n verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.int8)\n verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.uint8)\n\n\[email protected]_gpu\ndef test_forward_traced_function():\n def fn(t1, t2):\n return t1 + t2\n\n tensor1 = torch.randn(3, 4)\n tensor2 = torch.randn(3, 4)\n verify_model(fn, input_data=[tensor1, tensor2])\n\n\[email protected]_gpu\ndef test_forward_dtypes():\n def fn(t1, t2):\n return 2.5 * t1 + t2\n\n for dt in [torch.int32, torch.int64, torch.double]:\n tensor1 = torch.randn(3, 4).to(dtype=dt)\n tensor2 = torch.randn(3, 4).to(dtype=dt)\n verify_model(fn, input_data=[tensor1, tensor2])\n\n class ModuleWithIntParameters(Module):\n def __init__(self, arr):\n super().__init__()\n self.param = torch.nn.Parameter(torch.LongTensor(arr), requires_grad=False)\n\n def forward(self, x):\n return 
x.long() + self.param\n\n    shape = (10, 10)\n    param = torch.ones(shape, dtype=torch.long)\n    inp = torch.ones(shape, dtype=torch.int)\n    verify_model(ModuleWithIntParameters(param), input_data=inp)\n\n\n@tvm.testing.uses_gpu\ndef test_weight_names():\n    tm = torch.jit.trace(torch.nn.Linear(3, 4), [torch.randn(2, 3)])\n    mod, params = relay.frontend.from_pytorch(tm, [(\"input\", (2, 3))])\n    assert set(params.keys()) == set(n for n, p in tm.named_parameters())\n\n\n@tvm.testing.uses_gpu\ndef test_duplicate_weight_use():\n    # The test case doesn't make any sense as a neural network;\n    # the issue popped up in the shared input/output embeddings of BERT,\n    # but this test is quicker\n    class Test(Module):\n        def __init__(self):\n            super().__init__()\n            self.lin = torch.nn.Linear(5, 3)\n\n        def forward(self, x):\n            x = self.lin(x)\n            x = x @ self.lin.weight\n            return x\n\n    verify_model(Test(), input_data=[torch.randn(5, 5)])\n\n\n@tvm.testing.uses_gpu\ndef test_forward_matmul():\n    torch.set_grad_enabled(False)\n\n    class MatMul1(Module):\n        def forward(self, *args):\n            return torch.matmul(args[0], args[1])\n\n    # matrix x vector\n    tensor1 = torch.randn(3, 4)\n    tensor2 = torch.randn(4)\n    verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])\n\n    # matrix x matrix\n    tensor1 = torch.randn(10, 4)\n    tensor2 = torch.randn(4, 10)\n    verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2], expected_ops=[\"nn.dense\"])\n\n    # batched matrix x batched matrix\n    tensor1 = torch.randn(10, 3, 4)\n    tensor2 = torch.randn(10, 4, 5)\n    verify_model(\n        MatMul1().float().eval(), input_data=[tensor1, tensor2], expected_ops=[\"nn.batch_matmul\"]\n    )\n\n    # batched matrix x broadcasted matrix\n    tensor1 = torch.randn(10, 3, 4)\n    tensor2 = torch.randn(4, 5)\n    verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2], expected_ops=[\"nn.dense\"])\n\n    # broadcasted matrix x batched matrix\n    tensor1 = torch.randn(10, 4)\n    tensor2 = torch.randn(3, 4, 5)\n    verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2], expected_ops=[\"nn.dense\"])\n\n    # batched matrix x batched matrix\n    tensor1 = torch.randn(1, 12, 14, 64)\n    tensor2 = torch.randn(1, 12, 64, 14)\n    verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])\n\n\ndef test_forward_index():\n    torch.set_grad_enabled(False)\n    input_shape = [3, 4, 5, 6]\n\n    class Index0(Module):\n        def forward(self, x):\n            return x[[0, 1], [0, 2], :2, 4]\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(Index0().eval(), input_data=input_data)\n\n    class Index1(Module):\n        def forward(self, x):\n            return x[[0], [1, 2, 3, 0], [3, 1, 2, 2], [4, 2, 1, 0]]\n\n    input_data = torch.rand(input_shape).float()\n    verify_model(Index1().eval(), input_data=input_data)\n\n\ndef test_logsumexp():\n    class Logsumexp(Module):\n        def __init__(self, dim, keepdim=False):\n            super().__init__()\n            self.dim = dim\n            self.keepdim = keepdim\n\n        def forward(self, x):\n            return torch.logsumexp(x, self.dim, self.keepdim)\n\n    input_shape = (100, 100)\n    input_data = torch.rand(input_shape)\n\n    verify_model(Logsumexp(0), input_data=input_data)\n    verify_model(Logsumexp(0, keepdim=True), input_data=input_data)\n    # Also test on double\n    verify_model(Logsumexp(1, keepdim=True), input_data=input_data.double())\n\n\ndef test_stack():\n    class Stack(torch.nn.Module):\n        def __init__(self, axis=0):\n            super().__init__()\n            self.axis = axis\n\n        def forward(self, x):\n            return torch.stack((x, x), dim=self.axis)\n\n    inp = torch.randn(8, 8, 8)\n    verify_model(Stack(), input_data=inp)\n    
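# a couple of positive-axis cases (hypothetical extra coverage, using the\n    # same verify_model harness as the surrounding checks):\n    verify_model(Stack(axis=1), input_data=inp)\n    verify_model(Stack(axis=2), input_data=inp)\n    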
verify_model(Stack(axis=-1), input_data=inp)\n verify_model(Stack(axis=3), input_data=inp)\n verify_model(Stack(axis=-4), input_data=inp)\n\n\ndef test_stack_dynamic():\n class Stack(torch.nn.Module):\n def forward(self, x):\n tensor_list = []\n for i in range(x.size(0)):\n # this is a workaround to avoid generating impure aten::append op\n tensor_list += [x[i]]\n # relay tensor array only supports stacking on the first axis\n return torch.stack(tensor_list, dim=0)\n\n verify_script_model(Stack(), [(8, 8, 8)], _get_default_vm_targets())\n\n\ndef test_forward_unbind():\n class Unbind(torch.nn.Module):\n def __init__(self, axis=0):\n super().__init__()\n self.axis = axis\n\n def forward(self, x):\n return torch.unbind(x, self.axis)\n\n inp = torch.randn(8, 8, 8)\n verify_model(Unbind(0), input_data=inp)\n verify_model(Unbind(1), input_data=inp)\n verify_model(Unbind(2), input_data=inp)\n\n\ndef test_forward_nonzero():\n class Nonzero(Module):\n def __init__(self, as_tuple=False):\n super().__init__()\n self.as_tuple = as_tuple\n\n def forward(self, data):\n return torch.nonzero(data, as_tuple=self.as_tuple)\n\n inp = torch.Tensor(np.array([[0, 1, 0], [2, 0, 9], [-1, -1, 0]]).astype(\"float32\"))\n verify_trace_model(Nonzero(), [inp], [\"llvm\"])\n\n\ndef test_forward_scatter():\n # integer cannot be traced\n def test_fn_scatter(dim):\n return lambda data, index, src: torch.scatter(data, dim=dim, index=index, src=src)\n\n def test_fn_scatter_add(dim):\n return lambda data, index, src: torch.scatter_add(data, dim=dim, index=index, src=src)\n\n in_data = torch.zeros(3, 5)\n in_index = torch.tensor([[0, 1, 2, 0, 0], [2, 0, 0, 1, 2]])\n in_src = torch.rand(2, 5)\n\n targets = [\"llvm\", \"cuda\"]\n verify_trace_model(test_fn_scatter(0), [in_data, in_index, in_src], targets)\n verify_trace_model(test_fn_scatter_add(0), [in_data, in_index, in_src], targets)\n\n in_data = torch.zeros(2, 4)\n in_index = torch.tensor([[2], [3]])\n in_src = torch.rand(2, 1)\n\n verify_trace_model(test_fn_scatter(1), [in_data, in_index, in_src], targets)\n verify_trace_model(test_fn_scatter_add(1), [in_data, in_index, in_src], targets)\n\n\ndef test_forward_index_put():\n # torch.index_put for 2D tensor and default accumulate (False)\n def test_fn_index_put2():\n return lambda data, xidx, yidx, values: torch.index_put(\n data, indices=[xidx, yidx], values=values\n )\n\n # torch.index_put for 3D tensor and accumulate=True\n def test_fn_index_put3a():\n return lambda data, xidx, yidx, zidx, values: torch.index_put(\n data, indices=[xidx, yidx, zidx], values=values, accumulate=True\n )\n\n shape = (3, 5)\n in_data = torch.zeros(shape)\n xidx = torch.tensor([0, 1, 2, 2])\n yidx = torch.tensor([0, 1, 3, 4])\n values = torch.tensor([2.0, 4.0, 7.0, 9.0])\n\n targets = [\"llvm\", \"cuda\"]\n verify_trace_model(test_fn_index_put2(), [in_data, xidx, yidx, values], targets)\n\n shape = (3, 5, 3)\n in_data = torch.zeros(shape)\n xidx = torch.tensor([0, 1, 2, 2, 0])\n yidx = torch.tensor([0, 1, 3, 4, 0])\n zidx = torch.tensor([0, 1, 1, 2, 0])\n values = torch.tensor([2.0, 4.0, 7.0, 9.0, 1.0])\n\n verify_trace_model(test_fn_index_put3a(), [in_data, xidx, yidx, zidx, values], targets)\n\n\ndef test_numel():\n class Numel(Module):\n def forward(self, data):\n return torch.tensor(torch.numel(data))\n\n targets = _get_default_vm_targets()\n verify_script_model(Numel(), [(1,)], targets)\n verify_script_model(Numel(), [(3, 5)], targets)\n verify_script_model(Numel(), [(3, 5, 8)], targets)\n\n\ndef 
test_forward_pretrained_bert_base_uncased():\n    ######################################################################\n    # This is an example of how to run BERT models using TVM\n    # ------------------------------------------------------\n    \"\"\"\n    Refer to the BERT example given in https://pypi.org/project/pytorch-pretrained-bert\n\n    # To get started, the pretrained BERT package needs to be installed as a prerequisite.\n\n    .. code-block:: bash\n\n        # install bert package\n        pip install pytorch_pretrained_bert==0.6.2 --user\n    \"\"\"\n\n    try:\n        from pytorch_pretrained_bert import BertForMaskedLM, BertTokenizer\n    except ImportError:\n        print(\"Torch pretrained bert package must be installed to run this script.\")\n        return\n\n    ######################################################################\n    # Load the tokenizer and tokenize the input\n    # -----------------------------------------\n\n    # Load the pre-trained model tokenizer (vocabulary)\n    tokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\")\n\n    # Tokenized input\n    text = \"[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]\"\n    tokenized_text = tokenizer.tokenize(text)\n\n    # Mask a token that we will try to predict back with `BertForMaskedLM`\n    masked_index = 8\n    tokenized_text[masked_index] = \"[MASK]\"\n    assert tokenized_text == [\n        \"[CLS]\",\n        \"who\",\n        \"was\",\n        \"jim\",\n        \"henson\",\n        \"?\",\n        \"[SEP]\",\n        \"jim\",\n        \"[MASK]\",\n        \"was\",\n        \"a\",\n        \"puppet\",\n        \"##eer\",\n        \"[SEP]\",\n    ]\n\n    # Convert tokens to vocabulary indices\n    indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)\n    # Define sentence A and B indices associated with the 1st and 2nd sentences (see the paper)\n    segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]\n\n    # Convert inputs to PyTorch tensors\n    tokens_tensor = torch.tensor([indexed_tokens])\n    segments_tensors = torch.tensor([segments_ids])\n\n    ######################################################################\n    # Load a pretrained PyTorch model bert-base-uncased\n    # -------------------------------------------------\n\n    # BERT model with a language modeling head\n    model = BertForMaskedLM.from_pretrained(\"bert-base-uncased\")\n    model.eval()\n\n    ######################################################################\n    # Predict all tokens with PyTorch\n    # -------------------------------\n\n    with torch.no_grad():\n        torch_preds = model(tokens_tensor, segments_tensors)\n\n    ######################################################################\n    # Make a TorchScripted model via jit trace\n    # ----------------------------------------\n\n    scripted_model = torch.jit.trace(model, (tokens_tensor, segments_tensors)).eval()\n\n    ######################################################################\n    # Import the graph to Relay\n    # -------------------------\n    # Convert PyTorch graph to Relay graph. 
The input name can be arbitrary.\n\n input_1 = \"input_ids\"\n input_2 = \"input.2\"\n shape_list = [(input_1, list(tokens_tensor.shape)), (input_2, list(segments_tensors.shape))]\n\n mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)\n\n ######################################################################\n # Compile the model with relay\n # ----------------------------\n\n target = \"llvm\"\n with tvm.transform.PassContext(opt_level=3):\n relay_graph, relay_lib, relay_params = relay.build(mod, target=target, params=params)\n\n ######################################################################\n # Execute on TVM\n # --------------\n\n dev = tvm.device(target, 0)\n relay_model = graph_executor.create(relay_graph, relay_lib, dev)\n relay_model.set_input(**relay_params)\n relay_model.set_input(input_1, tokens_tensor)\n relay_model.set_input(input_2, segments_tensors)\n relay_model.run()\n compiled_output = relay_model.get_output(0).numpy()\n\n ######################################################################\n # Validate the outputs\n # --------------------\n # Compare the torch and tvm outputs\n\n tvm.testing.assert_allclose(torch_preds, compiled_output, rtol=1e-3, atol=1e-3)\n\n ######################################################################\n # Process the output\n # ------------------\n # Process the model output to token.\n\n # Torch output to token\n torch_pred_idx = torch.argmax(torch_preds[0, masked_index]).item()\n torch_pred_token = tokenizer.convert_ids_to_tokens([torch_pred_idx])[0]\n\n # TVM output to token\n tvm_pred_idx = compiled_output[0, masked_index].argmax()\n tvm_pred_token = tokenizer.convert_ids_to_tokens([tvm_pred_idx])[0]\n\n assert torch_pred_idx == tvm_pred_idx\n assert torch_pred_token == tvm_pred_token\n\n # Print the outputs\n print(\"Torch top-1 id: {}, token: {}\".format(torch_pred_idx, torch_pred_token))\n print(\"TVM top-1 id: {}, token: {}\".format(tvm_pred_idx, tvm_pred_token))\n\n\ndef test_convert_torch_script_with_input_types():\n def model_fn(x, y):\n x = x.to(dtype=torch.int32)\n y = x + y\n return y\n\n ishape = (4, 5)\n input_x = torch.rand(ishape, dtype=torch.float32)\n input_y = torch.randint(low=0, high=100, size=ishape, dtype=torch.int32)\n inputs = [input_x, input_y]\n script_module = torch.jit.trace(model_fn, inputs)\n\n fname = \"tmp.pt\"\n torch.jit.save(script_module, fname)\n loaded = torch.jit.load(fname)\n os.remove(fname)\n\n verify_model(loaded.eval(), input_data=inputs)\n\n def expected(x_shape, y_shape):\n # use a fixed order of args so alpha equal check can pass\n x = relay.var(\"x\", shape=x_shape, dtype=\"float32\")\n y = relay.var(\"y\", shape=y_shape, dtype=\"int32\")\n args = [x, y]\n x1 = relay.cast(x, \"int32\")\n y1 = relay.add(x1, y)\n mod = tvm.IRModule.from_expr(relay.Function(args, y1))\n return mod[\"main\"]\n\n input_infos = [(\"input0\", (ishape, \"float\")), (\"input1\", (ishape, \"int\"))]\n mod, params = relay.frontend.from_pytorch(loaded, input_infos)\n\n expected_mod = expected(ishape, ishape)\n\n assert tvm.ir.structural_equal(expected_mod, mod[\"main\"], map_free_vars=True)\n\n\ndef test_bincount():\n def test_fn(x, weights=None):\n return torch.bincount(x, weights=weights)\n\n inp = torch.randint(0, 100, (10000,), dtype=torch.int64)\n weights = torch.linspace(0, 100, steps=10000)\n\n targets = [\"llvm\", \"cuda\"]\n verify_trace_model(test_fn, [inp], targets)\n verify_trace_model(test_fn, [inp, weights], targets)\n\n\ndef test_hard_swish():\n examples = 
[torch.rand(8).float(), torch.rand(8, 10).float(), torch.rand(1, 1, 10).float()]\n for input in examples:\n verify_model(torch.nn.Hardswish().eval(), input_data=input)\n verify_model(torch.nn.Hardswish(inplace=True).eval(), input_data=input)\n\n\ndef test_hard_sigmoid():\n examples = [torch.rand(8).float(), torch.rand(8, 10).float(), torch.rand(1, 1, 10).float()]\n for input in examples:\n verify_model(torch.nn.Hardsigmoid().eval(), input_data=input)\n verify_model(torch.nn.Hardsigmoid(inplace=True).eval(), input_data=input)\n\n\ndef test_cumsum():\n def test_fn(dim, dtype=None):\n return lambda x: torch.cumsum(x, dim=dim, dtype=dtype)\n\n inp = torch.randint(0, 100, (10000,), dtype=torch.int32)\n verify_model(test_fn(0), [inp])\n verify_model(test_fn(0), [inp.to(torch.int64)])\n verify_model(test_fn(0, dtype=torch.int64), [inp.to(torch.int64)])\n\n inp = torch.randn((100, 100), dtype=torch.float32)\n verify_model(test_fn(dim=0, dtype=torch.float64), [inp])\n verify_model(test_fn(dim=1), [inp])\n\n inp = torch.randn((100, 100), dtype=torch.float32) > 0.5\n verify_model(test_fn(dim=0, dtype=torch.int32), [inp])\n\n\ndef test_masked_fill():\n def test_fn(x, mask):\n return torch.masked_fill(x, mask, 0.0)\n\n inp = torch.randn(100, 100)\n verify_model(test_fn, [inp, inp > 0.5])\n verify_model(test_fn, [inp.to(torch.float64), inp > 0.5])\n\n\ndef test_transformer():\n model = torch.nn.Transformer(d_model=256, nhead=8, num_encoder_layers=6, num_decoder_layers=6)\n model = model.eval()\n src = torch.rand((10, 32, 256))\n tgt = torch.rand((20, 32, 256))\n verify_model(model.eval(), input_data=[src, tgt])\n\n\ndef test_argsort():\n def test_fn(dim, descending):\n return lambda x: torch.argsort(x, dim=dim, descending=descending)\n\n inp = torch.randn(100)\n verify_model(test_fn(0, True), [inp])\n verify_model(test_fn(0, False), [inp])\n\n inp = torch.randn(100, 100)\n verify_model(test_fn(0, True), [inp])\n verify_model(test_fn(0, False), [inp])\n verify_model(test_fn(1, True), [inp])\n verify_model(test_fn(1, False), [inp])\n\n\ndef test_sort():\n def test_fn(dim, descending):\n return lambda x: torch.sort(x, dim=dim, descending=descending)\n\n inp = torch.randn(100)\n verify_model(test_fn(0, True), [inp])\n verify_model(test_fn(-1, False), [inp])\n\n inp = torch.randn(100, 100)\n verify_model(test_fn(0, True), [inp])\n verify_model(test_fn(-2, False), [inp])\n verify_model(test_fn(1, True), [inp])\n verify_model(test_fn(-1, False), [inp])\n\n\ndef test_logical_and():\n def test_fn(x, y):\n return torch.logical_and(x, y)\n\n a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)\n b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)\n verify_model(test_fn, [a, b])\n\n a = torch.tensor([True, False, True])\n b = torch.tensor([True, False, False])\n verify_model(test_fn, [a, b])\n\n\ndef test_masked_select():\n def test_fn(x, mask):\n return torch.masked_select(x, mask)\n\n for shape in [(10,), (3, 4), (16, 32, 64)]:\n x = torch.randn(*shape)\n mask = x.ge(0.5)\n verify_trace_model(test_fn, [x, mask], [\"llvm\", \"cuda\", \"nvptx\"])\n\n\ndef test_unique():\n def test_fn(is_sorted, return_inverse, return_counts):\n return lambda x: torch.unique(x, is_sorted, return_inverse, return_counts)\n\n in_data = torch.randint(0, 20, (10,), dtype=torch.int32)\n targets = [\"llvm\", \"cuda\", \"nvptx\"]\n verify_trace_model(test_fn(True, True, True), [in_data], targets)\n verify_trace_model(test_fn(True, False, True), [in_data], targets)\n verify_trace_model(test_fn(True, True, False), [in_data], targets)\n 
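# hypothetical extra case: neither inverse indices nor counts requested\n    verify_trace_model(test_fn(True, False, False), [in_data], targets)\n    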
verify_trace_model(test_fn(True, False, True), [in_data], targets)\n    in_data = torch.randint(0, 20, (20,), dtype=torch.int64)\n    verify_trace_model(test_fn(True, True, True), [in_data], targets)\n    verify_trace_model(test_fn(True, False, True), [in_data], targets)\n    verify_trace_model(test_fn(True, True, False), [in_data], targets)\n    verify_trace_model(test_fn(True, False, True), [in_data], targets)\n\n\ndef test_forward_nll_loss():\n    torch.set_grad_enabled(False)\n    N, C = 10, 3\n    predictions = torch.rand((N, C)).float()\n    targets = torch.randint(0, 3, (N,))\n    weights = torch.tensor([1, 2, 3]).float()\n    verify_model(torch.nn.NLLLoss().eval(), input_data=[predictions, targets])\n    verify_model(torch.nn.NLLLoss(weight=weights).eval(), input_data=[predictions, targets])\n    verify_model(torch.nn.NLLLoss(ignore_index=1).eval(), input_data=[predictions, targets])\n    verify_model(torch.nn.NLLLoss(reduction=\"sum\").eval(), input_data=[predictions, targets])\n    verify_model(torch.nn.NLLLoss(reduction=\"none\").eval(), input_data=[predictions, targets])\n\n    # multidimensional nll loss (aten::nll_loss2d)\n    d1, d2 = 2, 3\n    predictions = torch.rand((N, C, d1, d2)).float()\n    targets = torch.randint(0, 3, (N, d1, d2))\n    verify_model(torch.nn.NLLLoss().eval(), input_data=[predictions, targets])\n    verify_model(torch.nn.NLLLoss(weight=weights).eval(), input_data=[predictions, targets])\n    verify_model(torch.nn.NLLLoss(ignore_index=1).eval(), input_data=[predictions, targets])\n    verify_model(torch.nn.NLLLoss(reduction=\"sum\").eval(), input_data=[predictions, targets])\n    verify_model(torch.nn.NLLLoss(reduction=\"none\").eval(), input_data=[predictions, targets])\n\n\n@tvm.testing.uses_gpu\ndef test_forward_flip():\n    torch.set_grad_enabled(False)\n\n    class Flip(Module):\n        def __init__(self, axis=0):\n            super().__init__()\n            self.axis = axis\n\n        def forward(self, x):\n            return x.flip([self.axis])\n\n    input = torch.randn(2, 3, 4)\n    verify_model(Flip(axis=0), input_data=input)\n    verify_model(Flip(axis=1), input_data=input)\n    verify_model(Flip(axis=2), input_data=input)\n    verify_model(Flip(axis=-1), input_data=input)\n\n\nif __name__ == \"__main__\":\n    # some structural tests\n    test_forward_traced_function()\n    test_forward_dtypes()\n    test_weight_names()\n    test_duplicate_weight_use()\n\n    # Single operator tests\n    test_forward_pixel_shuffle()\n    test_forward_add()\n    test_forward_subtract()\n    test_forward_multiply()\n    test_forward_matmul()\n    test_forward_rsub()\n    test_forward_onehot()\n    test_forward_embedding()\n    test_forward_reshape()\n    test_forward_reciprocal()\n    test_forward_repeat()\n    test_forward_repeat_interleave()\n    test_forward_squeeze()\n    test_forward_unsqueeze()\n    test_forward_concatenate()\n    test_forward_reduce_sum()\n    test_forward_reduce_prod()\n    test_forward_argmin()\n    test_forward_argmax()\n    test_forward_norm()\n    test_forward_frobenius_norm()\n    test_forward_std()\n    test_forward_variance()\n    test_forward_relu()\n    test_forward_prelu()\n    test_forward_leakyrelu()\n    test_forward_elu()\n    test_forward_celu()\n    test_forward_gelu()\n    test_forward_selu()\n    test_forward_log_sigmoid()\n    test_forward_adaptiveavgpool()\n    test_forward_maxpool2d()\n    test_forward_maxpool1d()\n    test_forward_maxpool3d()\n    test_forward_hardtanh()\n    test_forward_conv()\n    test_forward_conv_transpose()\n    test_forward_threshold()\n    test_forward_contiguous()\n    test_forward_batchnorm()\n    test_forward_instancenorm()\n    test_forward_layernorm()\n    test_forward_groupnorm()\n    test_forward_transpose()\n    test_forward_size()\n    test_forward_view()\n    
test_forward_select()\n test_forward_take()\n test_forward_topk()\n test_forward_where()\n test_forward_addcdiv()\n test_forward_addcmul()\n test_forward_true_divide()\n test_forward_is_floating_point()\n test_forward_clone()\n test_forward_softplus()\n test_forward_softsign()\n test_forward_logsoftmax()\n test_forward_sigmoid()\n test_forward_dense()\n test_forward_avgpool1d()\n test_forward_avgpool2d()\n test_forward_avgpool3d()\n test_forward_dropout()\n test_forward_slice()\n test_forward_narrow()\n test_forward_mean()\n test_forward_expand()\n test_forward_pow()\n test_forward_unary()\n test_forward_clamp()\n test_forward_clamp_()\n test_forward_logical_not()\n test_forward_bitwise_not()\n test_forward_bitwise_xor()\n test_forward_logical_xor()\n test_forward_isfinite()\n test_forward_isnan()\n test_forward_isinf()\n test_forward_ones()\n test_forward_ones_like()\n test_forward_zeros()\n test_forward_zeros_like()\n test_forward_full()\n test_forward_full_like()\n test_forward_linspace()\n test_forward_arange()\n test_forward_mesh_grid()\n test_forward_chunk()\n test_forward_split()\n test_forward_gather()\n test_upsample()\n test_forward_upsample3d()\n test_forward_nms()\n test_forward_roi_align()\n test_to()\n test_flatten()\n test_type_as()\n test_forward_functional_pad()\n test_forward_zero_pad2d()\n test_forward_constant_pad1d()\n test_forward_constant_pad2d()\n test_forward_constant_pad3d()\n test_forward_reflection_pad1d()\n test_forward_reflection_pad2d()\n test_forward_replication_pad1d()\n test_forward_replication_pad2d()\n test_forward_replication_pad3d()\n test_adaptive_pool3d()\n test_conv3d()\n test_conv3d_transpose()\n test_forward_index()\n test_min_max()\n test_logsumexp()\n test_stack()\n test_stack_dynamic()\n test_forward_unbind()\n test_forward_nonzero()\n test_forward_scatter()\n test_forward_index_put()\n test_numel()\n test_bincount()\n test_cumsum()\n test_masked_fill()\n test_transformer()\n test_sort()\n test_argsort()\n test_logical_and()\n test_masked_select()\n test_unique()\n test_hard_swish()\n test_hard_sigmoid()\n test_forward_nll_loss()\n test_forward_flip()\n\n # Model tests\n test_resnet18()\n test_squeezenet1_0()\n test_squeezenet1_1()\n test_densenet121()\n # disable inception test for now, since loading it takes ~5min on torchvision-0.5 due to scipy bug\n # See https://discuss.pytorch.org/t/torchvisions-inception-v3-takes-much-longer-to-load-than-other-models/68756\n # test_inception_v3()\n test_googlenet()\n test_mnasnet0_5()\n test_mobilenet_v2()\n\n test_custom_conversion_map()\n\n test_segmentation_models()\n test_3d_models()\n\n # Quantization test\n from qnn_test import test_quantized_imagenet, test_quantized_modules\n\n test_quantized_modules()\n test_quantized_imagenet()\n\n # Test simple conditionals and loop\n test_control_flow()\n test_simple_rnn()\n\n # More complex recurrent models\n from test_lstm import test_custom_lstm\n\n test_custom_lstm()\n\n # Test bert model\n test_forward_pretrained_bert_base_uncased()\n\n # Test convert torch script(jit) with specific inputs' types\n test_convert_torch_script_with_input_types()\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# pylint: disable=invalid-name,unnecessary-comprehension\n\"\"\" TVM testing utilities\n\nTesting Markers\n***************\n\nWe use pytest markers to specify the requirements of test functions. Currently\nthere is a single distinction that matters for our testing environment: does\nthe test require a gpu. For tests that require just a gpu or just a cpu, we\nhave the decorator :py:func:`requires_gpu` that enables the test when a gpu is\navailable. To avoid running tests that don't require a gpu on gpu nodes, this\ndecorator also sets the pytest marker `gpu` so we can select the gpu subset\nof tests (using `pytest -m gpu`).\n\nUnfortunately, many tests are written like this:\n\n.. code-block:: python\n\n    def test_something():\n        for target in all_targets():\n            do_something()\n\nThe test uses both gpu and cpu targets, so the test needs to be run on both cpu\nand gpu nodes. But we still want to only run the cpu targets on the cpu testing\nnode. The solution is to mark these tests with the gpu marker so they will be\nrun on the gpu nodes. But we also modify all_targets (renamed to\nenabled_targets) so that it only returns gpu targets on gpu nodes and cpu\ntargets on cpu nodes (using an environment variable).\n\nInstead of using the all_targets function, future tests that would like to\ntest against a variety of targets should use the\n:py:func:`tvm.testing.parametrize_targets` functionality. This allows us\ngreater control over which targets are run on which testing nodes.\n\nIf in the future we want to add a new type of testing node (for example\nfpgas), we need to add a new marker in `tests/python/pytest.ini` and a new\nfunction in this module. Then targets using this node should be added to the\n`TVM_TEST_TARGETS` environment variable in the CI.\n\"\"\"\nimport collections\nimport copy\nimport functools\nimport logging\nimport os\nimport sys\nimport time\nimport pickle\nimport pytest\nimport _pytest\nimport numpy as np\nimport tvm\nimport tvm.arith\nimport tvm.tir\nimport tvm.te\nimport tvm._ffi\nfrom tvm.contrib import nvcc\nfrom tvm.error import TVMError\n\n\ndef assert_allclose(actual, desired, rtol=1e-7, atol=1e-7):\n    \"\"\"Version of np.testing.assert_allclose with `atol` and `rtol` fields set\n    to reasonable defaults.\n\n    Arguments `actual` and `desired` are not interchangeable, since the function\n    compares the `abs(actual-desired)` with `atol+rtol*abs(desired)`. Since we\n    often allow `desired` to be close to zero, we generally want non-zero `atol`.\n    \"\"\"\n    actual = np.asanyarray(actual)\n    desired = np.asanyarray(desired)\n    np.testing.assert_allclose(actual.shape, desired.shape)\n    np.testing.assert_allclose(actual, desired, rtol=rtol, atol=atol, verbose=True)\n\n\ndef check_numerical_grads(\n    function, input_values, grad_values, function_value=None, delta=1e-3, atol=1e-2, rtol=0.1\n):\n    \"\"\"A helper function that checks that numerical gradients of a function are\n    equal to gradients computed in some different way (analytical gradients).\n\n    Numerical gradients are computed using finite difference approximation. 
To\n reduce the number of function evaluations, the number of points used is\n gradually increased if the error value is too high (up to 5 points).\n\n Parameters\n ----------\n function\n A function that takes inputs either as positional or as keyword\n arguments (either `function(*input_values)` or `function(**input_values)`\n should be correct) and returns a scalar result. Should accept numpy\n ndarrays.\n\n input_values : Dict[str, numpy.ndarray] or List[numpy.ndarray]\n A list of values or a dict assigning values to variables. Represents the\n point at which gradients should be computed.\n\n grad_values : Dict[str, numpy.ndarray] or List[numpy.ndarray]\n Gradients computed using a different method.\n\n function_value : float, optional\n Should be equal to `function(**input_values)`.\n\n delta : float, optional\n A small number used for numerical computation of partial derivatives.\n The default 1e-3 is a good choice for float32.\n\n atol : float, optional\n Absolute tolerance. Gets multiplied by `sqrt(n)` where n is the size of a\n gradient.\n\n rtol : float, optional\n Relative tolerance.\n \"\"\"\n # If input_values is a list then function accepts positional arguments\n # In this case transform it to a function taking kwargs of the form {\"0\": ..., \"1\": ...}\n if not isinstance(input_values, dict):\n input_len = len(input_values)\n input_values = {str(idx): val for idx, val in enumerate(input_values)}\n\n def _function(_input_len=input_len, _orig_function=function, **kwargs):\n return _orig_function(*(kwargs[str(i)] for i in range(input_len)))\n\n function = _function\n\n grad_values = {str(idx): val for idx, val in enumerate(grad_values)}\n\n if function_value is None:\n function_value = function(**input_values)\n\n # a helper to modify j-th element of val by a_delta\n def modify(val, j, a_delta):\n val = val.copy()\n val.reshape(-1)[j] = val.reshape(-1)[j] + a_delta\n return val\n\n # numerically compute a partial derivative with respect to j-th element of the var `name`\n def derivative(x_name, j, a_delta):\n modified_values = {\n n: modify(val, j, a_delta) if n == x_name else val for n, val in input_values.items()\n }\n return (function(**modified_values) - function_value) / a_delta\n\n def compare_derivative(j, n_der, grad):\n der = grad.reshape(-1)[j]\n return np.abs(n_der - der) < atol + rtol * np.abs(n_der)\n\n for x_name, grad in grad_values.items():\n if grad.shape != input_values[x_name].shape:\n raise AssertionError(\n \"Gradient wrt '{}' has unexpected shape {}, expected {} \".format(\n x_name, grad.shape, input_values[x_name].shape\n )\n )\n\n ngrad = np.zeros_like(grad)\n\n wrong_positions = []\n\n # compute partial derivatives for each position in this variable\n for j in range(np.prod(grad.shape)):\n # forward difference approximation\n nder = derivative(x_name, j, delta)\n\n # if the derivative is not equal to the analytical one, try to use more\n # precise and expensive methods\n if not compare_derivative(j, nder, grad):\n # central difference approximation\n nder = (derivative(x_name, j, -delta) + nder) / 2\n\n if not compare_derivative(j, nder, grad):\n # central difference approximation using h = delta/2\n cnder2 = (\n derivative(x_name, j, delta / 2) + derivative(x_name, j, -delta / 2)\n ) / 2\n # five-point derivative\n nder = (4 * cnder2 - nder) / 3\n\n # if the derivatives still don't match, add this position to the\n # list of wrong positions\n if not compare_derivative(j, nder, grad):\n wrong_positions.append(np.unravel_index(j, grad.shape))\n\n 
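# record the final numerical estimate (forward, central, or five-point) for this position\n            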
ngrad.reshape(-1)[j] = nder\n\n        wrong_percentage = int(100 * len(wrong_positions) / np.prod(grad.shape))\n\n        dist = np.sqrt(np.sum((ngrad - grad) ** 2))\n        grad_norm = np.sqrt(np.sum(ngrad ** 2))\n\n        if not (np.isfinite(dist) and np.isfinite(grad_norm)):\n            raise ValueError(\n                \"NaN or infinity detected during numerical gradient checking wrt '{}'\\n\"\n                \"analytical grad = {}\\n numerical grad = {}\\n\".format(x_name, grad, ngrad)\n            )\n\n        # we multiply atol by this number to make it more universal for different sizes\n        sqrt_n = np.sqrt(float(np.prod(grad.shape)))\n\n        if dist > atol * sqrt_n + rtol * grad_norm:\n            raise AssertionError(\n                \"Analytical and numerical grads wrt '{}' differ too much\\n\"\n                \"analytical grad = {}\\n numerical grad = {}\\n\"\n                \"{}% of elements differ, first 10 of wrong positions: {}\\n\"\n                \"distance > atol*sqrt(n) + rtol*grad_norm\\n\"\n                \"distance {} > {}*{} + {}*{}\".format(\n                    x_name,\n                    grad,\n                    ngrad,\n                    wrong_percentage,\n                    wrong_positions[:10],\n                    dist,\n                    atol,\n                    sqrt_n,\n                    rtol,\n                    grad_norm,\n                )\n            )\n\n        max_diff = np.max(np.abs(ngrad - grad))\n        avg_diff = np.mean(np.abs(ngrad - grad))\n        logging.info(\n            \"Numerical grad test wrt '%s' of shape %s passes, \"\n            \"dist = %f, max_diff = %f, avg_diff = %f\",\n            x_name,\n            grad.shape,\n            dist,\n            max_diff,\n            avg_diff,\n        )\n\n\ndef assert_prim_expr_equal(lhs, rhs):\n    \"\"\"Assert that lhs and rhs are equal to each other.\n\n    Parameters\n    ----------\n    lhs : tvm.tir.PrimExpr\n        The left operand.\n\n    rhs : tvm.tir.PrimExpr\n        The right operand.\n    \"\"\"\n    ana = tvm.arith.Analyzer()\n    res = ana.simplify(lhs - rhs)\n    equal = isinstance(res, tvm.tir.IntImm) and res.value == 0\n    if not equal:\n        raise ValueError(\"{} and {} are not equal\".format(lhs, rhs))\n\n\ndef check_bool_expr_is_true(bool_expr, vranges, cond=None):\n    \"\"\"Check that bool_expr holds given the condition cond\n    for every value of free variables from vranges.\n\n    For example, 2x > 4y solves to x > 2y given x in (0, 10) and y in (0, 10);\n    here bool_expr is x > 2y, vranges is {x: (0, 10), y: (0, 10)}, cond is 2x > 4y.\n    We create iterations to check,\n    for x in range(10):\n        for y in range(10):\n            assert !(2x > 4y) || (x > 2y)\n\n    Parameters\n    ----------\n    bool_expr : tvm.ir.PrimExpr\n        Boolean expression to check\n    vranges: Dict[tvm.tir.expr.Var, tvm.ir.Range]\n        Free variables and their ranges\n    cond: tvm.ir.PrimExpr\n        extra condition that needs to be satisfied.\n    \"\"\"\n    if cond is not None:\n        bool_expr = tvm.te.any(tvm.tir.Not(cond), bool_expr)\n\n    def _run_expr(expr, vranges):\n        \"\"\"Evaluate expr for every value of free variables\n        given by vranges and return the tensor of results.\n        \"\"\"\n\n        def _compute_body(*us):\n            vmap = {v: u + r.min for (v, r), u in zip(vranges.items(), us)}\n            return tvm.tir.stmt_functor.substitute(expr, vmap)\n\n        A = tvm.te.compute([r.extent.value for v, r in vranges.items()], _compute_body)\n        args = [tvm.nd.empty(A.shape, A.dtype)]\n        sch = tvm.te.create_schedule(A.op)\n        mod = tvm.build(sch, [A])\n        mod(*args)\n        return args[0].numpy()\n\n    res = _run_expr(bool_expr, vranges)\n    if not np.all(res):\n        indices = list(np.argwhere(res == 0)[0])\n        counterex = [(str(v), i + r.min) for (v, r), i in zip(vranges.items(), indices)]\n        counterex = sorted(counterex, key=lambda x: x[0])\n        counterex = \", \".join([v + \" = \" + str(i) for v, i in counterex])\n        ana = tvm.arith.Analyzer()\n        raise AssertionError(\n            \"Expression {}\\nis not true on {}\\n\"\n            \"Counterexample: {}\".format(ana.simplify(bool_expr), vranges, counterex)\n        )\n\n\ndef 
check_int_constraints_trans_consistency(constraints_trans, vranges=None):\n \"\"\"Check IntConstraintsTransform is a bijective transformation.\n\n Parameters\n ----------\n constraints_trans : arith.IntConstraintsTransform\n Integer constraints transformation\n vranges: Dict[tvm.tir.Var, tvm.ir.Range]\n Free variables and their ranges\n \"\"\"\n if vranges is None:\n vranges = {}\n\n def _check_forward(constraints1, constraints2, varmap, backvarmap):\n ana = tvm.arith.Analyzer()\n all_vranges = vranges.copy()\n all_vranges.update({v: r for v, r in constraints1.ranges.items()})\n\n # Check that the transformation is injective\n cond_on_vars = tvm.tir.const(1, \"bool\")\n for v in constraints1.variables:\n if v in varmap:\n # variable mapping is consistent\n v_back = ana.simplify(tvm.tir.stmt_functor.substitute(varmap[v], backvarmap))\n cond_on_vars = tvm.te.all(cond_on_vars, v == v_back)\n # Also we have to check that the new relations are true when old relations are true\n cond_subst = tvm.tir.stmt_functor.substitute(\n tvm.te.all(tvm.tir.const(1, \"bool\"), *constraints2.relations), backvarmap\n )\n # We have to include relations from vranges too\n for v in constraints2.variables:\n if v in constraints2.ranges:\n r = constraints2.ranges[v]\n range_cond = tvm.te.all(v >= r.min, v < r.min + r.extent)\n range_cond = tvm.tir.stmt_functor.substitute(range_cond, backvarmap)\n cond_subst = tvm.te.all(cond_subst, range_cond)\n cond_subst = ana.simplify(cond_subst)\n check_bool_expr_is_true(\n tvm.te.all(cond_subst, cond_on_vars),\n all_vranges,\n cond=tvm.te.all(tvm.tir.const(1, \"bool\"), *constraints1.relations),\n )\n\n _check_forward(\n constraints_trans.src,\n constraints_trans.dst,\n constraints_trans.src_to_dst,\n constraints_trans.dst_to_src,\n )\n _check_forward(\n constraints_trans.dst,\n constraints_trans.src,\n constraints_trans.dst_to_src,\n constraints_trans.src_to_dst,\n )\n\n\ndef _get_targets(target_str=None):\n if target_str is None:\n target_str = os.environ.get(\"TVM_TEST_TARGETS\", \"\")\n\n if len(target_str) == 0:\n target_str = DEFAULT_TEST_TARGETS\n\n target_names = set(t.strip() for t in target_str.split(\";\") if t.strip())\n\n targets = []\n for target in target_names:\n target_kind = target.split()[0]\n is_enabled = tvm.runtime.enabled(target_kind)\n is_runnable = is_enabled and tvm.device(target_kind).exist\n targets.append(\n {\n \"target\": target,\n \"target_kind\": target_kind,\n \"is_enabled\": is_enabled,\n \"is_runnable\": is_runnable,\n }\n )\n\n if all(not t[\"is_runnable\"] for t in targets):\n if tvm.runtime.enabled(\"llvm\"):\n logging.warning(\n \"None of the following targets are supported by this build of TVM: %s.\"\n \" Try setting TVM_TEST_TARGETS to a supported target. Defaulting to llvm.\",\n target_str,\n )\n return _get_targets(\"llvm\")\n\n raise TVMError(\n \"None of the following targets are supported by this build of TVM: %s.\"\n \" Try setting TVM_TEST_TARGETS to a supported target.\"\n \" Cannot default to llvm, as it is not enabled.\" % target_str\n )\n\n return targets\n\n\nDEFAULT_TEST_TARGETS = (\n \"llvm;cuda;opencl;metal;rocm;vulkan -from_device=0;nvptx;\"\n \"llvm -device=arm_cpu;opencl -device=mali,aocl_sw_emu\"\n)\n\n\ndef device_enabled(target):\n \"\"\"Check if a target should be used when testing.\n\n It is recommended that you use :py:func:`tvm.testing.parametrize_targets`\n instead of manually checking if a target is enabled.\n\n This allows the user to control which devices they are testing against. 
In\n    tests, this should be used to check if a device should be used when said\n    device is an optional part of the test.\n\n    Parameters\n    ----------\n    target : str\n        Target string to check against\n\n    Returns\n    -------\n    bool\n        Whether or not the device associated with this target is enabled.\n\n    Example\n    -------\n    >>> @tvm.testing.uses_gpu\n    >>> def test_mytest():\n    >>>     for target in [\"cuda\", \"llvm\"]:\n    >>>         if device_enabled(target):\n    >>>             test_body...\n\n    Here, `test_body` will only be reached with `target=\"cuda\"` on gpu test\n    nodes and with `target=\"llvm\"` on cpu test nodes.\n    \"\"\"\n    assert isinstance(target, str), \"device_enabled requires a target as a string\"\n    # only check if the device name is found; sometimes there are extra flags\n    target_kind = target.split(\" \")[0]\n    return any(target_kind == t[\"target_kind\"] for t in _get_targets() if t[\"is_runnable\"])\n\n\ndef enabled_targets():\n    \"\"\"Get all enabled targets with associated devices.\n\n    In most cases, you should use :py:func:`tvm.testing.parametrize_targets` instead of\n    this function.\n\n    In this context, enabled means that TVM was built with support for\n    this target, the target name appears in the TVM_TEST_TARGETS\n    environment variable, and a suitable device for running this\n    target exists. If TVM_TEST_TARGETS is not set, it defaults to\n    variable DEFAULT_TEST_TARGETS in this module.\n\n    If you use this function in a test, you **must** decorate the test with\n    :py:func:`tvm.testing.uses_gpu` (otherwise it will never be run on the gpu).\n\n    Returns\n    -------\n    targets: list\n        A list of pairs of all enabled targets and their associated devices\n\n    \"\"\"\n    return [(t[\"target\"], tvm.device(t[\"target\"])) for t in _get_targets() if t[\"is_runnable\"]]\n\n\ndef _compose(args, decs):\n    \"\"\"Helper to apply multiple markers\"\"\"\n    if len(args) > 0:\n        f = args[0]\n        for d in reversed(decs):\n            f = d(f)\n        return f\n    return decs\n\n\ndef uses_gpu(*args):\n    \"\"\"Mark to differentiate tests that use the GPU in some capacity.\n\n    These tests will be run on CPU-only test nodes and on test nodes with GPUs.\n    To mark a test that must have a GPU present to run, use\n    :py:func:`tvm.testing.requires_gpu`.\n\n    Parameters\n    ----------\n    f : function\n        Function to mark\n    \"\"\"\n    _uses_gpu = [pytest.mark.gpu]\n    return _compose(args, _uses_gpu)\n\n\ndef requires_gpu(*args):\n    \"\"\"Mark a test as requiring a GPU to run.\n\n    Tests with this mark will not be run unless a gpu is present.\n\n    Parameters\n    ----------\n    f : function\n        Function to mark\n    \"\"\"\n    _requires_gpu = [\n        pytest.mark.skipif(\n            not tvm.cuda().exist\n            and not tvm.rocm().exist\n            and not tvm.opencl().exist\n            and not tvm.metal().exist\n            and not tvm.vulkan().exist,\n            reason=\"No GPU present\",\n        ),\n        *uses_gpu(),\n    ]\n    return _compose(args, _requires_gpu)\n\n\ndef requires_cuda(*args):\n    \"\"\"Mark a test as requiring the CUDA runtime.\n\n    This also marks the test as requiring a cuda gpu.\n\n    Parameters\n    ----------\n    f : function\n        Function to mark\n    \"\"\"\n    _requires_cuda = [\n        pytest.mark.cuda,\n        pytest.mark.skipif(not device_enabled(\"cuda\"), reason=\"CUDA support not enabled\"),\n        *requires_gpu(),\n    ]\n    return _compose(args, _requires_cuda)\n\n\ndef requires_nvptx(*args):\n    \"\"\"Mark a test as requiring NVPTX compilation on the CUDA runtime\n\n    This also marks the test as requiring a cuda gpu, and requiring\n    LLVM support.\n\n    Parameters\n    ----------\n    f : function\n        Function to mark\n\n    \"\"\"\n    _requires_nvptx = [\n        pytest.mark.skipif(not 
device_enabled(\"nvptx\"), reason=\"NVPTX support not enabled\"),\n *requires_llvm(),\n *requires_gpu(),\n ]\n return _compose(args, _requires_nvptx)\n\n\ndef requires_cudagraph(*args):\n \"\"\"Mark a test as requiring the CUDA Graph Feature\n\n This also marks the test as requiring cuda\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_cudagraph = [\n pytest.mark.skipif(\n not nvcc.have_cudagraph(), reason=\"CUDA Graph is not supported in this environment\"\n ),\n *requires_cuda(),\n ]\n return _compose(args, _requires_cudagraph)\n\n\ndef requires_opencl(*args):\n \"\"\"Mark a test as requiring the OpenCL runtime.\n\n This also marks the test as requiring a gpu.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_opencl = [\n pytest.mark.opencl,\n pytest.mark.skipif(not device_enabled(\"opencl\"), reason=\"OpenCL support not enabled\"),\n *requires_gpu(),\n ]\n return _compose(args, _requires_opencl)\n\n\ndef requires_rocm(*args):\n \"\"\"Mark a test as requiring the rocm runtime.\n\n This also marks the test as requiring a gpu.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_rocm = [\n pytest.mark.rocm,\n pytest.mark.skipif(not device_enabled(\"rocm\"), reason=\"rocm support not enabled\"),\n *requires_gpu(),\n ]\n return _compose(args, _requires_rocm)\n\n\ndef requires_metal(*args):\n \"\"\"Mark a test as requiring the metal runtime.\n\n This also marks the test as requiring a gpu.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_metal = [\n pytest.mark.metal,\n pytest.mark.skipif(not device_enabled(\"metal\"), reason=\"metal support not enabled\"),\n *requires_gpu(),\n ]\n return _compose(args, _requires_metal)\n\n\ndef requires_vulkan(*args):\n \"\"\"Mark a test as requiring the vulkan runtime.\n\n This also marks the test as requiring a gpu.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_vulkan = [\n pytest.mark.vulkan,\n pytest.mark.skipif(not device_enabled(\"vulkan\"), reason=\"vulkan support not enabled\"),\n *requires_gpu(),\n ]\n return _compose(args, _requires_vulkan)\n\n\ndef requires_tensorcore(*args):\n \"\"\"Mark a test as requiring a tensorcore to run.\n\n Tests with this mark will not be run unless a tensorcore is present.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_tensorcore = [\n pytest.mark.tensorcore,\n pytest.mark.skipif(\n not tvm.cuda().exist or not nvcc.have_tensorcore(tvm.cuda(0).compute_version),\n reason=\"No tensorcore present\",\n ),\n *requires_gpu(),\n ]\n return _compose(args, _requires_tensorcore)\n\n\ndef requires_llvm(*args):\n \"\"\"Mark a test as requiring llvm to run.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_llvm = [\n pytest.mark.llvm,\n pytest.mark.skipif(not device_enabled(\"llvm\"), reason=\"LLVM support not enabled\"),\n ]\n return _compose(args, _requires_llvm)\n\n\ndef requires_micro(*args):\n \"\"\"Mark a test as requiring microTVM to run.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_micro = [\n pytest.mark.skipif(\n tvm.support.libinfo().get(\"USE_MICRO\", \"OFF\") != \"ON\",\n reason=\"MicroTVM support not enabled. 
Set USE_MICRO=ON in config.cmake to enable.\",\n )\n ]\n return _compose(args, _requires_micro)\n\n\ndef requires_rpc(*args):\n \"\"\"Mark a test as requiring rpc to run.\n\n Parameters\n ----------\n f : function\n Function to mark\n \"\"\"\n _requires_rpc = [\n pytest.mark.skipif(\n tvm.support.libinfo().get(\"USE_RPC\", \"OFF\") != \"ON\",\n reason=\"RPC support not enabled. Set USE_RPC=ON in config.cmake to enable.\",\n )\n ]\n return _compose(args, _requires_rpc)\n\n\ndef _target_to_requirement(target):\n # mapping from target to decorator\n if target.startswith(\"cuda\"):\n return requires_cuda()\n if target.startswith(\"rocm\"):\n return requires_rocm()\n if target.startswith(\"vulkan\"):\n return requires_vulkan()\n if target.startswith(\"nvptx\"):\n return requires_nvptx()\n if target.startswith(\"metal\"):\n return requires_metal()\n if target.startswith(\"opencl\"):\n return requires_opencl()\n if target.startswith(\"llvm\"):\n return requires_llvm()\n return []\n\n\ndef _pytest_target_params(targets, excluded_targets=None, xfail_targets=None):\n # Include unrunnable targets here. They get skipped by the\n # pytest.mark.skipif in _target_to_requirement(), showing up as\n # skipped tests instead of being hidden entirely.\n if targets is None:\n if excluded_targets is None:\n excluded_targets = set()\n\n if xfail_targets is None:\n xfail_targets = set()\n\n target_marks = []\n for t in _get_targets():\n # Excluded targets aren't included in the params at all.\n if t[\"target_kind\"] not in excluded_targets:\n\n # Known failing targets are included, but are marked\n # as expected to fail.\n extra_marks = []\n if t[\"target_kind\"] in xfail_targets:\n extra_marks.append(\n pytest.mark.xfail(\n reason='Known failing test for target \"{}\"'.format(t[\"target_kind\"])\n )\n )\n\n target_marks.append((t[\"target\"], extra_marks))\n\n else:\n target_marks = [(target, []) for target in targets]\n\n return [\n pytest.param(target, marks=_target_to_requirement(target) + extra_marks)\n for target, extra_marks in target_marks\n ]\n\n\ndef _auto_parametrize_target(metafunc):\n \"\"\"Automatically applies parametrize_targets\n\n Used if a test function uses the \"target\" fixture, but isn't\n already marked with @tvm.testing.parametrize_targets. Intended\n for use in the pytest_generate_tests() handler of a conftest.py\n file.\n\n \"\"\"\n if \"target\" in metafunc.fixturenames:\n parametrized_args = [\n arg.strip()\n for mark in metafunc.definition.iter_markers(\"parametrize\")\n for arg in mark.args[0].split(\",\")\n ]\n\n if \"target\" not in parametrized_args:\n # Check if the function is marked with either excluded or\n # known failing targets.\n excluded_targets = getattr(metafunc.function, \"tvm_excluded_targets\", [])\n xfail_targets = getattr(metafunc.function, \"tvm_known_failing_targets\", [])\n metafunc.parametrize(\n \"target\",\n _pytest_target_params(None, excluded_targets, xfail_targets),\n scope=\"session\",\n )\n\n\ndef parametrize_targets(*args):\n \"\"\"Parametrize a test over a specific set of targets.\n\n Use this decorator when you want your test to be run over a\n specific set of targets and devices. It is intended for use where\n a test is applicable only to a specific target, and is\n inapplicable to any others (e.g. verifying target-specific\n assembly code matches known assembly code). 
In most\n    circumstances, :py:func:`tvm.testing.exclude_targets` or\n    :py:func:`tvm.testing.known_failing_targets` should be used\n    instead.\n\n    If used as a decorator without arguments, the test will be\n    parametrized over all targets in\n    :py:func:`tvm.testing.enabled_targets`. This behavior is\n    automatically enabled for any test that accepts arguments named\n    ``target`` or ``dev``, so the explicit use of the bare decorator\n    is no longer needed, and is maintained for backwards\n    compatibility.\n\n    Parameters\n    ----------\n    f : function\n        Function to parametrize. Must be of the form ``def test_xxxxxxxxx(target, dev):``,\n        where `xxxxxxxxx` is any name.\n    targets : list[str], optional\n        Set of targets to run against. If not supplied,\n        :py:func:`tvm.testing.enabled_targets` will be used.\n\n    Example\n    -------\n    >>> @tvm.testing.parametrize_targets(\"llvm\", \"cuda\")\n    >>> def test_mytest(target, dev):\n    >>>     ...  # do something\n    \"\"\"\n\n    def wrap(targets):\n        def func(f):\n            return pytest.mark.parametrize(\n                \"target\", _pytest_target_params(targets), scope=\"session\"\n            )(f)\n\n        return func\n\n    if len(args) == 1 and callable(args[0]):\n        return wrap(None)(args[0])\n    return wrap(args)\n\n\ndef exclude_targets(*args):\n    \"\"\"Exclude a test from running on a particular target.\n\n    Use this decorator when you want your test to be run over a\n    variety of targets and devices (including cpu and gpu devices),\n    but want to exclude some particular target or targets. For\n    example, a test may wish to be run against all targets in\n    tvm.testing.enabled_targets(), except for a particular target that\n    does not support the required capabilities.\n\n    Applies pytest.mark.skipif to the targets given.\n\n    Parameters\n    ----------\n    f : function\n        Function to parametrize. Must be of the form ``def test_xxxxxxxxx(target, dev):``,\n        where `xxxxxxxxx` is any name.\n    targets : list[str]\n        Set of targets to exclude.\n\n    Example\n    -------\n    >>> @tvm.testing.exclude_targets(\"cuda\")\n    >>> def test_mytest(target, dev):\n    >>>     ...  # do something\n\n    Or\n\n    >>> @tvm.testing.exclude_targets(\"llvm\", \"cuda\")\n    >>> def test_mytest(target, dev):\n    >>>     ...  # do something\n\n    \"\"\"\n\n    def wraps(func):\n        func.tvm_excluded_targets = args\n        return func\n\n    return wraps\n\n\ndef known_failing_targets(*args):\n    \"\"\"Skip a test that is known to fail on a particular target.\n\n    Use this decorator when you want your test to be run over a\n    variety of targets and devices (including cpu and gpu devices),\n    but know that it fails for some targets. For example, a newly\n    implemented runtime may not support all features being tested, and\n    should be excluded.\n\n    Applies pytest.mark.xfail to the targets given.\n\n    Parameters\n    ----------\n    f : function\n        Function to parametrize. Must be of the form ``def test_xxxxxxxxx(target, dev):``,\n        where `xxxxxxxxx` is any name.\n    targets : list[str]\n        Set of targets to skip.\n\n    Example\n    -------\n    >>> @tvm.testing.known_failing_targets(\"cuda\")\n    >>> def test_mytest(target, dev):\n    >>>     ...  # do something\n\n    Or\n\n    >>> @tvm.testing.known_failing_targets(\"llvm\", \"cuda\")\n    >>> def test_mytest(target, dev):\n    >>>     ...  # do something\n\n    \"\"\"\n\n    def wraps(func):\n        func.tvm_known_failing_targets = args\n        return func\n\n    return wraps\n\n\ndef parameter(*values, ids=None):\n    \"\"\"Convenience function to define pytest parametrized fixtures.\n\n    Declaring a variable using ``tvm.testing.parameter`` will define a\n    parametrized pytest fixture that can be used by test\n    functions. 
This is intended for cases that have no setup cost,\n    such as strings, integers, tuples, etc. For cases that have a\n    significant setup cost, please use :py:func:`tvm.testing.fixture`\n    instead.\n\n    If a test function accepts multiple parameters defined using\n    ``tvm.testing.parameter``, then the test will be run using every\n    combination of those parameters.\n\n    The parameter definition applies to all tests in a module. If a\n    specific test should have different values for the parameter, that\n    test should be marked with ``@pytest.mark.parametrize``.\n\n    Parameters\n    ----------\n    values\n        A list of parameter values. A unit test that accepts this\n        parameter as an argument will be run once for each parameter\n        given.\n\n    ids : List[str], optional\n        A list of names for the parameters. If None, pytest will\n        generate a name from the value. These generated names may not\n        be readable/useful for composite types such as tuples.\n\n    Returns\n    -------\n    function\n        A function output from pytest.fixture.\n\n    Example\n    -------\n    >>> size = tvm.testing.parameter(1, 10, 100)\n    >>> def test_using_size(size):\n    >>>     ...  # Test code here\n\n    Or\n\n    >>> shape = tvm.testing.parameter((5,10), (512,1024), ids=['small','large'])\n    >>> def test_using_shape(shape):\n    >>>     ...  # Test code here\n\n    \"\"\"\n\n    # Optional cls parameter in case a parameter is defined inside a\n    # class scope.\n    @pytest.fixture(params=values, ids=ids)\n    def as_fixture(*_cls, request):\n        return request.param\n\n    return as_fixture\n\n\n_parametrize_group = 0\n\n\ndef parameters(*value_sets):\n    \"\"\"Convenience function to define pytest parametrized fixtures.\n\n    Declaring a variable using tvm.testing.parameters will define a\n    parametrized pytest fixture that can be used by test\n    functions. Like :py:func:`tvm.testing.parameter`, this is intended\n    for cases that have no setup cost, such as strings, integers,\n    tuples, etc. For cases that have a significant setup cost, please\n    use :py:func:`tvm.testing.fixture` instead.\n\n    Unlike :py:func:`tvm.testing.parameter`, if a test function\n    accepts multiple parameters defined using a single call to\n    ``tvm.testing.parameters``, then the test will only be run once\n    for each set of parameters, not for all combinations of\n    parameters.\n\n    These parameter definitions apply to all tests in a module. If a\n    specific test should have different values for some parameters,\n    that test should be marked with ``@pytest.mark.parametrize``.\n\n    Parameters\n    ----------\n    values : List[tuple]\n        A list of parameter value sets. Each set of values represents\n        a single combination of values to be tested. A unit test that\n        accepts these parameters as arguments will be run once for every\n        set of parameters in the list.\n\n    Returns\n    -------\n    List[function]\n        Function outputs from pytest.fixture. 
These should be unpacked\n        into individual named parameters.\n\n    Example\n    -------\n    >>> size, dtype = tvm.testing.parameters( (16,'float32'), (512,'float16') )\n    >>> def test_feature_x(size, dtype):\n    >>>     # Test code here\n    >>>     assert( (size,dtype) in [(16,'float32'), (512,'float16')])\n\n    \"\"\"\n    global _parametrize_group\n    parametrize_group = _parametrize_group\n    _parametrize_group += 1\n\n    outputs = []\n    for param_values in zip(*value_sets):\n\n        # Optional cls parameter in case a parameter is defined inside a\n        # class scope.\n        def fixture_func(*_cls, request):\n            return request.param\n\n        fixture_func.parametrize_group = parametrize_group\n        fixture_func.parametrize_values = param_values\n        outputs.append(pytest.fixture(fixture_func))\n\n    return outputs\n\n\ndef _parametrize_correlated_parameters(metafunc):\n    parametrize_needed = collections.defaultdict(list)\n\n    for name, fixturedefs in metafunc.definition._fixtureinfo.name2fixturedefs.items():\n        fixturedef = fixturedefs[-1]\n        if hasattr(fixturedef.func, \"parametrize_group\") and hasattr(\n            fixturedef.func, \"parametrize_values\"\n        ):\n            group = fixturedef.func.parametrize_group\n            values = fixturedef.func.parametrize_values\n            parametrize_needed[group].append((name, values))\n\n    for parametrize_group in parametrize_needed.values():\n        if len(parametrize_group) == 1:\n            name, values = parametrize_group[0]\n            metafunc.parametrize(name, values, indirect=True)\n        else:\n            names = \",\".join(name for name, values in parametrize_group)\n            value_sets = zip(*[values for name, values in parametrize_group])\n            metafunc.parametrize(names, value_sets, indirect=True)\n\n\ndef fixture(func=None, *, cache_return_value=False):\n    \"\"\"Convenience function to define pytest fixtures.\n\n    This should be used as a decorator to mark functions that set up\n    state before a function. The return value of that fixture\n    function is then accessible by test functions that accept it as\n    a parameter.\n\n    Fixture functions can accept parameters defined with\n    :py:func:`tvm.testing.parameter`.\n\n    By default, the setup will be performed once for each unit test\n    that uses a fixture, to ensure that unit tests are independent.\n    If the setup is expensive to perform, then the\n    cache_return_value=True argument can be passed to cache the setup.\n    The fixture function will be run only once (or once per parameter,\n    if used with tvm.testing.parameter), and the same return value\n    will be passed to all tests that use it. 
If the environment\n variable TVM_TEST_DISABLE_CACHE is set to a non-zero value, it\n will disable this feature and no caching will be performed.\n\n Example\n -------\n >>> @tvm.testing.fixture\n >>> def cheap_setup():\n >>> return 5 # Setup code here.\n >>>\n >>> def test_feature_x(target, dev, cheap_setup):\n >>> assert(cheap_setup == 5) # Run test here\n\n Or\n\n >>> size = tvm.testing.parameter(1, 10, 100)\n >>>\n >>> @tvm.testing.fixture\n >>> def cheap_setup(size):\n >>> return 5*size # Setup code here, based on size.\n >>>\n >>> def test_feature_x(cheap_setup):\n >>> assert(cheap_setup in [5, 50, 500])\n\n Or\n\n >>> @tvm.testing.fixture(cache_return_value=True)\n >>> def expensive_setup():\n >>> time.sleep(10) # Setup code here\n >>> return 5\n >>>\n >>> def test_feature_x(target, dev, expensive_setup):\n >>> assert(expensive_setup == 5)\n\n \"\"\"\n\n force_disable_cache = bool(int(os.environ.get(\"TVM_TEST_DISABLE_CACHE\", \"0\")))\n cache_return_value = cache_return_value and not force_disable_cache\n\n # Deliberately at function scope, so that caching can track how\n # many times the fixture has been used. If used, the cache gets\n # cleared after the fixture is no longer needed.\n scope = \"function\"\n\n def wraps(func):\n if cache_return_value:\n func = _fixture_cache(func)\n func = pytest.fixture(func, scope=scope)\n return func\n\n if func is None:\n return wraps\n\n return wraps(func)\n\n\ndef _fixture_cache(func):\n cache = {}\n\n # Can't use += on a bound method's property. Therefore, this is a\n # list rather than a variable so that it can be accessed from the\n # pytest_collection_modifyitems().\n num_uses_remaining = [0]\n\n # Using functools.lru_cache would require the function arguments\n # to be hashable, which wouldn't allow caching fixtures that\n # depend on numpy arrays. For example, a fixture that takes a\n # numpy array as input, then uses a slow method to\n # compute a known correct output for that input. Therefore, this\n # includes a fallback for serializable types.\n def get_cache_key(*args, **kwargs):\n # Dicts are unhashable, so kwargs must be normalized to a\n # sorted tuple of items before trying the hashable fast path.\n try:\n key = (args, tuple(sorted(kwargs.items())))\n hash(key)\n return key\n except TypeError:\n pass\n\n try:\n return pickle.dumps((args, kwargs))\n except TypeError as e:\n raise TypeError(\n \"TVM caching of fixtures requires arguments to the fixture \"\n \"to be either hashable or serializable\"\n ) from e\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n try:\n cache_key = get_cache_key(*args, **kwargs)\n\n try:\n cached_value = cache[cache_key]\n except KeyError:\n cached_value = cache[cache_key] = func(*args, **kwargs)\n\n try:\n yield copy.deepcopy(cached_value)\n except TypeError as e:\n rfc_url = (\n \"https://github.com/apache/tvm-rfcs/blob/main/rfcs/\"\n \"0007-parametrized-unit-tests.md#unresolved-questions\"\n )\n message = (\n \"TVM caching of fixtures can only be used on serializable data types, not {}.\\n\"\n \"Please see {} for details/discussion.\"\n ).format(type(cached_value), rfc_url)\n raise TypeError(message) from e\n\n finally:\n # Clear the cache once all tests that use a particular fixture\n # have completed.\n num_uses_remaining[0] -= 1\n if not num_uses_remaining[0]:\n cache.clear()\n\n # Set in the pytest_collection_modifyitems()\n wrapper.num_uses_remaining = num_uses_remaining\n\n return wrapper\n\n\ndef _count_num_fixture_uses(items):\n # Helper function, counts the number of tests that use each cached\n # fixture. 
Should be called from pytest_collection_modifyitems().\n for item in items:\n is_skipped = item.get_closest_marker(\"skip\") or any(\n mark.args[0] for mark in item.iter_markers(\"skipif\")\n )\n if is_skipped:\n continue\n\n for fixturedefs in item._fixtureinfo.name2fixturedefs.values():\n # Only increment the active fixturedef, in case a name has been overridden.\n fixturedef = fixturedefs[-1]\n if hasattr(fixturedef.func, \"num_uses_remaining\"):\n fixturedef.func.num_uses_remaining[0] += 1\n\n\ndef _remove_global_fixture_definitions(items):\n # Helper function, removes fixture definitions from the global\n # variables of the modules they were defined in. This is intended\n # to improve readability of error messages by giving a NameError\n # if a test function accesses a pytest fixture but doesn't include\n # it as an argument. Should be called from\n # pytest_collection_modifyitems().\n\n modules = set(item.module for item in items)\n\n for module in modules:\n for name in dir(module):\n obj = getattr(module, name)\n if hasattr(obj, \"_pytestfixturefunction\") and isinstance(\n obj._pytestfixturefunction, _pytest.fixtures.FixtureFunctionMarker\n ):\n delattr(module, name)\n\n\ndef identity_after(x, sleep):\n \"\"\"Testing function to return identity after sleep.\n\n Parameters\n ----------\n x : int\n The input value.\n\n sleep : float\n The amount of time to sleep\n\n Returns\n -------\n x : object\n The original value\n \"\"\"\n if sleep:\n time.sleep(sleep)\n return x\n\n\ndef terminate_self():\n \"\"\"Testing function to terminate the process.\"\"\"\n sys.exit(-1)\n\n\ntvm._ffi._init_api(\"testing\", __name__)\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport tvm\nfrom tvm import te\nimport numpy as np\nimport tvm.testing\nfrom tvm.topi.math import cast\n\n\ndef test_for():\n ib = tvm.tir.ir_builder.create()\n n = te.size_var(\"n\")\n A = ib.allocate(\"float32\", n, name=\"A\", scope=\"global\")\n with ib.for_range(0, n, name=\"i\") as i:\n A[i] = A[i] + 1\n with ib.for_range(0, 10, name=\"j\") as j:\n A[j] = A[j] + 2\n\n body = ib.get()\n assert isinstance(body, tvm.tir.AttrStmt)\n body = body.body\n assert isinstance(body, tvm.tir.Allocate)\n body = body.body\n assert isinstance(body, tvm.tir.For)\n body = body.body\n assert isinstance(body, tvm.tir.SeqStmt)\n assert isinstance(body[1], tvm.tir.For)\n\n\ndef test_if():\n ib = tvm.tir.ir_builder.create()\n n = te.size_var(\"n\")\n A = ib.pointer(\"float32\", name=\"A\")\n tmod = tvm.tir.truncmod\n with ib.for_range(0, n, name=\"i\") as i:\n with ib.if_scope(tmod(i, 2) == 0):\n A[i] = A[i] + 1\n with ib.else_scope():\n A[0] = A[i] + 2\n\n body = ib.get()\n assert A == A\n assert isinstance(body, tvm.tir.For)\n body = body.body\n assert isinstance(body, tvm.tir.IfThenElse)\n assert isinstance(body.condition, tvm.tir.EQ)\n assert isinstance(body.then_case.index, tvm.tir.Var)\n assert body.else_case.index.value == 0\n\n\ndef test_prefetch():\n A = tvm.tir.decl_buffer((10, 20), name=\"A\")\n ib = tvm.tir.ir_builder.create()\n n = te.size_var(\"n\")\n\n with ib.for_range(0, n, name=\"i\") as i:\n ib.emit(\n tvm.tir.Prefetch(\n A, [tvm.ir.Range.from_min_extent(i + 1, 2), tvm.ir.Range.from_min_extent(0, 20)]\n )\n )\n body = ib.get()\n assert body.body.bounds[0].extent.value == 2\n\n\ndef test_cpu():\n n = 1024\n dtype = \"float32\"\n A = te.placeholder((n,), name=\"A\")\n B = te.placeholder((n,), name=\"B\")\n\n def test_device_ir(A, B, C):\n n = A.shape[0]\n max_threads = 8\n ib = tvm.tir.ir_builder.create()\n Aptr = ib.buffer_ptr(A)\n Bptr = ib.buffer_ptr(B)\n Cptr = ib.buffer_ptr(C)\n with ib.for_range(0, n, name=\"i\") as i:\n Cptr[i] = Aptr[i] + Bptr[i]\n body = ib.get()\n return body\n\n C = te.extern(\n A.shape,\n [A, B],\n lambda ins, outs: test_device_ir(ins[0], ins[1], outs[0]),\n name=\"vector_add\",\n dtype=dtype,\n )\n s = te.create_schedule(C.op)\n\n def check_target(target):\n if not tvm.testing.device_enabled(target):\n return\n # build and invoke the kernel.\n fadd = tvm.build(s, [A, B, C], target)\n dev = tvm.device(target, 0)\n # launch the kernel.\n a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)\n b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)\n c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)\n fadd(a, b, c)\n tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())\n\n check_target(\"llvm\")\n\n\[email protected]_gpu\ndef test_gpu():\n n = te.size_var(\"n\")\n dtype = \"float32\"\n A = te.placeholder((n,), name=\"A\")\n B = te.placeholder((n,), name=\"B\")\n idxd = tvm.tir.indexdiv\n\n def test_device_ir(A, B, C):\n n = A.shape[0]\n max_threads = 32\n ib = tvm.tir.ir_builder.create()\n bx = te.thread_axis(\"blockIdx.x\")\n tx = te.thread_axis(\"threadIdx.x\")\n ib.scope_attr(bx, \"thread_extent\", idxd(n + max_threads - 1, max_threads))\n ib.scope_attr(tx, \"thread_extent\", max_threads)\n idx = bx.var * max_threads + tx.var\n Aptr = ib.buffer_ptr(A)\n Bptr = ib.buffer_ptr(B)\n Cptr = ib.buffer_ptr(C)\n with ib.if_scope(ib.likely(idx < n)):\n Cptr[idx] = Aptr[idx] + Bptr[idx]\n body = ib.get()\n return body\n\n C = 
te.extern(\n A.shape,\n [A, B],\n lambda ins, outs: test_device_ir(ins[0], ins[1], outs[0]),\n name=\"vector_add\",\n dtype=dtype,\n )\n s = te.create_schedule(C.op)\n bounds = tvm.te.schedule.InferBound(s)\n stmt = tvm.te.schedule.ScheduleOps(s, bounds)\n\n def check_target(target):\n n = 1024\n if not tvm.testing.device_enabled(target):\n return\n # build and invoke the kernel.\n fadd = tvm.build(s, [A, B, C], target)\n dev = tvm.device(target, 0)\n # launch the kernel.\n a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)\n b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)\n c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)\n fadd(a, b, c)\n tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())\n\n check_target(\"opencl\")\n check_target(\"cuda\")\n\n\ndef test_while_vectorize():\n \"\"\"Test while loop + vectorized inner loop\"\"\"\n\n n = 64\n num_iter = 10\n\n def test_ir(A, B, C):\n ib = tvm.tir.ir_builder.create()\n n = C.shape[0]\n A = ib.buffer_ptr(A)\n B = ib.buffer_ptr(B)\n C = ib.buffer_ptr(C)\n i = ib.allocate(\"int32\", (1,), name=\"i\", scope=\"local\")\n i[0] = 0\n\n with ib.for_range(0, n) as j:\n C[j] = 0.0\n\n with ib.while_loop(i[0] < num_iter):\n with ib.for_range(0, n, kind=\"vectorize\") as j:\n C[j] += A[j] + B[j]\n i[0] += 1\n\n return ib.get()\n\n def check_target(target, ir):\n dtype = \"float32\"\n A = te.placeholder((n,), name=\"A\", dtype=dtype)\n B = te.placeholder((n,), name=\"B\", dtype=dtype)\n\n C = te.extern(\n (n,),\n [A, B],\n lambda ins, outs: ir(ins[0], ins[1], outs[0]),\n name=\"while_vectorize\",\n dtype=dtype,\n )\n s = te.create_schedule(C.op)\n\n with tvm.transform.PassContext(opt_level=3):\n func = tvm.build(s, [A, B, C], target)\n\n dev = tvm.device(target, 0)\n a_np = np.random.uniform(size=n).astype(A.dtype)\n b_np = np.random.uniform(size=n).astype(B.dtype)\n a = tvm.nd.array(a_np, dev)\n b = tvm.nd.array(b_np, dev)\n c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)\n func(a, b, c)\n ref = num_iter * (a_np + b_np)\n tvm.testing.assert_allclose(c.numpy(), ref, rtol=1e-5, atol=1e-5)\n\n check_target(\"llvm\", test_ir)\n\n\ndef test_while_collatz():\n \"\"\"Test while loop + if\"\"\"\n\n def collatz_ref(n):\n a = n\n i = 0\n while a > 1:\n if a % 2 == 1:\n a = 3 * a + 1\n else:\n a = a >> 1\n i += 1\n return i\n\n def collatz(ib, n, C):\n i = ib.allocate(\"int32\", (1,), name=\"i\", scope=\"local\")\n a = ib.allocate(\"int32\", (1,), name=\"a\", scope=\"local\")\n i[0] = 0\n a[0] = n\n with ib.while_loop(a[0] > 1):\n with ib.if_scope(tvm.tir.floormod(a[0], 2) == 1):\n a[0] = 3 * a[0] + 1\n with ib.else_scope():\n a[0] = a[0] >> 1\n i[0] += 1\n\n C[n] = i[0]\n\n def collatz_ir_cpu(C):\n ib = tvm.tir.ir_builder.create()\n n = C.shape[0]\n C = ib.buffer_ptr(C)\n\n with ib.for_range(0, n, name=\"i\", kind=\"parallel\") as i:\n collatz(ib, i, C)\n\n body = ib.get()\n\n return body\n\n n = 30\n\n def check_target(target, ir):\n C = te.extern(\n (n,),\n [],\n lambda ins, outs: ir(outs[0]),\n name=\"collatz\",\n dtype=\"int32\",\n )\n s = te.create_schedule(C.op)\n\n with tvm.transform.PassContext(opt_level=3):\n func = tvm.build(s, [C], target)\n\n dev = tvm.device(target, 0)\n c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)\n func(c)\n ref = np.array([collatz_ref(i) for i in range(n)])\n tvm.testing.assert_allclose(c.numpy(), ref)\n\n check_target(\"llvm\", collatz_ir_cpu)\n\n\ndef test_while_mandel():\n n = 160\n shape = (n * 2, n)\n t = 300\n\n def mandel_ref():\n def complex_sqr(z):\n return np.array([z[0] 
** 2 - z[1] ** 2, z[1] * z[0] * 2])\n\n pixels = np.zeros(shape)\n\n for i in range(pixels.shape[0]):\n for j in range(pixels.shape[1]):\n c = np.array([-0.8, np.cos(t) * 0.2])\n z = np.array([i / n - 1, j / n - 0.5]) * 2\n iterations = 0\n\n while np.linalg.norm(z) < 20 and iterations < 50:\n z = complex_sqr(z) + c\n iterations += 1\n\n pixels[i, j] = 1 - iterations * 0.02\n\n return pixels\n\n def mandel(ib, i, j, pixels):\n z = ib.allocate(\"float32\", (2,), name=\"z\", scope=\"local\")\n tmp = ib.allocate(\"float32\", (1,), name=\"tmp\", scope=\"local\")\n iterations = ib.allocate(\"int32\", (1,), name=\"iterations\", scope=\"local\")\n\n z[0] = (i / float(n) - 1) * 2\n z[1] = (j / float(n) - 0.5) * 2\n iterations[0] = 0\n c = [-0.8, float(np.cos(t)) * 0.2]\n\n def norm(z):\n return tvm.tir.sqrt(z[0] * z[0] + z[1] * z[1])\n\n with ib.while_loop(tvm.tir.all(norm(z) < 20, iterations[0] < 50)):\n tmp[0] = z[0]\n z[0] = z[0] * z[0] - z[1] * z[1] + c[0]\n z[1] = z[1] * tmp[0] * 2 + c[1]\n iterations[0] += 1\n\n pixels[i, j] = 1 - iterations[0] * 0.02\n\n def mandel_ir_cpu(C):\n ib = tvm.tir.ir_builder.create()\n ny = C.shape[0]\n nx = C.shape[1]\n C = ib.buffer_ptr(C)\n\n with ib.for_range(0, ny, name=\"i\", kind=\"parallel\") as i:\n with ib.for_range(0, nx, name=\"j\") as j:\n mandel(ib, i, j, C)\n\n body = ib.get()\n\n return body\n\n def mandel_ir_gpu(C):\n ib = tvm.tir.ir_builder.create()\n ny = C.shape[0]\n nx = C.shape[1]\n C = ib.buffer_ptr(C)\n\n bx = te.thread_axis(\"blockIdx.x\")\n tx = te.thread_axis(\"threadIdx.x\")\n by = te.thread_axis(\"blockIdx.y\")\n ty = te.thread_axis(\"threadIdx.y\")\n\n max_threads = 16\n ib.scope_attr(bx, \"thread_extent\", tvm.tir.indexdiv(nx + max_threads - 1, max_threads))\n ib.scope_attr(tx, \"thread_extent\", max_threads)\n ib.scope_attr(by, \"thread_extent\", tvm.tir.indexdiv(ny + max_threads - 1, max_threads))\n ib.scope_attr(ty, \"thread_extent\", max_threads)\n\n tidx = bx * max_threads + tx\n tidy = by * max_threads + ty\n\n with ib.if_scope(tvm.tir.all(tidx < nx, tidy < ny)):\n mandel(ib, tidy, tidx, C)\n\n body = ib.get()\n\n return body\n\n ref = mandel_ref()\n\n def check_target(target, ir):\n if not tvm.testing.device_enabled(target):\n return\n\n C = te.extern(\n shape,\n [],\n lambda ins, outs: ir(outs[0]),\n name=\"mandel_ir\",\n dtype=\"float32\",\n )\n s = te.create_schedule(C.op)\n\n with tvm.transform.PassContext(opt_level=3):\n func = tvm.build(s, [C], target)\n\n dev = tvm.device(target, 0)\n c = tvm.nd.array(np.zeros(shape, dtype=C.dtype), dev)\n func(c)\n tvm.testing.assert_allclose(c.numpy(), ref, rtol=1e-5, atol=1e-5)\n\n check_target(\"llvm\", mandel_ir_cpu)\n check_target(\"nvptx\", mandel_ir_gpu)\n check_target(\"cuda\", mandel_ir_gpu)\n check_target(\"vulkan\", mandel_ir_gpu)\n\n\ndef test_while_binary_search():\n def binary_search(ib, n, i, Aptr, Bptr, Cptr):\n lo = ib.allocate(\"int32\", (1,), name=\"lo\", scope=\"local\")\n hi = ib.allocate(\"int32\", (1,), name=\"hi\", scope=\"local\")\n\n lo[0] = 0\n hi[0] = n\n v = Bptr[i]\n\n with ib.while_loop(lo[0] < hi[0]):\n mid = lo[0] + ((hi[0] - lo[0]) >> 1)\n with ib.if_scope(Aptr[mid] < v):\n lo[0] = mid + 1\n with ib.else_scope():\n hi[0] = mid\n\n Cptr[i] = lo[0]\n\n def searchsorted_ir_cpu(A, B, C, n):\n ib = tvm.tir.ir_builder.create()\n Aptr = ib.buffer_ptr(A)\n Bptr = ib.buffer_ptr(B)\n Cptr = ib.buffer_ptr(C)\n\n with ib.for_range(0, n, name=\"i\", kind=\"parallel\") as i:\n binary_search(ib, n, i, Aptr, Bptr, Cptr)\n\n body = ib.get()\n\n return body\n\n def 
searchsorted_ir_gpu(A, B, C, n):\n ib = tvm.tir.ir_builder.create()\n Aptr = ib.buffer_ptr(A)\n Bptr = ib.buffer_ptr(B)\n Cptr = ib.buffer_ptr(C)\n\n bx = te.thread_axis(\"blockIdx.x\")\n tx = te.thread_axis(\"threadIdx.x\")\n max_threads = 32\n ib.scope_attr(bx, \"thread_extent\", tvm.tir.indexdiv(n + max_threads - 1, max_threads))\n ib.scope_attr(tx, \"thread_extent\", max_threads)\n tid = bx * max_threads + tx\n\n with ib.if_scope(tid < n):\n binary_search(ib, n, tid, Aptr, Bptr, Cptr)\n\n body = ib.get()\n\n return body\n\n n = 1024\n dtype = \"float32\"\n A = te.placeholder((n,), name=\"A\", dtype=dtype)\n B = te.placeholder((n,), name=\"B\", dtype=dtype)\n\n def check_target(target, ir):\n if not tvm.testing.device_enabled(target):\n return\n\n C = te.extern(\n A.shape,\n [A, B],\n lambda ins, outs: ir(ins[0], ins[1], outs[0], n),\n name=\"searchsorted_ir\",\n dtype=\"int32\",\n )\n s = te.create_schedule(C.op)\n\n with tvm.transform.PassContext(opt_level=3):\n func = tvm.build(s, [A, B, C], target)\n\n dev = tvm.device(target, 0)\n a_np = np.random.uniform(size=n).astype(A.dtype)\n b_np = np.random.uniform(size=n).astype(B.dtype)\n a_np = np.sort(a_np)\n a = tvm.nd.array(a_np, dev)\n b = tvm.nd.array(b_np, dev)\n c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)\n func(a, b, c)\n ref = np.searchsorted(a_np, b_np)\n tvm.testing.assert_allclose(c.numpy(), ref)\n\n check_target(\"llvm\", searchsorted_ir_cpu)\n check_target(\"cuda\", searchsorted_ir_gpu)\n check_target(\"nvptx\", searchsorted_ir_gpu)\n check_target(\"vulkan\", searchsorted_ir_gpu)\n\n\[email protected]_gpu\ndef test_dyn_shared():\n n = te.size_var(\"n\")\n dtype = \"float32\"\n A = te.placeholder((n,), name=\"A\")\n\n def test_device_ir(A, B):\n n = A.shape[0]\n ib = tvm.tir.ir_builder.create()\n\n tx = te.thread_axis(\"threadIdx.x\")\n ib.scope_attr(tx, \"thread_extent\", n)\n\n temp = ib.allocate(dtype, (n,), scope=\"shared.dyn\") # n is symbolic size\n\n Aptr = ib.buffer_ptr(A)\n Bptr = ib.buffer_ptr(B)\n\n temp[tx] = Aptr[tx]\n depth = tvm.tir.log2(cast(n, \"float32\"))\n\n with ib.for_range(0, depth) as i:\n ib.emit(tvm.tir.Call(None, \"tir.tvm_storage_sync\", tvm.runtime.convert([\"shared\"])))\n d = n >> (i + 1)\n with ib.if_scope(tx < d):\n temp[tx] += temp[tx + d]\n\n Bptr[0] = temp[0]\n return ib.get()\n\n B = te.extern(\n (1,),\n [A],\n lambda ins, outs: test_device_ir(ins[0], outs[0]),\n name=\"reduce\",\n dtype=dtype,\n )\n s = te.create_schedule(B.op)\n\n def check_target(target):\n if not tvm.testing.device_enabled(target):\n return\n\n freduce = tvm.build(s, [A, B], target)\n dev = tvm.device(target, 0)\n\n for n in [512, 1024]:\n a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)\n b = tvm.nd.array(np.zeros(1, dtype=B.dtype), dev)\n freduce(a, b)\n tvm.testing.assert_allclose(b.numpy()[0], np.sum(a.numpy()), 1e-4, 1e-4)\n\n for target in [\"cuda\", \"nvptx\"]:\n check_target(target)\n\n\nif __name__ == \"__main__\":\n test_prefetch()\n test_if()\n test_for()\n test_cpu()\n test_gpu()\n test_while_vectorize()\n test_while_collatz()\n test_while_mandel()\n test_while_binary_search()\n test_dyn_shared()\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Relay to ONNX serialization test cases\"\"\"\nimport pytest\n\npytest.importorskip(\"onnx\")\npytest.importorskip(\"onnxruntime\")\n\nimport numpy as np\nimport onnxruntime as rt\n\nimport tvm\nfrom tvm import relay\nfrom tvm.contrib.target.onnx import to_onnx\nfrom tvm.relay.testing import run_infer_type\n\n\ndef func_to_onnx(func, name):\n mod = tvm.IRModule()\n mod[\"main\"] = func\n onnx_model = to_onnx(mod, {}, name, path=None)\n return onnx_model.SerializeToString()\n\n\ndef run_onnx(onnx_model, input_data):\n sess = rt.InferenceSession(onnx_model)\n input_names = {}\n for input, data in zip(sess.get_inputs(), input_data):\n input_names[input.name] = data\n output_names = [out.name for out in sess.get_outputs()]\n res = sess.run(output_names, input_names)\n return res\n\n\ndef run_relay(func, data_tuple):\n target = \"llvm\"\n dev = tvm.device(\"llvm\", 0)\n intrp = relay.create_executor(\"graph\", device=dev, target=target)\n relay_res = intrp.evaluate(func)(*data_tuple)\n\n result = []\n relay_res = relay_res if isinstance(relay_res, list) else [relay_res]\n for res in relay_res:\n result.append(res.numpy())\n\n return result\n\n\ndef verify_results(relay_func, indata, test_name, rtol=1e-7, atol=0):\n relay_results = run_relay(relay_func, indata)\n onnx_results = run_onnx(func_to_onnx(relay_func, test_name), indata)\n\n for relay_res, onnx_res in zip(relay_results, onnx_results):\n np.testing.assert_allclose(relay_res, onnx_res, rtol=rtol, atol=atol)\n\n\ndef test_add():\n dtype = \"float32\"\n t1 = relay.TensorType((5, 10, 5))\n t2 = relay.TensorType((5, 10, 5))\n x = relay.var(\"x\", t1, dtype=dtype)\n y = relay.var(\"y\", t2, dtype=dtype)\n z = relay.add(x, y)\n func = relay.Function([x, y], z)\n\n x_data = np.random.rand(5, 10, 5).astype(dtype)\n y_data = np.random.rand(5, 10, 5).astype(dtype)\n\n verify_results(func, [x_data, y_data], \"test_add\")\n\n\ndef test_bias_add():\n for dtype in [\"float16\", \"float32\"]:\n xshape = (10, 2, 3, 4)\n bshape = (2,)\n rtol = 1e-2 if dtype == \"float16\" else 1e-5\n x = relay.var(\"x\", shape=xshape, dtype=dtype)\n bias = relay.var(\"bias\", shape=bshape, dtype=dtype)\n z = relay.nn.bias_add(x, bias)\n func = relay.Function([x, bias], z)\n\n x_data = np.random.uniform(size=xshape).astype(dtype)\n y_data = np.random.uniform(size=bshape).astype(dtype)\n\n verify_results(func, [x_data, y_data], \"test_bias_add\", rtol=rtol)\n\n\ndef test_conv2d():\n def verify_conv2d(\n dtype, scale, dshape, kshape, padding=(1, 1), groups=1, dilation=(1, 1), **attrs\n ):\n x = relay.var(\"x\", shape=dshape, dtype=dtype)\n w = relay.var(\"w\", shape=kshape, dtype=dtype)\n y = relay.nn.conv2d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs)\n func = relay.Function([x, w], y)\n data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)\n kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)\n verify_results(func, [data, kernel], \"test_conv2d\", rtol=1e-5, atol=1e-5)\n\n dshape = (1, 32, 18, 18)\n kshape = (32, 1, 3, 3)\n verify_conv2d(\n \"float32\", 1, dshape, kshape, padding=(1, 1), channels=32, 
groups=32, kernel_size=(3, 3)\n )\n\n dshape = (1, 32, 18, 18)\n kshape = (32, 4, 3, 3)\n verify_conv2d(\n \"float32\", 1, dshape, kshape, padding=(1, 1), channels=32, groups=8, kernel_size=(3, 3)\n )\n\n # also group conv2d\n dshape = (1, 32, 18, 18)\n kshape = (64, 1, 3, 3)\n verify_conv2d(\n \"float32\", 1, dshape, kshape, padding=(1, 1), channels=64, groups=32, kernel_size=(3, 3)\n )\n\n # normal conv2d\n dshape = (1, 3, 224, 224)\n kshape = (10, 3, 3, 3)\n verify_conv2d(\"float32\", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(3, 3))\n\n dshape = (1, 3, 224, 224)\n kshape = (10, 3, 3, 3)\n verify_conv2d(\"float32\", 1, dshape, kshape, padding=(2, 2), channels=10, kernel_size=(3, 3))\n\n dshape = (1, 3, 18, 18)\n kshape = (10, 3, 3, 3)\n verify_conv2d(\n \"float32\",\n 1,\n dshape,\n kshape,\n padding=(1, 1),\n channels=10,\n kernel_size=(3, 3),\n dilation=(3, 3),\n )\n\n dshape = (1, 3, 18, 18)\n kshape = (10, 3, 2, 2)\n verify_conv2d(\n \"float32\",\n 1,\n dshape,\n kshape,\n padding=(2, 2),\n channels=10,\n kernel_size=(2, 2),\n dilation=(1, 1),\n )\n\n dshape = (1, 3, 18, 18)\n kshape = (10, 3, 4, 4)\n verify_conv2d(\"float32\", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(4, 4))\n\n dshape = (1, 3, 18, 18)\n kshape = (10, 3, 4, 4)\n verify_conv2d(\"float32\", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(4, 4))\n\n\ndef test_conv2d_transpose():\n \"\"\"Conv2d_Transpose unit tests.\"\"\"\n\n def verify_conv2d_transpose(\n dtype, scale, dshape, kshape, padding=(1, 1), groups=1, dilation=(1, 1), **attrs\n ):\n x = relay.var(\"x\", shape=dshape, dtype=dtype)\n w = relay.var(\"w\", shape=kshape, dtype=dtype)\n y = relay.nn.conv2d_transpose(\n x, w, padding=padding, dilation=dilation, groups=groups, **attrs\n )\n func = relay.Function([x, w], y)\n data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)\n kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)\n verify_results(func, [data, kernel], \"test_conv2d_transpose\", rtol=1e-5, atol=1e-5)\n\n dshape = (1, 3, 224, 224)\n kshape = (3, 10, 3, 3)\n verify_conv2d_transpose(\n \"float32\", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(3, 3)\n )\n\n dshape = (1, 3, 224, 224)\n kshape = (3, 10, 3, 3)\n verify_conv2d_transpose(\n \"float32\", 1, dshape, kshape, padding=(2, 2), channels=10, kernel_size=(3, 3)\n )\n\n dshape = (1, 3, 18, 18)\n kshape = (3, 10, 2, 2)\n verify_conv2d_transpose(\n \"float32\",\n 1,\n dshape,\n kshape,\n padding=(2, 2),\n channels=10,\n kernel_size=(2, 2),\n dilation=(1, 1),\n )\n\n dshape = (1, 3, 18, 18)\n kshape = (3, 10, 4, 4)\n verify_conv2d_transpose(\n \"float32\", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(4, 4)\n )\n\n dshape = (1, 3, 18, 18)\n kshape = (3, 10, 4, 4)\n verify_conv2d_transpose(\n \"float32\", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(4, 4)\n )\n\n\ndef test_reshape():\n def verify_reshape(shape, newshape):\n x = relay.var(\"x\", relay.TensorType(shape, \"float32\"))\n z = relay.reshape(x, newshape=newshape)\n\n func = relay.Function([x], z)\n x_data = np.random.uniform(low=-1, high=1, size=shape).astype(\"float32\")\n verify_results(func, [x_data], \"test_reshape\", rtol=1e-5, atol=1e-5)\n\n verify_reshape((2, 3, 4), tuple(np.array([4, 2, 3], dtype=np.int64)))\n verify_reshape((2, 3, 4), tuple(np.array([2, 0, 0], dtype=np.int64)))\n verify_reshape((2, 3, 4), tuple(np.array([0, -1], dtype=np.int64)))\n verify_reshape((2, 3, 4), tuple(np.array([-1, 0], 
dtype=np.int64)))\n\n\ndef test_transpose():\n def verify_reshape(shape, newshape):\n x = relay.var(\"x\", relay.TensorType(shape, \"float32\"))\n z = relay.transpose(x, newshape)\n func = relay.Function([x], z)\n x_data = np.random.uniform(low=-1, high=1, size=shape).astype(\"float32\")\n verify_results(func, [x_data], \"test_transpose\", rtol=1e-5, atol=1e-5)\n\n verify_reshape((1, 2, 3, 4), (0, 2, 3, 1))\n verify_reshape((1, 2, 3, 4), (0, 3, 2, 1))\n\n\ndef test_dense():\n def verify_dense(d_shape, w_shape):\n data = relay.var(\"data\", relay.TensorType(d_shape, \"float32\"))\n weight = relay.var(\"weight\", relay.TensorType(w_shape, \"float32\"))\n func = relay.Function([data, weight], relay.nn.dense(data, weight))\n x_data = np.random.uniform(size=d_shape).astype(\"float32\")\n w_data = np.random.uniform(size=w_shape).astype(\"float32\")\n verify_results(func, [x_data, w_data], \"test_dense\", rtol=1e-5, atol=1e-5)\n\n verify_dense((1, 8), (16, 8))\n verify_dense((1, 4), (3, 4))\n\n\ndef test_max_pool():\n def verify_max_pool(x_shape, pool_size, strides, padding, ceil_mode):\n x = relay.var(\"x\", relay.TensorType(x_shape, \"float32\"))\n y = tvm.relay.nn.max_pool2d(\n x, pool_size=pool_size, strides=strides, padding=padding, ceil_mode=ceil_mode\n )\n func = relay.Function([x], y)\n x_data = np.random.uniform(size=x_shape).astype(\"float32\")\n verify_results(func, [x_data], \"test_max_pool\", rtol=1e-5, atol=1e-5)\n\n verify_max_pool(\n (1, 4, 16, 16), pool_size=(2, 2), strides=(2, 2), padding=(0, 0), ceil_mode=False\n )\n\n\ndef test_batch_flatten():\n def verify_test_batch_flatten(d_shape):\n data = relay.var(\"data\", relay.TensorType(d_shape, \"float32\"))\n func = relay.Function([data], relay.nn.batch_flatten(data))\n x_data = np.random.uniform(size=d_shape).astype(\"float32\")\n verify_results(func, [x_data], \"test_batch_flatten\", rtol=1e-5, atol=1e-5)\n\n verify_test_batch_flatten((1, 2, 3, 4))\n verify_test_batch_flatten((1, 8))\n\n\ndef test_batch_norm():\n def verify_batch_norm(axis=1):\n for dtype in [\"float16\", \"float32\"]:\n data = relay.var(\"data\", relay.TensorType((2, 4, 4, 1), dtype))\n gamma_shape = (data.type_annotation.shape[axis].value,)\n beta = relay.var(\"beta\", relay.TensorType(gamma_shape, dtype))\n gamma = relay.var(\"gamma\", relay.TensorType(gamma_shape, dtype))\n moving_mean = relay.var(\"moving_mean\", relay.TensorType(gamma_shape, dtype))\n moving_var = relay.var(\"moving_var\", relay.TensorType(gamma_shape, dtype))\n y = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var, axis=axis)\n func = relay.Function([data, gamma, beta, moving_mean, moving_var], y[0])\n\n x_data = np.random.uniform(size=(2, 4, 4, 1)).astype(dtype)\n beta = np.random.uniform(size=gamma_shape).astype(dtype)\n gamma = np.random.uniform(size=gamma_shape).astype(dtype)\n moving_mean = np.random.uniform(size=gamma_shape).astype(dtype)\n moving_var = np.random.uniform(size=gamma_shape).astype(dtype)\n verify_results(\n func,\n [x_data, gamma, beta, moving_mean, moving_var],\n \"test_batch_norm\",\n rtol=1e-1,\n atol=1e-1,\n )\n\n verify_batch_norm(axis=1)\n verify_batch_norm(axis=3)\n\n\ndef test_pad():\n \"\"\"Pad unit test.\"\"\"\n\n def verify_pad():\n dshape = (4, 10, 7, 7)\n x = relay.var(\"x\", shape=dshape, dtype=\"int32\")\n y = relay.nn.pad(x, ((1, 1), (2, 2), (3, 3), (4, 4)))\n func = relay.Function([x], y)\n func = run_infer_type(func)\n x_data = np.random.randint(low=-255, high=255, size=dshape).astype(np.int32)\n verify_results(func, [x_data], 
\"test_pad\", rtol=1e-5, atol=1e-5)\n\n verify_pad()\n\n\ndef test_sofmax():\n def verify_sofmax():\n for dtype in [\"float32\"]:\n shape = (10, 4)\n x = relay.var(\"x\", shape=shape, dtype=dtype)\n y = relay.nn.softmax(x, axis=1)\n func = relay.Function([x], y)\n x_data = np.random.uniform(size=shape).astype(dtype)\n verify_results(func, [x_data], \"test_softmax\", rtol=1e-5, atol=1e-5)\n\n verify_sofmax()\n\n\ndef test_squeeze():\n def verify_squeeze(shape, dtype, axis):\n x = relay.var(\"x\", relay.TensorType(shape, dtype))\n z = relay.squeeze(x, axis=axis)\n func = relay.Function([x], z)\n x_data = np.random.random_sample(shape).astype(dtype)\n verify_results(func, [x_data], \"test_squeeze\", rtol=1e-5, atol=1e-5)\n\n verify_squeeze((1, 3, 2, 5), \"float32\", None)\n verify_squeeze(\n (1, 3, 1),\n \"float32\",\n [\n 2,\n ],\n )\n verify_squeeze((1, 2, 1, 2, 1), \"float32\", [0, 2])\n\n\ndef test_mean():\n def verify_mean(data_shape, axis, exclude, keepdims):\n dtype = \"float32\"\n x = relay.var(\"x\", shape=data_shape, dtype=dtype)\n y = relay.mean(x, axis, keepdims, exclude)\n func = relay.Function([x], y)\n x_data = np.random.uniform(size=data_shape).astype(dtype)\n verify_results(func, [x_data], \"test_mean\", rtol=1e-5, atol=1e-5)\n\n verify_mean((1, 2), 0, False, False)\n verify_mean((1, 2), 0, True, False)\n verify_mean((1, 2), 0, True, True)\n verify_mean((1, 2), 1, True, True)\n verify_mean((3, 2, 1), 1, False, True)\n\n\ndef test_split():\n def verify_split(dshape, indices_or_sections, axis=None):\n dtype = \"float32\"\n x = relay.var(\"x\", relay.ty.TensorType(dshape, \"float32\"))\n y = relay.split(x, indices_or_sections, axis=axis)\n func = relay.Function([x], y.astuple())\n x_data = np.random.uniform(size=dshape).astype(dtype)\n\n verify_results(func, [x_data], \"test_split\", rtol=1e-5, atol=1e-5)\n\n verify_split((5, 5, 2, 2), 5, axis=1)\n verify_split((5, 5, 2, 2), 5, axis=0)\n verify_split((5, 5, 2, 2), [1, 3, 4], axis=0)\n verify_split((5, 5, 2, 2), [1, 3, 4], axis=1)\n\n\ndef test_concatenate():\n def verify_concatenate(shapes, axis, dtype=\"float32\"):\n in_vars = []\n in_data = []\n for i, shape in enumerate(shapes):\n in_vars.append(relay.var(\"x\" + str(i), relay.ty.TensorType(shape, dtype)))\n in_data.append(np.random.uniform(size=shape).astype(dtype))\n\n out_tensor = relay.concatenate(in_vars, axis)\n func = relay.Function(in_vars, out_tensor)\n verify_results(func, in_data, \"test_concatenate\", rtol=1e-5, atol=1e-5)\n\n verify_concatenate([(2,), (2,), (2,)], -1)\n verify_concatenate([(2, 3, 4), (2, 2, 4), (2, 5, 4)], 1)\n verify_concatenate([(1, 2, 4), (1, 2, 3), (1, 2, 7), (1, 2, 8), (1, 2, 1)], -1)\n verify_concatenate([(5, 6, 7, 3), (16, 6, 7, 3), (12, 6, 7, 3), (8, 6, 7, 3), (2, 6, 7, 3)], 0)\n verify_concatenate([(1, 14400), (1, 2400), (1, 640), (1, 240)], 1)\n\n\ndef test_strided_slice():\n def verify_strided_slice(dshape, begin, end, strides, mode):\n x = relay.var(\"x\", relay.TensorType(dshape, \"float32\"))\n if mode == \"size\":\n strides = None\n z = relay.strided_slice(x, begin=begin, end=end, strides=strides, slice_mode=mode)\n func = relay.Function([x], z)\n x_data = np.random.uniform(size=dshape).astype(\"float32\")\n verify_results(func, [x_data], \"test_strided_slice\", rtol=1e-5, atol=1e-5)\n\n for mode in [\"end\", \"size\"]:\n verify_strided_slice((3, 4, 3), [1, 1, 0], [4, 2, 3], None, mode)\n verify_strided_slice((3, 4, 3), [1, -1, 0], [4, -1, 3], [1, 2], mode)\n verify_strided_slice(\n (3, 4, 3),\n [\n 1,\n ],\n [4, -3],\n None,\n 
mode,\n )\n verify_strided_slice((3, 4, 3), [0, 0, 0], [4, -5, 4], [1, -1, 2], mode)\n verify_strided_slice((3, 4, 3), [1, 1, 0], [4, 4, -3], [2, 1, 1], mode)\n verify_strided_slice((3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1], mode)\n verify_strided_slice((3, 4, 3), [1, 0, 0], [2, 2, 3], [1, 1, 2], mode)\n verify_strided_slice((3, 4, 3), [1, -1, 0], [2, -3, 3], [1, -1, 1], mode)\n\n verify_strided_slice((3, 4, 3), [1, 1, 0], [4, 1000, 3], None, mode)\n verify_strided_slice((3, 4, 3), [1, 1, 0], [4, 4], None, mode)\n verify_strided_slice((3, 4, 3), [1, 1], [4, 4, 3], None, mode)\n verify_strided_slice((3, 4, 3), [1, 1], [4, 4, 3], [1, 1, 2], mode)\n\n\ndef test_cmp_type():\n for op, ref in ((relay.greater, np.greater), (relay.less, np.less), (relay.equal, np.equal)):\n x_shape = (10, 4)\n y_shape = (5, 10, 1)\n t1 = relay.TensorType(x_shape)\n t2 = relay.TensorType(y_shape)\n x = relay.var(\"x\", t1)\n y = relay.var(\"y\", t2)\n z = op(x, y)\n x_data = np.random.rand(*x_shape).astype(t1.dtype)\n y_data = np.random.rand(*y_shape).astype(t2.dtype)\n func = relay.Function([x, y], z)\n verify_results(func, [x_data, y_data], \"test_cmp_type\", rtol=1e-5, atol=1e-5)\n\n\ndef test_unary_identity():\n for dtype in [\"int16\", \"float32\", \"float64\"]:\n for op, ref in [(relay.zeros_like, np.zeros_like), (relay.ones_like, np.ones_like)]:\n shape = (8, 9, 4)\n x = relay.var(\"x\", relay.TensorType(shape, dtype))\n y = op(x)\n func = relay.Function(\n [\n x,\n ],\n y,\n )\n x_data = np.random.rand(*shape).astype(dtype)\n verify_results(func, [x_data], \"test_cmp_type\", rtol=1e-5, atol=1e-5)\n\n\ndef test_binary_op():\n def check_binary_op(opfunc, dtype):\n t1 = relay.TensorType((5, 10, 5))\n t2 = relay.TensorType((5, 10, 5))\n x = relay.var(\"x\", t1, dtype=dtype)\n y = relay.var(\"y\", t2, dtype=dtype)\n z = opfunc(x, y)\n x_data = np.random.rand(5, 10, 5).astype(dtype)\n y_data = np.random.rand(5, 10, 5).astype(dtype)\n func = relay.Function([x, y], z)\n verify_results(func, [x_data, y_data], \"test_binary_op\", rtol=1e-5, atol=1e-5)\n\n for opfunc, ref in [\n (relay.add, np.add),\n (relay.subtract, np.subtract),\n (relay.multiply, np.multiply),\n (relay.divide, np.divide),\n ]:\n for dtype in [\"float32\"]:\n check_binary_op(opfunc, dtype)\n\n\ndef test_tuple_types():\n def verify_tuple_types(dshape, indices_or_sections, axis=None, dtype=\"float32\"):\n x = relay.var(\"x\", relay.ty.TensorType(dshape, dtype))\n y = relay.split(x, indices_or_sections, axis=axis)\n z = relay.concatenate(y, axis=axis)\n func = relay.Function([x], z)\n x_data = np.random.uniform(size=dshape).astype(dtype)\n verify_results(func, [x_data], \"test_tuple_types\", rtol=1e-5, atol=1e-5)\n\n split_z = relay.split(z, indices_or_sections, axis=axis)\n func = relay.Function([x], split_z.astuple())\n verify_results(func, [x_data], \"test_tuple_types\", rtol=1e-5, atol=1e-5)\n\n out = relay.Tuple([y[0] + y[1], y[0] - y[1]])\n func = relay.Function([x], out)\n verify_results(func, [x_data], \"test_tuple_types\", rtol=1e-5, atol=1e-5)\n\n z = relay.concatenate(out, axis=axis)\n func = relay.Function([x], z)\n verify_results(func, [x_data], \"test_tuple_types\", rtol=1e-5, atol=1e-5)\n\n verify_tuple_types((5, 5, 2, 2), 5, axis=1)\n verify_tuple_types((5, 5, 2, 2), 5, axis=0)\n verify_tuple_types((5, 5, 2, 2), [1, 3, 4], axis=0)\n verify_tuple_types((5, 5, 2, 2), [1, 3, 4], axis=1)\n\n\ndef test_layout_transform():\n def verify_layout_transform(dshape, src_layout, dst_layout, dtype=\"float32\"):\n x = relay.var(\"x\", 
relay.ty.TensorType(dshape, dtype))\n y = relay.layout_transform(x, src_layout, dst_layout)\n func = relay.Function([x], y)\n x_data = np.random.uniform(size=dshape).astype(dtype)\n verify_results(func, [x_data], \"test_layout_transform\", rtol=1e-5, atol=1e-5)\n\n verify_layout_transform((1, 3, 8, 8), \"NCHW\", \"NHWC\")\n verify_layout_transform((1, 8, 8, 3), \"NHWC\", \"NCHW\")\n\n\ndef test_clip():\n def verify_clip(dshape, a_min, a_max, dtype=\"float32\"):\n x = relay.var(\"x\", relay.ty.TensorType(dshape, dtype))\n y = relay.clip(x, a_min, a_max)\n func = relay.Function([x], y)\n x_data = np.random.uniform(size=dshape).astype(dtype)\n verify_results(func, [x_data], \"test_clip\", rtol=1e-5, atol=1e-5)\n\n verify_clip((5, 5, 2, 5), 0, 0.2)\n verify_clip((5, 5, 2, 5), 0.2, 0.5)\n\n\ndef test_expand_dims():\n def verify_expand_dims(dshape, axis, num_newaxis, dtype=\"float32\"):\n x = relay.var(\"x\", relay.ty.TensorType(dshape, dtype))\n y = relay.expand_dims(x, axis, num_newaxis)\n func = relay.Function([x], y)\n x_data = np.random.uniform(size=dshape).astype(dtype)\n verify_results(func, [x_data], \"test_expand_dims\", rtol=1e-5, atol=1e-5)\n\n verify_expand_dims((1, 1001), 0, 2)\n verify_expand_dims((1, 1, 1001), 2, 2)\n\n\ndef test_lrn():\n \"\"\"LRN unit test.\"\"\"\n\n def verify_lrn(xshape, size, dtype=\"float32\"):\n x = relay.var(\"x\", relay.ty.TensorType(xshape, dtype))\n y = relay.nn.lrn(x, size=size, axis=1, alpha=1.0, beta=1.0, bias=1.0)\n func = relay.Function([x], y)\n x_data = np.random.uniform(size=xshape).astype(dtype)\n verify_results(func, [x_data], \"test_lrn\", rtol=1e-5, atol=1e-5)\n\n isize = [(1, 1, 480, 640), (1, 3, 224, 224)]\n sizes = [1, 3]\n for i in isize:\n for s in sizes:\n verify_lrn(i, s)\n\n\ndef test_sigmoid():\n \"\"\"Sigmoid unit test.\"\"\"\n\n def verify_sigmoid(dshape, dtype=\"float32\"):\n x = relay.var(\"x\", relay.ty.TensorType(dshape, dtype))\n y = relay.sigmoid(x)\n func = relay.Function([x], y)\n x_data = np.random.uniform(size=dshape).astype(dtype)\n verify_results(func, [x_data], \"test_sigmoid\", rtol=1e-4, atol=1e-4)\n\n isize = [(1, 3, 480, 640), (1, 3, 224, 224)]\n\n for i in isize:\n verify_sigmoid(i)\n\n\ndef test_copy():\n \"\"\"Copy unit test.\"\"\"\n\n def verify_copy(dshape, dtype=\"float32\"):\n x = relay.var(\"x\", relay.ty.TensorType(dshape, dtype))\n y = relay.copy(x)\n func = relay.Function([x], y)\n x_data = np.random.uniform(size=dshape).astype(dtype)\n verify_results(func, [x_data], \"test_copy\", rtol=1e-4, atol=1e-4)\n\n isize = [(1, 3, 480, 640), (1, 3, 224, 224)]\n\n for i in isize:\n verify_copy(i)\n\n\ndef test_round():\n \"\"\"Round unit test.\"\"\"\n\n def verify_round(dshape, dtype=\"float32\"):\n x = relay.var(\"x\", relay.ty.TensorType(dshape, dtype))\n y = relay.round(x)\n func = relay.Function([x], y)\n x_data = np.random.uniform(size=dshape).astype(dtype)\n verify_results(func, [x_data], \"test_round\", rtol=1e-4, atol=1e-4)\n\n isize = [(1, 3, 480, 640), (1, 3, 224, 224)]\n\n for i in isize:\n verify_round(i)\n\n\ndef test_cast():\n \"\"\"Cast unit test.\"\"\"\n\n def verify_cast(dshape, dtype):\n x = relay.var(\"x\", relay.ty.TensorType(dshape, \"float32\"))\n y = relay.cast(x, dtype)\n func = relay.Function([x], y)\n x_data = np.random.uniform(size=dshape).astype(\"float32\")\n verify_results(func, [x_data], \"test_cast\", rtol=1e-4, atol=1e-4)\n\n isize = [(1, 3, 480, 640), (1, 3, 224, 224)]\n out_dtypes = [\"int8\", \"int16\", \"uint8\", \"uint16\"]\n\n for i in isize:\n for o_dtype in out_dtypes:\n 
verify_cast(i, o_dtype)\n\n\ndef test_resize():\n \"\"\"Resize unit test.\"\"\"\n\n def verify_resize(dshape, outsize, method, coord_trans, rounding_method, dtype=\"float32\"):\n x = relay.var(\"x\", relay.ty.TensorType(dshape, dtype))\n y = relay.image.resize2d(\n x,\n outsize,\n layout=\"NCHW\",\n method=method,\n coordinate_transformation_mode=coord_trans,\n rounding_method=rounding_method,\n )\n func = relay.Function([x], y)\n x_data = np.random.uniform(size=dshape).astype(dtype)\n verify_results(func, [x_data], \"test_resize\", rtol=1e-4, atol=1e-4)\n\n method = [\"nearest_neighbor\", \"linear\", \"cubic\"]\n coord_trans = [\"half_pixel\", \"align_corners\", \"asymmetric\"]\n rounding_method = [\"round\", \"floor\", \"ceil\"]\n\n isize = (1, 3, 480, 640)\n\n # Downsample\n osize = (240, 320)\n for i in method:\n for j in coord_trans:\n for k in rounding_method:\n if (i == \"nearest_neighbor\" and j == \"align_corners\") or (\n i == \"cubic\" and j in [\"half_pixel\", \"align_corners\"]\n ):\n continue\n verify_resize(isize, osize, method=i, coord_trans=j, rounding_method=k)\n\n # Upsample\n osize = (960, 1280)\n for i in method:\n for j in coord_trans:\n for k in rounding_method:\n if (i == \"nearest_neighbor\" and j == \"align_corners\") or (i == \"cubic\"):\n continue\n verify_resize(isize, osize, method=i, coord_trans=j, rounding_method=k)\n\n\nif __name__ == \"__main__\":\n test_add()\n test_bias_add()\n test_conv2d()\n test_conv2d_transpose()\n test_reshape()\n test_transpose()\n test_dense()\n test_max_pool()\n test_batch_flatten()\n test_batch_norm()\n test_pad()\n test_mean()\n test_split()\n test_concatenate()\n test_sofmax()\n test_squeeze()\n test_strided_slice()\n test_cmp_type()\n test_binary_op()\n test_tuple_types()\n test_layout_transform()\n test_clip()\n test_expand_dims()\n test_lrn()\n test_sigmoid()\n test_copy()\n test_round()\n test_cast()\n test_resize()\n" ]
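The docstrings in the tvm.testing helpers above describe how `parameter`, `parameters`, and `fixture` interact: independent parameters multiply out into a cross product, correlated parameters advance together, and fixtures may consume parameters and optionally cache their return value. The following is a minimal sketch of a test module that exercises all three; the module contents and names are hypothetical, and it assumes pytest plus a tvm build exposing these helpers exactly as documented:

import numpy as np
import tvm.testing

# Independent parameters: a test accepting both runs over the full
# cross product (3 sizes x 2 dtypes = 6 invocations).
size = tvm.testing.parameter(1, 10, 100)
dtype = tvm.testing.parameter("float32", "float16")

# Correlated parameters: a test accepting both runs once per tuple,
# not once per combination.
rows, cols = tvm.testing.parameters((16, 32), (512, 1024))

@tvm.testing.fixture(cache_return_value=True)
def reference(size):
    # Expensive setup; computed once per size value, and deep-copied
    # into every test that requests it so tests stay independent.
    return np.arange(size)

def test_cross_product(size, dtype):
    assert size in (1, 10, 100) and dtype in ("float32", "float16")

def test_correlated(rows, cols):
    assert (rows, cols) in [(16, 32), (512, 1024)]

def test_with_fixture(size, reference):
    assert len(reference) == size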
[ [ "torch.jit.load", "torch.randint", "numpy.sqrt", "torch.max", "torch.zeros", "torch.sin", "torch.neg", "torch.narrow", "torch.bitwise_xor", "torch.numel", "torch.rsqrt", "torch.acos", "torch.where", "torch.topk", "torch.log10", "torch.sqrt", "torch.randn", "torch.nn.Softsign", "torch.logical_xor", "torch.scatter", "torch.nn.SELU", "torch.rsub", "torch.masked_select", "torch.nn.GroupNorm", "numpy.zeros", "torch.ones_like", "torch.full", "torch.nn.ReplicationPad1d", "torch.min", "torch.nn.functional.avg_pool2d", "torch.nn.Conv2d", "torch.exp", "torch.nn.Linear", "torch.nn.AvgPool2d", "torch.nn.init.normal_", "torch.nn.InstanceNorm2d", "torch.log1p", "torch.nn.BatchNorm2d", "numpy.array", "torch.nn.InstanceNorm3d", "torch.take", "torch.nn.ReflectionPad2d", "torch.tan", "torch.nn.ConvTranspose3d", "scipy.stats.t.ppf", "torch.unbind", "torch.gather", "torch.nn.functional.avg_pool1d", "numpy.random.uniform", "torch.bincount", "torch.bitwise_not", "torch.sign", "torch.nn.Hardswish", "torch.set_grad_enabled", "torch.cuda.is_available", "torch.split", "torch.logical_and", "torch.nn.ReplicationPad2d", "torch.norm", "torch.nn.functional.max_pool3d", "torch.nn.ConstantPad2d", "torch.tensor", "torch.nn.Sigmoid", "numpy.std", "torch.nn.LogSigmoid", "torch.rand", "torch.nonzero", "torch.sort", "torch.argsort", "torch.atan", "torch.LongTensor", "torch.nn.LogSoftmax", "torch.isinf", "torch.floor", "torch.trunc", "torch.zeros_like", "torch.stack", "torch.nn.ReflectionPad1d", "torch.nn.Dropout3d", "torch.matmul", "torch.nn.Upsample", "torch.nn.ReLU", "torch.meshgrid", "torch.nn.BatchNorm3d", "torch.nn.AvgPool1d", "torch.nn.Dropout2d", "torch.cat", "torch.nn.ELU", "torch.addcdiv", "torch.nn.Embedding", "torch.unique", "numpy.mean", "torch.nn.functional.interpolate", "torch.full_like", "torch.scatter_add", "torch.nn.AvgPool3d", "torch.jit.trace", "torch.ones", "torch.add", "torch.addcmul", "torch.nn.Softplus", "torch.round", "torch.is_floating_point", "torch.nn.MaxPool1d", "torch.isfinite", "torch.nn.CELU", "torch.arange", "torch.index_select", "torch.nn.functional.pad", "torch.cos", "torch.index_put", "torch.nn.ConvTranspose2d", "torch.nn.AlphaDropout", "torch.nn.PixelShuffle", "torch.cuda.empty_cache", "torch.nn.Conv3d", "torch.log", "torch.nn.LeakyReLU", "torch.nn.Hardsigmoid", "torch.cosh", "torch.jit.save", "torch.ceil", "torch.nn.GELU", "torch.true_divide", "torch.nn.AdaptiveMaxPool3d", "torch.nn.AdaptiveAvgPool3d", "torch.cumsum", "torch.sinh", "torch.nn.functional.max_pool1d", "torch.nn.Softmax", "torch.clamp_", "torch.nn.ReplicationPad3d", "torch.nn.Transformer", "torch.masked_fill", "torch.tanh", "torch.no_grad", "torch.nn.functional.avg_pool3d", "torch.flatten", "torch.logsumexp", "torch.logical_not", "torch.jit.script", "torch.nn.Dropout", "torch.log2", "torch.asin", "torch.nn.ZeroPad2d", "torch.nn.functional.max_pool2d", "torch.nn.functional.linear", "torch.nn.Threshold", "torch.nn.ConstantPad3d", "torch.linspace", "torch.nn.NLLLoss", "torch.nn.PReLU", "torch.nn.Conv1d", "torch.nn.Hardtanh", "numpy.random.random", "torch.isnan", "torch.nn.LayerNorm", "torch.nn.MaxPool2d", "torch.nn.MaxPool3d", "torch.nn.AdaptiveAvgPool2d", "torch.erf", "torch.nn.functional.one_hot", "torch.nn.ConvTranspose1d", "torch.clamp", "torch.argmax" ], [ "numpy.abs", "numpy.isfinite", "numpy.argwhere", "numpy.all", "numpy.asanyarray", "numpy.zeros_like", "numpy.prod", "numpy.testing.assert_allclose", "numpy.unravel_index", "numpy.sum" ], [ "numpy.cos", "numpy.sort", "numpy.linalg.norm", "numpy.searchsorted", 
"numpy.random.uniform", "numpy.array", "numpy.zeros" ], [ "numpy.random.random_sample", "numpy.random.rand", "numpy.testing.assert_allclose", "numpy.random.uniform", "numpy.array", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
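The `_fixture_cache` helper in the first file of the record above caches fixture return values keyed on the fixture arguments: it tries a hashable key first and falls back to `pickle.dumps` so that unhashable but serializable inputs (for example numpy arrays) can still be cached, and it deep-copies cached values so no test can mutate state shared with another. A standalone sketch of that lookup pattern, with illustrative names that are not part of the TVM API:

import copy
import pickle

_cache = {}

def _cache_key(*args, **kwargs):
    # Hashable fast path; dicts are unhashable, so kwargs are first
    # normalized to a sorted tuple of items.
    key = (args, tuple(sorted(kwargs.items())))
    try:
        hash(key)
        return key
    except TypeError:
        pass
    # Fallback for unhashable but picklable arguments.
    return pickle.dumps((args, kwargs))

def cached_call(func, *args, **kwargs):
    key = (func.__qualname__, _cache_key(*args, **kwargs))
    if key not in _cache:
        _cache[key] = func(*args, **kwargs)
    # Deep copy so callers cannot mutate the cached value.
    return copy.deepcopy(_cache[key])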
Mehrad0711/bootleg
[ "f812b6200eb84b0163d353f0d4f73308a921fcfa", "f812b6200eb84b0163d353f0d4f73308a921fcfa", "f812b6200eb84b0163d353f0d4f73308a921fcfa" ]
[ "bootleg/layers/attn_networks.py", "bootleg/end2end/bootleg_annotator.py", "tutorials/downstream_tutorial/bootleg_utilities/add_bootleg_feature.py" ]
[ "\"\"\"Attention networks.\"\"\"\nimport logging\n\nimport torch\nimport torch.nn as nn\n\nimport bootleg.utils.model_utils\nfrom bootleg.layers.helper_modules import MLP, AttnBlock, NormAndSum, SelfAttnBlock\nfrom bootleg.symbols.constants import (\n BERT_WORD_DIM,\n DISAMBIG,\n KG_BIAS_LOAD_CLASS,\n MAIN_CONTEXT_MATRIX,\n)\nfrom bootleg.utils import model_utils\nfrom bootleg.utils.embedding_utils import get_max_candidates\n\nlogger = logging.getLogger(__name__)\n\n\nclass AttnNetwork(nn.Module):\n \"\"\"Base attention network.\n\n Args:\n args: args\n entity_symbols: entity symbols\n \"\"\"\n\n def __init__(self, args, entity_symbols):\n super(AttnNetwork, self).__init__()\n self.num_entities_with_pad_and_nocand = (\n entity_symbols.num_entities_with_pad_and_nocand\n )\n # Number of candidates\n self.K = get_max_candidates(entity_symbols, args.data_config)\n # Number of aliases\n self.M = args.data_config.max_aliases\n self.hidden_size = args.model_config.hidden_size\n self.num_heads = args.model_config.num_heads\n self.num_model_stages = args.model_config.num_model_stages\n assert (\n self.num_model_stages > 0\n ), f\"You must have > 0 model stages. You have {self.num_model_stages}\"\n self.num_fc_layers = args.model_config.num_fc_layers\n self.ff_inner_size = args.model_config.ff_inner_size\n\n def forward(\n self,\n sent_embedding,\n sent_embedding_mask,\n entity_embedding,\n entity_embedding_mask,\n start_span_idx,\n end_span_idx,\n batch_on_the_fly_data,\n ):\n \"\"\"Model forward.\n\n Args:\n sent_embedding: sentence embedding (B x N x L)\n sent_embedding_mask: sentence embedding mask (B x N)\n entity_embedding: entity embedding (B x M x K x H)\n entity_embedding_mask: entity embedding mask (B x M x K)\n start_span_idx: start mention index into sentence (B x M)\n end_span_idx: end mention index into sentence (B x M)\n batch_on_the_fly_data: batch on the fly dictionary with values (B x (M*K) x (M*K)) of KG adjacency matrices\n\n Returns: Dict of Dict of intermediate layer candidate scores (B x M x K),\n Dict of all output entity embeddings from each KG matrix (B x M x K x H)\n \"\"\"\n raise NotImplementedError\n\n\nclass Bootleg(AttnNetwork):\n \"\"\"Bootleg attention network V1.\n\n Args:\n args: args\n entity_symbols: entity symbols\n \"\"\"\n\n def __init__(self, args, entity_symbols):\n super(Bootleg, self).__init__(args, entity_symbols)\n self.dropout = args.train_config.dropout\n\n # For each stage, create a transformer block for phrase (entity_word) and co-occurrence (self_entity) modules\n self.attention_modules = nn.ModuleDict()\n self.combine_modules = nn.ModuleDict()\n for i in range(self.num_model_stages):\n self.attention_modules[f\"stage_{i}_entity_word\"] = AttnBlock(\n size=self.hidden_size,\n ff_inner_size=args.model_config.ff_inner_size,\n dropout=self.dropout,\n num_heads=self.num_heads,\n )\n self.attention_modules[f\"stage_{i}_self_entity\"] = SelfAttnBlock(\n size=self.hidden_size,\n ff_inner_size=args.model_config.ff_inner_size,\n dropout=self.dropout,\n num_heads=self.num_heads,\n )\n self.combine_modules[f\"stage_{i}_combine\"] = NormAndSum(self.hidden_size)\n\n # For the KG bias module\n self.kg_bias_list = []\n self.kg_bias_keys = []\n for emb in args.data_config.ent_embeddings:\n if emb.load_class == KG_BIAS_LOAD_CLASS:\n # self.kg_bias_weights[emb.key] = torch.nn.Parameter(torch.tensor(2.0))\n setattr(self, emb.key, torch.nn.Parameter(torch.tensor(2.0)))\n self.kg_bias_list.append(getattr(self, emb.key))\n self.kg_bias_keys.append(emb.key)\n 
self.kg_bias_keys = sorted(self.kg_bias_keys)\n # If we have kg bias terms, we want to take the average of those context matrices when generating the final\n # context matrix to be returned. The no_kg_key is used for the context matrix without kg_bias terms added.\n # If we use the key ending in _nokg, it will not be averaged in the final result. If we do not have kg bias\n # terms, we want the nokg context matrix to be the final matrix. MAIN_CONTEXT_MATRIX key allows for this.\n if len(self.kg_bias_keys) > 0:\n self.no_kg_key = \"context_matrix_nokg\"\n else:\n self.no_kg_key = MAIN_CONTEXT_MATRIX\n self.kg_softmax = nn.Softmax(dim=2)\n\n # Two things to note, the attn mask is a block diagonal matrix prevent an alias from paying attention to its\n # own K candidates in the attention layer This works because the original input is added to the output of\n # this attention, meaning an alias becomes its original embedding plus the contributions of the other\n # aliases in the sentence. Second, the attn mask is added to the attention before softmax (added to Q dot\n # V^T) -- softmax makes e^(-1e9+old_value) become zero When setting it to be -inf, you can get nans in the\n # loss if all entities end up being masked out (eg only one alias in the sentence)\n self.e2e_entity_mask = torch.zeros((self.K * self.M, self.K * self.M))\n for i in range(self.M):\n self.e2e_entity_mask[\n i * self.K : (i + 1) * self.K, i * self.K : (i + 1) * self.K\n ] = 1.0\n # Must manually move this to the device as it's not part of a module...we can probably fix this\n self.e2e_entity_mask = self.e2e_entity_mask.masked_fill(\n (self.e2e_entity_mask == 1), float(-1e9)\n )\n\n # Track attention weights\n self.attention_weights = {}\n\n # Prediction layers: each stage except the last gets a prediction layer\n # Last layer's prediction head is added in slice heads\n disambig_task = nn.ModuleDict()\n for i in range(self.num_model_stages - 1):\n disambig_task[bootleg.utils.model_utils.get_stage_head_name(i)] = MLP(\n self.hidden_size, self.hidden_size, 1, self.num_fc_layers, self.dropout\n )\n self.predict_layers = {DISAMBIG: disambig_task}\n self.predict_layers = nn.ModuleDict(self.predict_layers)\n\n def forward(\n self,\n sent_embedding,\n sent_embedding_mask,\n entity_embedding,\n entity_embedding_mask,\n start_span_idx,\n end_span_idx,\n batch_on_the_fly_data,\n ):\n \"\"\"Model forward.\n\n Args:\n sent_embedding: sentence embedding (B x N x L)\n sent_embedding_mask: sentence embedding mask (B x N)\n entity_embedding: entity embedding (B x M x K x H)\n entity_embedding_mask: entity embedding mask (B x M x K)\n start_span_idx: start mention index into sentence (B x M)\n end_span_idx: end mention index into sentence (B x M)\n batch_on_the_fly_data: batch on the fly dictionary with values (B x (M*K) x (M*K)) of KG adjacency matrices\n\n Returns: Dict of Dict of intermediate layer candidate scores (B x M x K),\n Dict of all output entity embeddings from each KG matrix (B x M x K x H)\n \"\"\"\n batch_size = sent_embedding.shape[0]\n out = {DISAMBIG: {}}\n\n # Create KG bias matrices for each kg bias key\n kg_bias_norms = {}\n for key in self.kg_bias_keys:\n bias_weight = getattr(self, key) # self.kg_bias_weights[key]\n kg_bias = (\n batch_on_the_fly_data[key]\n .float()\n .to(sent_embedding.device)\n .reshape(batch_size, self.M * self.K, self.M * self.K)\n )\n kg_bias_diag = kg_bias + bias_weight * torch.eye(self.M * self.K).repeat(\n batch_size, 1, 1\n ).view(batch_size, self.M * self.K, self.M * 
self.K).to(kg_bias.device)\n kg_bias_norm = self.kg_softmax(\n kg_bias_diag.masked_fill((kg_bias_diag == 0), float(-1e9))\n )\n kg_bias_norms[key] = kg_bias_norm\n sent_tensor = sent_embedding.transpose(0, 1)\n\n # Resize the alias embeddings and the entity mask from B x M x K x D -> B x (M*K) x D\n entity_embedding = entity_embedding.contiguous().view(\n batch_size, self.M * self.K, self.hidden_size\n )\n entity_embedding = entity_embedding.transpose(0, 1) # reshape for attention\n key_padding_mask_entities = entity_embedding_mask.contiguous().view(\n batch_size, self.M * self.K\n )\n\n # Iterate through stages\n query_tensor = entity_embedding\n for stage_index in range(self.num_model_stages):\n # As we are adding a residual in the attention modules, we can make embs empty\n embs = []\n context_mat_dict = {}\n # ============================================================================\n # Phrase module: compute attention between entities and words\n # ============================================================================\n word_entity_attn_context, word_entity_attn_weights = self.attention_modules[\n f\"stage_{stage_index}_entity_word\"\n ](\n q=query_tensor,\n x=sent_tensor,\n key_mask=sent_embedding_mask,\n attn_mask=None,\n )\n # Add embeddings to be merged in the output\n embs.append(word_entity_attn_context)\n # Save the attention weights\n self.attention_weights[\n f\"stage_{stage_index}_entity_word\"\n ] = word_entity_attn_weights\n\n # ============================================================================\n # Co-occurrence module: compute self attention over entities\n # ============================================================================\n # Move entity mask to device\n # TODO: move to device in init?\n self.e2e_entity_mask = self.e2e_entity_mask.to(\n key_padding_mask_entities.device\n )\n\n entity_attn_context, entity_attn_weights = self.attention_modules[\n f\"stage_{stage_index}_self_entity\"\n ](\n x=query_tensor,\n key_mask=key_padding_mask_entities,\n attn_mask=self.e2e_entity_mask,\n )\n # Mask out MxK of single aliases, alias_indices is batch x M, mask is true when single alias\n non_null_aliases = (\n self.K\n - key_padding_mask_entities.reshape(batch_size, self.M, self.K).sum(-1)\n ) != 0\n entity_attn_post_mask = (\n (non_null_aliases.sum(1) == 1)\n .unsqueeze(1)\n .expand(batch_size, self.K * self.M)\n .transpose(0, 1)\n )\n entity_attn_post_mask = entity_attn_post_mask.unsqueeze(-1).expand_as(\n entity_attn_context\n )\n entity_attn_context = torch.where(\n entity_attn_post_mask,\n torch.zeros_like(entity_attn_context),\n entity_attn_context,\n )\n\n # Add embeddings to be merged in the output\n embs.append(entity_attn_context)\n # Save the attention weights\n self.attention_weights[\n f\"stage_{stage_index}_self_entity\"\n ] = entity_attn_weights\n\n # Combine module output\n context_matrix_nokg = self.combine_modules[f\"stage_{stage_index}_combine\"](\n embs\n )\n context_mat_dict[self.no_kg_key] = context_matrix_nokg.transpose(\n 0, 1\n ).reshape(batch_size, self.M, self.K, self.hidden_size)\n # ============================================================================\n # KG module: add in KG connectivity bias\n # ============================================================================\n for key in self.kg_bias_keys:\n context_matrix_kg = torch.bmm(\n kg_bias_norms[key], context_matrix_nokg.transpose(0, 1)\n ).transpose(0, 1)\n context_matrix_kg = (context_matrix_nokg + context_matrix_kg) / 2\n 
context_mat_dict[f\"context_matrix_{key}\"] = context_matrix_kg.transpose(\n 0, 1\n ).reshape(batch_size, self.M, self.K, self.hidden_size)\n\n if stage_index < self.num_model_stages - 1:\n score = model_utils.max_score_context_matrix(\n context_mat_dict,\n self.predict_layers[DISAMBIG][\n bootleg.utils.model_utils.get_stage_head_name(stage_index)\n ],\n )\n out[DISAMBIG][\n f\"{bootleg.utils.model_utils.get_stage_head_name(stage_index)}\"\n ] = score\n\n # This will take the average of the context matrices that do not end in the key \"_nokg\";\n # if there are not kg bias terms, it will select the context_matrix_nokg\n # (as it's key, in this setting, will not end in _nokg)\n query_tensor = (\n model_utils.generate_final_context_matrix(\n context_mat_dict, ending_key_to_exclude=\"_nokg\"\n )\n .reshape(batch_size, self.M * self.K, self.hidden_size)\n .transpose(0, 1)\n )\n return {\n \"intermed_scores\": out,\n \"ent_embs\": context_mat_dict,\n \"final_scores\": None,\n }\n\n\nclass BootlegM2E(AttnNetwork):\n \"\"\"Bootleg attention network with a mention to entity canidate tensformer\n layer.\n\n Args:\n args: args\n entity_symbols: entity symbols\n \"\"\"\n\n def __init__(self, args, entity_symbols):\n super(BootlegM2E, self).__init__(args, entity_symbols)\n self.dropout = args.train_config.dropout\n\n # For each stage, create a transformer block for phrase (entity_word) and co-occurrence (self_entity) modules\n self.attention_modules = nn.ModuleDict()\n self.combine_modules = nn.ModuleDict()\n for i in range(self.num_model_stages):\n self.attention_modules[f\"stage_{i}_entity_word\"] = AttnBlock(\n size=self.hidden_size,\n ff_inner_size=args.model_config.ff_inner_size,\n dropout=self.dropout,\n num_heads=self.num_heads,\n )\n self.attention_modules[f\"stage_{i}_self_entity\"] = SelfAttnBlock(\n size=self.hidden_size,\n ff_inner_size=args.model_config.ff_inner_size,\n dropout=self.dropout,\n num_heads=self.num_heads,\n )\n self.attention_modules[f\"stage_{i}_mention_entity\"] = AttnBlock(\n size=self.hidden_size,\n ff_inner_size=args.model_config.ff_inner_size,\n dropout=self.dropout,\n num_heads=self.num_heads,\n )\n self.combine_modules[f\"stage_{i}_combine\"] = NormAndSum(self.hidden_size)\n\n # For the KG bias module\n # self.kg_bias_weights = nn.ParameterDict() # ParameterDicts are buggy in DataParallel\n # self.kg_bias_list = []\n self.kg_bias_keys = []\n for emb in args.data_config.ent_embeddings:\n if emb.load_class == KG_BIAS_LOAD_CLASS:\n # self.kg_bias_weights[emb.key] = torch.nn.Parameter(torch.tensor(2.0))\n # setattr(self, emb.key, torch.nn.Parameter(torch.tensor(2.0)))\n # self.kg_bias_list.append(getattr(self, emb.key))\n self.kg_bias_keys.append(emb.key)\n self.kg_bias_keys = sorted(self.kg_bias_keys)\n # If we have kg bias terms, we want to take the average of those context matrices when generating the final\n # context matrix to be returned. The no_kg_key is used for the context matrix without kg_bias terms added.\n # If we use the key ending in _nokg, it will not be averaged in the final result. If we do not have kg bias\n # terms, we want the nokg context matrix to be the final matrix. 
MAIN_CONTEXT_MATRIX key allows for this.\n        if len(self.kg_bias_keys) > 0:\n            self.no_kg_key = \"context_matrix_nokg\"\n        else:\n            self.no_kg_key = MAIN_CONTEXT_MATRIX\n        self.kg_softmax = nn.Softmax(dim=2)\n\n        # Two things to note. First, the attn mask is a block diagonal matrix that prevents an alias from paying attention to its\n        # own K candidates in the attention layer. This works because the original input is added to the output of\n        # this attention, meaning an alias becomes its original embedding plus the contributions of the other\n        # aliases in the sentence. Second, the attn mask is added to the attention before softmax (added to Q dot\n        # V^T) -- softmax makes e^(-1e9+old_value) become zero. When setting it to be -inf, you can get nans in the\n        # loss if all entities end up being masked out (e.g. only one alias in the sentence)\n        self.e2e_entity_mask = torch.zeros((self.K * self.M, self.K * self.M))\n        for i in range(self.M):\n            self.e2e_entity_mask[\n                i * self.K : (i + 1) * self.K, i * self.K : (i + 1) * self.K\n            ] = 1.0\n        # Must manually move this to the device as it's not part of a module...we can probably fix this\n        self.e2e_entity_mask = self.e2e_entity_mask.masked_fill(\n            (self.e2e_entity_mask == 1), float(-1e9)\n        )\n\n        # Track attention weights\n        self.attention_weights = {}\n\n        # Prediction layers: each stage except the last gets a prediction layer\n        # Last layer's prediction head is added in slice heads\n        disambig_task = nn.ModuleDict()\n        for i in range(self.num_model_stages - 1):\n            disambig_task[bootleg.utils.model_utils.get_stage_head_name(i)] = MLP(\n                self.hidden_size, self.hidden_size, 1, self.num_fc_layers, self.dropout\n            )\n        self.predict_layers = {DISAMBIG: disambig_task}\n        self.predict_layers = nn.ModuleDict(self.predict_layers)\n\n    def forward(\n        self,\n        sent_embedding,\n        sent_embedding_mask,\n        entity_embedding,\n        entity_embedding_mask,\n        start_span_idx,\n        end_span_idx,\n        batch_on_the_fly_data,\n    ):\n        \"\"\"Model forward.\n\n        Args:\n            sent_embedding: sentence embedding (B x N x L)\n            sent_embedding_mask: sentence embedding mask (B x N)\n            entity_embedding: entity embedding (B x M x K x H)\n            entity_embedding_mask: entity embedding mask (B x M x K)\n            start_span_idx: start mention index into sentence (B x M)\n            end_span_idx: end mention index into sentence (B x M)\n            batch_on_the_fly_data: batch on the fly dictionary with values (B x (M*K) x (M*K)) of KG adjacency matrices\n\n        Returns: Dict of Dict of intermediate layer candidate scores (B x M x K),\n            Dict of all output entity embeddings from each KG matrix (B x M x K x H)\n        \"\"\"\n        batch_size = sent_embedding.shape[0]\n        out = {DISAMBIG: {}}\n\n        # Create KG bias matrices for each kg bias key\n        kg_bias_norms = {}\n        for key in self.kg_bias_keys:\n            kg_bias_norms[key] = (\n                batch_on_the_fly_data[key]\n                .float()\n                .reshape(batch_size, self.M * self.K, self.M * self.K)\n            )\n\n        # get mention embedding\n        # average words in mention; batch x M x dim\n        mention_tensor_start = model_utils.select_alias_word_sent(\n            start_span_idx, sent_embedding\n        )\n        mention_tensor_end = model_utils.select_alias_word_sent(\n            end_span_idx, sent_embedding\n        )\n        mention_tensor = (mention_tensor_start + mention_tensor_end) / 2\n\n        # reshape for alias attention where each mention attends to its K candidates\n        # query = batch*M x 1 x dim, key = value = batch*M x K x dim\n        # softmax(QK^T) -> batch*M x 1 x K\n        # softmax(QK^T)V -> batch*M x 1 x dim\n        mention_tensor = mention_tensor.reshape(\n            batch_size * self.M, 1, self.hidden_size\n        ).transpose(0, 1)\n\n        # get sentence embedding; 
move batch to middle\n sent_tensor = sent_embedding.transpose(0, 1)\n\n # Resize the alias embeddings and the entity mask from B x M x K x D -> B x (M*K) x D\n entity_embedding = entity_embedding.contiguous().view(\n batch_size, self.M * self.K, self.hidden_size\n )\n entity_embedding = entity_embedding.transpose(0, 1) # reshape for attention\n key_padding_mask_entities = entity_embedding_mask.contiguous().view(\n batch_size, self.M * self.K\n )\n key_padding_mask_entities_mention = entity_embedding_mask.contiguous().view(\n batch_size * self.M, self.K\n )\n # Mask of aliases; key_padding_mask_entities_mention of True means mask.\n # We want to find aliases with all masked entities\n key_padding_mask_mentions = (\n torch.sum(~key_padding_mask_entities_mention, dim=-1) == 0\n )\n # Unmask these aliases to avoid nan in attention\n key_padding_mask_entities_mention[key_padding_mask_mentions] = False\n # Iterate through stages\n query_tensor = entity_embedding\n for stage_index in range(self.num_model_stages):\n # As we are adding a residual in the attention modules, we can make embs empty\n embs = []\n context_mat_dict = {}\n key_tensor_mention = (\n query_tensor.transpose(0, 1)\n .contiguous()\n .reshape(batch_size, self.M, self.K, self.hidden_size)\n .reshape(batch_size * self.M, self.K, self.hidden_size)\n .transpose(0, 1)\n )\n # ============================================================================\n # Phrase module: compute attention between entities and words\n # ============================================================================\n word_entity_attn_context, word_entity_attn_weights = self.attention_modules[\n f\"stage_{stage_index}_entity_word\"\n ](\n q=query_tensor,\n x=sent_tensor,\n key_mask=sent_embedding_mask,\n attn_mask=None,\n )\n # Add embeddings to be merged in the output\n embs.append(word_entity_attn_context)\n # Save the attention weights\n self.attention_weights[\n f\"stage_{stage_index}_entity_word\"\n ] = word_entity_attn_weights\n\n # ============================================================================\n # Co-occurrence module: compute self attention over entities\n # ============================================================================\n # Move entity mask to device\n # TODO: move to device in init?\n self.e2e_entity_mask = self.e2e_entity_mask.to(\n key_padding_mask_entities.device\n )\n\n entity_attn_context, entity_attn_weights = self.attention_modules[\n f\"stage_{stage_index}_self_entity\"\n ](\n x=query_tensor,\n key_mask=key_padding_mask_entities,\n attn_mask=self.e2e_entity_mask,\n )\n # Mask out MxK of single aliases, alias_indices is batch x M, mask is true when single alias\n non_null_aliases = (\n self.K\n - key_padding_mask_entities.reshape(batch_size, self.M, self.K).sum(-1)\n ) != 0\n entity_attn_post_mask = (\n (non_null_aliases.sum(1) == 1)\n .unsqueeze(1)\n .expand(batch_size, self.K * self.M)\n .transpose(0, 1)\n )\n entity_attn_post_mask = entity_attn_post_mask.unsqueeze(-1).expand_as(\n entity_attn_context\n )\n entity_attn_context = torch.where(\n entity_attn_post_mask,\n torch.zeros_like(entity_attn_context),\n entity_attn_context,\n )\n\n # Add embeddings to be merged in the output\n embs.append(entity_attn_context)\n # Save the attention weights\n self.attention_weights[\n f\"stage_{stage_index}_self_entity\"\n ] = entity_attn_weights\n\n # ============================================================================\n # Mention module: compute attention between entities and mentions\n # 
============================================================================\n            # output is 1 x batch*M x dim\n            (\n                mention_entity_attn_context,\n                mention_entity_attn_weights,\n            ) = self.attention_modules[f\"stage_{stage_index}_mention_entity\"](\n                q=mention_tensor,\n                x=key_tensor_mention,\n                key_mask=key_padding_mask_entities_mention,\n                attn_mask=None,\n            )\n            # key_padding_mask_mentions mentions have all padded candidates,\n            # meaning their rows in the context matrix are all nan\n            mention_entity_attn_context[key_padding_mask_mentions.unsqueeze(0)] = 0\n            mention_entity_attn_context = (\n                mention_entity_attn_context.expand(\n                    self.K, batch_size * self.M, self.hidden_size\n                )\n                .transpose(0, 1)\n                .reshape(batch_size, self.M * self.K, self.hidden_size)\n                .transpose(0, 1)\n            )\n            # Add embeddings to be merged in the output\n            embs.append(mention_entity_attn_context)\n            # Save the attention weights\n            self.attention_weights[\n                f\"stage_{stage_index}_mention_entity\"\n            ] = mention_entity_attn_weights\n\n            # Combine module output\n            context_matrix_nokg = self.combine_modules[f\"stage_{stage_index}_combine\"](\n                embs\n            )\n            context_mat_dict[self.no_kg_key] = context_matrix_nokg.transpose(\n                0, 1\n            ).reshape(batch_size, self.M, self.K, self.hidden_size)\n            # ============================================================================\n            # KG module: add in KG connectivity bias\n            # ============================================================================\n            for key in self.kg_bias_keys:\n                context_matrix_kg = torch.bmm(\n                    kg_bias_norms[key], context_matrix_nokg.transpose(0, 1)\n                ).transpose(0, 1)\n                context_matrix_kg = (context_matrix_nokg + context_matrix_kg) / 2\n                context_mat_dict[f\"context_matrix_{key}\"] = context_matrix_kg.transpose(\n                    0, 1\n                ).reshape(batch_size, self.M, self.K, self.hidden_size)\n\n            if stage_index < self.num_model_stages - 1:\n                score = model_utils.max_score_context_matrix(\n                    context_mat_dict,\n                    self.predict_layers[DISAMBIG][\n                        bootleg.utils.model_utils.get_stage_head_name(stage_index)\n                    ],\n                )\n                out[DISAMBIG][\n                    f\"{bootleg.utils.model_utils.get_stage_head_name(stage_index)}\"\n                ] = score\n\n            # This will take the average of the context matrices that do not end in the key \"_nokg\";\n            # if there are no kg bias terms, it will select the context_matrix_nokg\n            # (as its key, in this setting, will not end in _nokg)\n            query_tensor = (\n                model_utils.generate_final_context_matrix(\n                    context_mat_dict, ending_key_to_exclude=\"_nokg\"\n                )\n                .reshape(batch_size, self.M * self.K, self.hidden_size)\n                .transpose(0, 1)\n            )\n        return {\n            \"intermed_scores\": out,\n            \"ent_embs\": context_mat_dict,\n            \"final_scores\": None,\n        }\n\n\nclass BERTNED(AttnNetwork):\n    \"\"\"NED Baseline model using BERT.\n\n    Args:\n        args: args\n        entity_symbols: entity symbols\n    \"\"\"\n\n    def __init__(self, args, entity_symbols):\n        super(BERTNED, self).__init__(args, entity_symbols)\n        self.dropout = args.train_config.dropout\n        self.span_proj = MLP(\n            input_size=2 * BERT_WORD_DIM,\n            num_hidden_units=None,\n            output_size=self.hidden_size,\n            num_layers=1,\n        )\n        # Prediction layers\n        disambig_task = nn.ModuleDict()\n        disambig_task[\"final\"] = MLP(\n            self.hidden_size, self.hidden_size, 1, self.num_fc_layers, self.dropout\n        )\n        self.predict_layers = {DISAMBIG: disambig_task}\n        self.predict_layers = nn.ModuleDict(self.predict_layers)\n\n    def forward(\n        self,\n        sent_embedding,\n        sent_embedding_mask,\n        entity_embedding,\n        entity_embedding_mask,\n        start_span_idx,\n        end_span_idx,\n        batch_on_the_fly_data,\n    ):\n        \"\"\"Model forward.\n\n        
Args:\n            sent_embedding: sentence embedding (B x N x L)\n            sent_embedding_mask: sentence embedding mask (B x N)\n            entity_embedding: entity embedding (B x M x K x H)\n            entity_embedding_mask: entity embedding mask (B x M x K)\n            start_span_idx: start mention index into sentence (B x M)\n            end_span_idx: end mention index into sentence (B x M)\n            batch_on_the_fly_data: batch on the fly dictionary with values (B x (M*K) x (M*K)) of KG adjacency matrices\n\n        Returns: Dict of Dict of intermediate output layer scores (will be empty for this model),\n            Output entity embeddings (B x M x K x H),\n            Candidate scores (B x M x K)\n        \"\"\"\n        out = {DISAMBIG: {}}\n        context_mat_dict = {}\n\n        batch_size, M, K, emb_dim = entity_embedding.shape\n        alias_start_idx_sent = start_span_idx\n        alias_end_idx_sent = end_span_idx\n        assert (\n            emb_dim == self.hidden_size\n        ), f\"BERT NED requires the learned entity embedding dim to be the same as the hidden size\"\n        assert alias_start_idx_sent.shape == alias_end_idx_sent.shape\n\n        # Get alias words from sent embedding then cat and proj\n        alias_start_word_tensor = model_utils.select_alias_word_sent(\n            alias_start_idx_sent, sent_embedding\n        )\n        alias_end_word_tensor = model_utils.select_alias_word_sent(\n            alias_end_idx_sent, sent_embedding\n        )\n        alias_pair_word_tensor = torch.cat(\n            [alias_start_word_tensor, alias_end_word_tensor], dim=-1\n        )\n        alias_emb = (\n            self.span_proj(alias_pair_word_tensor)\n            .unsqueeze(2)\n            .expand(batch_size, M, self.K, self.hidden_size)\n        )\n        alias_emb = (\n            alias_emb.contiguous()\n            .reshape((batch_size * M * self.K), self.hidden_size)\n            .unsqueeze(1)\n        )\n\n        # entity_embedding_mask: if I don't have 30 candidates, use a mask to fill the rest of the\n        # matrix for empty candidates\n        entity_embedding_zeroed = torch.where(\n            entity_embedding_mask.unsqueeze(-1),\n            torch.zeros_like(entity_embedding),\n            entity_embedding,\n        )\n        entity_embedding_tensor = (\n            entity_embedding_zeroed.contiguous()\n            .reshape((batch_size * M * self.K), self.hidden_size)\n            .unsqueeze(-1)\n        )\n\n        # Performs a batch-wise dot product across the dim=0 dimension\n        score = (\n            torch.bmm(alias_emb, entity_embedding_tensor)\n            .unsqueeze(-1)\n            .reshape(batch_size, M, self.K)\n        )\n        context_mat_dict[DISAMBIG] = entity_embedding_tensor.reshape(\n            batch_size, M, self.K, self.hidden_size\n        )\n        return {\n            \"intermed_scores\": out,\n            \"ent_embs\": context_mat_dict,\n            \"final_scores\": score,\n        }\n", "\"\"\"BootlegAnnotator.\"\"\"\nimport logging\nimport os\nimport tarfile\nimport urllib\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nimport ujson\nfrom tqdm import tqdm\nfrom transformers import BertTokenizer\n\nimport emmental\nfrom bootleg.data import get_dataloader_embeddings\nfrom bootleg.end2end.annotator_utils import DownloadProgressBar\nfrom bootleg.end2end.extract_mentions import (\n    find_aliases_in_sentence_tag,\n    get_all_aliases,\n)\nfrom bootleg.symbols.constants import PAD_ID, PRED_LAYER\nfrom bootleg.symbols.entity_symbols import EntitySymbols\nfrom bootleg.task_config import NED_TASK, NED_TASK_TO_LABEL, TYPE_PRED_TASK\nfrom bootleg.tasks import ned_task, type_pred_task\nfrom bootleg.utils import sentence_utils\nfrom bootleg.utils.embedding_utils import get_max_candidates\nfrom bootleg.utils.parser.parser_utils import parse_boot_and_emm_args\nfrom bootleg.utils.utils import load_yaml_file\nfrom emmental.model import EmmentalModel\n\nlogger = logging.getLogger(__name__)\n\nBOOTLEG_MODEL_PATHS = {\n    \"bootleg_cased\": 
\"https://bootleg-data.s3-us-west-2.amazonaws.com/models/latest/bootleg_cased.tar.gz\",\n \"bootleg_cased_mini\": \"https://bootleg-data.s3-us-west-2.amazonaws.com/models/latest/bootleg_cased_mini.tar.gz\",\n \"bootleg_uncased\": \"https://bootleg-data.s3-us-west-2.amazonaws.com/models/latest/bootleg_uncased.tar.gz\",\n \"bootleg_uncased_mini\": \"https://bootleg-data.s3-us-west-2.amazonaws.com/models/latest/bootleg_uncased_mini.tar.gz\",\n}\n\n\ndef get_default_cache():\n \"\"\"Gets default cache directory for saving Bootleg data.\n\n Returns:\n \"\"\"\n try:\n from torch.hub import _get_torch_home\n\n torch_cache_home = _get_torch_home()\n except ImportError:\n torch_cache_home = os.path.expanduser(\n os.getenv(\n \"TORCH_HOME\",\n os.path.join(os.getenv(\"XDG_CACHE_HOME\", \"~/.cache\"), \"torch\"),\n )\n )\n return Path(torch_cache_home) / \"bootleg\"\n\n\ndef create_config(model_path, data_path, model_name):\n \"\"\"Creates Bootleg config.\n\n Args:\n model_path: model directory\n data_path: data directory\n model_name: model name\n\n Returns: updated config\n \"\"\"\n config_file = model_path / model_name / \"bootleg_config.yaml\"\n config_args = load_yaml_file(config_file)\n\n # set the model checkpoint path\n config_args[\"emmental\"][\"model_path\"] = str(\n model_path / model_name / \"bootleg_wiki.pth\"\n )\n\n # set the path for the entity db and candidate map\n config_args[\"data_config\"][\"entity_dir\"] = str(data_path / \"entity_db\")\n config_args[\"data_config\"][\"alias_cand_map\"] = \"alias2qids.json\"\n\n # set the embedding paths\n config_args[\"data_config\"][\"emb_dir\"] = str(data_path / \"entity_db\")\n config_args[\"data_config\"][\"word_embedding\"][\"cache_dir\"] = str(\n data_path / \"pretrained_bert_models\"\n )\n\n # set log path\n config_args[\"emmental\"][\"log_path\"] = str(data_path / \"log_dir\")\n\n config_args = parse_boot_and_emm_args(config_args)\n return config_args\n\n\ndef create_sources(model_path, data_path, model_name):\n \"\"\"Downloads Bootleg data and saves in log dir.\n\n Args:\n model_path: model directory\n data_path: data directory\n model_name: model name to download\n\n Returns:\n \"\"\"\n download_path = BOOTLEG_MODEL_PATHS[model_name]\n if not (model_path / model_name).exists():\n print(\n f\"{model_path / model_name} not found. Downloading from {download_path}..\"\n )\n urllib.request.urlretrieve(\n download_path,\n filename=str(model_path / f\"{model_name}.tar.gz\"),\n reporthook=DownloadProgressBar(),\n )\n print(f\"Downloaded. Decompressing...\")\n tar = tarfile.open(str(model_path / f\"{model_name}.tar.gz\"), \"r:gz\")\n tar.extractall(model_path)\n tar.close()\n\n if not (data_path / \"entity_db\").exists():\n print(f\"{data_path / 'entity_db'} not found. Downloading..\")\n urllib.request.urlretrieve(\n \"https://bootleg-data.s3-us-west-2.amazonaws.com/data/latest/entity_db.tar.gz\",\n filename=str(data_path / \"entity_db.tar.gz\"),\n reporthook=DownloadProgressBar(),\n )\n print(f\"Downloaded. Decompressing...\")\n tar = tarfile.open(str(data_path / \"entity_db.tar.gz\"), \"r:gz\")\n tar.extractall(data_path)\n tar.close()\n\n\nclass BootlegAnnotator(object):\n \"\"\"BootlegAnnotator class: convenient wrapper of preprocessing and model\n eval to allow for annotating single sentences at a time for quick\n experimentation, e.g. 
in notebooks.\n\n Args:\n config: model config (default None)\n device: model device, -1 for CPU (default None)\n min_alias_len: minimum alias length (default 1)\n max_alias_len: maximum alias length (default 6)\n cand_map: alias candidate map (default None)\n threshold: probability threshold (default 0.0)\n cache_dir: cache directory (default None)\n model_name: model name (default None)\n return_embs: whether to return embeddings or not (default False)\n verbose: verbose boolean (default False)\n \"\"\"\n\n def __init__(\n self,\n config=None,\n device=None,\n min_alias_len=1,\n max_alias_len=6,\n cand_map=None,\n threshold=0.0,\n cache_dir=None,\n model_name=None,\n return_embs=False,\n verbose=False,\n ):\n self.min_alias_len = min_alias_len\n self.max_alias_len = max_alias_len\n self.verbose = verbose\n self.threshold = threshold\n self.return_embs = return_embs\n\n if not cache_dir:\n self.cache_dir = get_default_cache()\n self.model_path = self.cache_dir / \"models\"\n self.data_path = self.cache_dir / \"data\"\n else:\n self.cache_dir = Path(cache_dir)\n self.model_path = self.cache_dir / \"models\"\n self.data_path = self.cache_dir / \"data\"\n\n if not model_name:\n model_name = \"bootleg_uncased\"\n\n assert model_name in {\n \"bootleg_cased\",\n \"bootleg_cased_mini\",\n \"bootleg_uncased\",\n \"bootleg_uncased_mini\",\n }, (\n f\"model_name must be one of [bootleg_cased, bootleg_cased_mini, \"\n f\"bootleg_uncased_mini, bootleg_uncased]. You have {model_name}.\"\n )\n\n if not config:\n self.cache_dir.mkdir(parents=True, exist_ok=True)\n self.model_path.mkdir(parents=True, exist_ok=True)\n self.data_path.mkdir(parents=True, exist_ok=True)\n create_sources(self.model_path, self.data_path, model_name)\n self.config = create_config(self.model_path, self.data_path, model_name)\n else:\n if \"emmental\" in config:\n config = parse_boot_and_emm_args(config)\n self.config = config\n # Ensure some of the critical annotator args are the correct type\n self.config.data_config.max_aliases = int(\n self.config.data_config.max_aliases\n )\n self.config.run_config.eval_batch_size = int(\n self.config.run_config.eval_batch_size\n )\n self.config.data_config.max_seq_len = int(\n self.config.data_config.max_seq_len\n )\n self.config.data_config.train_in_candidates = bool(\n self.config.data_config.train_in_candidates\n )\n\n if not device:\n device = 0 if torch.cuda.is_available() else -1\n\n if self.verbose:\n self.config[\"run_config\"][\"log_level\"] = \"DEBUG\"\n else:\n self.config[\"run_config\"][\"log_level\"] = \"INFO\"\n\n self.torch_device = (\n torch.device(device) if device != -1 else torch.device(\"cpu\")\n )\n self.config.model_config.device = device\n\n log_level = logging.getLevelName(self.config[\"run_config\"][\"log_level\"].upper())\n emmental.init(\n log_dir=self.config[\"meta_config\"][\"log_path\"],\n config=self.config,\n use_exact_log_path=self.config[\"meta_config\"][\"use_exact_log_path\"],\n level=log_level,\n )\n\n logger.debug(\"Reading entity database\")\n self.entity_db = EntitySymbols.load_from_cache(\n os.path.join(\n self.config.data_config.entity_dir,\n self.config.data_config.entity_map_dir,\n ),\n alias_cand_map_file=self.config.data_config.alias_cand_map,\n alias_idx_file=self.config.data_config.alias_idx_map,\n )\n logger.debug(\"Reading word tokenizers\")\n self.tokenizer = BertTokenizer.from_pretrained(\n self.config.data_config.word_embedding.bert_model,\n do_lower_case=True\n if \"uncased\" in self.config.data_config.word_embedding.bert_model\n else 
False,\n cache_dir=self.config.data_config.word_embedding.cache_dir,\n )\n\n # Create tasks\n tasks = [NED_TASK]\n if self.config.data_config.type_prediction.use_type_pred is True:\n tasks.append(TYPE_PRED_TASK)\n self.task_to_label_dict = {t: NED_TASK_TO_LABEL[t] for t in tasks}\n\n # Create tasks\n self.model = EmmentalModel(name=\"Bootleg\")\n task_to_add = ned_task.create_task(self.config, self.entity_db)\n # As we manually keep track of the aliases for scoring, we only need the embeddings as action outputs\n task_to_add.action_outputs = [(PRED_LAYER, \"ent_embs\")]\n self.model.add_task(task_to_add)\n if TYPE_PRED_TASK in tasks:\n self.model.add_task(type_pred_task.create_task(self.config, self.entity_db))\n # Add the mention type embedding to the embedding payload\n type_pred_task.update_ned_task(self.model)\n\n logger.debug(\"Loading model\")\n # Load the best model from the pretrained model\n assert (\n self.config[\"model_config\"][\"model_path\"] is not None\n ), f\"Must have a model to load in the model_path for the BootlegAnnotator\"\n self.model.load(self.config[\"model_config\"][\"model_path\"])\n self.model.eval()\n if cand_map is None:\n alias_map = self.entity_db.get_alias2qids()\n else:\n logger.debug(f\"Loading candidate map\")\n alias_map = ujson.load(open(cand_map))\n\n self.all_aliases_trie = get_all_aliases(alias_map, verbose)\n\n # get batch_on_the_fly embeddings\n self.batch_on_the_fly_embs = get_dataloader_embeddings(\n self.config, self.entity_db\n )\n\n def extract_mentions(self, text, label_func):\n \"\"\"Wrapper function for mention extraction.\n\n Args:\n text: text to extract mentions from\n label_func: function that performs extraction (input is (text, alias trie, max alias length) ->\n output is list of found aliases and found spans\n\n Returns: JSON object of sentence to be used in eval\n \"\"\"\n found_aliases, found_spans = label_func(\n text, self.all_aliases_trie, self.min_alias_len, self.max_alias_len\n )\n return {\n \"sentence\": text,\n \"aliases\": found_aliases,\n \"spans\": found_spans,\n \"cands\": [self.entity_db.get_qid_cands(al) for al in found_aliases],\n # we don't know the true QID\n \"qids\": [\"Q-1\" for i in range(len(found_aliases))],\n \"gold\": [True for i in range(len(found_aliases))],\n }\n\n def set_threshold(self, value):\n \"\"\"Sets threshold.\n\n Args:\n value: threshold value\n\n Returns:\n \"\"\"\n self.threshold = value\n\n def label_mentions(\n self,\n text_list=None,\n label_func=find_aliases_in_sentence_tag,\n extracted_examples=None,\n ):\n \"\"\"Extracts mentions and runs disambiguation. 
If the user provides extracted_examples, we will ignore text_list\n\n        Args:\n            text_list: list of text to disambiguate (or single string) (can be None if extracted_examples is not None)\n            label_func: mention extraction function (optional)\n            extracted_examples: List of Dicts of keys \"sentence\", \"aliases\", \"spans\", \"cands\" (QIDs) (optional)\n\n        Returns: Dict of\n\n            * ``qids``: final predicted QIDs,\n            * ``probs``: final predicted probs,\n            * ``titles``: final predicted titles,\n            * ``cands``: all entity candidates,\n            * ``cand_probs``: probabilities of all candidates,\n            * ``spans``: final extracted word spans,\n            * ``aliases``: final extracted aliases,\n            * ``embs``: final entity contextualized embeddings (if return_embs is True)\n            * ``cand_embs``: final candidate entity contextualized embeddings (if return_embs is True)\n        \"\"\"\n        # Check inputs are sane\n        do_extract_mentions = True\n        if extracted_examples is not None:\n            do_extract_mentions = False\n            assert (\n                type(extracted_examples) is list\n            ), f\"Must provide a list of Dicts for extracted_examples\"\n            check_ex = extracted_examples[0]\n            assert (\n                len(\n                    {\"sentence\", \"aliases\", \"spans\", \"cands\"}.intersection(\n                        check_ex.keys()\n                    )\n                )\n                == 4\n            ), (\n                f\"You must have keys of sentence, aliases, spans, and cands for extracted_examples. You have \"\n                f\"{extracted_examples.keys()}\"\n            )\n        else:\n            assert (\n                text_list is not None\n            ), f\"If you do not provide extracted_examples you must provide text_list\"\n\n        if text_list is None:\n            assert extracted_examples is not None, (\n                f\"If you do not provide text_list \"\n                f\"you must provide extracted_examples\"\n            )\n        else:\n            if type(text_list) is str:\n                text_list = [text_list]\n            else:\n                assert (\n                    type(text_list) is list\n                    and len(text_list) > 0\n                    and type(text_list[0]) is str\n                ), f\"We only accept inputs of strings and lists of strings\"\n\n        # Get number of examples\n        if extracted_examples is not None:\n            num_exs = len(extracted_examples)\n        else:\n            num_exs = len(text_list)\n\n        ebs = int(self.config.run_config.eval_batch_size)\n        self.config.data_config.max_aliases = int(self.config.data_config.max_aliases)\n        total_start_exs = 0\n        total_final_exs = 0\n        dropped_by_thresh = 0\n\n        final_char_spans = []\n\n        batch_example_qid_cands = []\n        batch_example_eid_cands = []\n        batch_example_aliases_locs_start = []\n        batch_example_aliases_locs_end = []\n        batch_example_alias_list_pos = []\n        batch_example_true_entities = []\n        batch_word_indices = []\n        batch_spans_arr = []\n        batch_example_aliases = []\n        batch_idx_unq = []\n        batch_subsplit_idx = []\n        for idx_unq in tqdm(\n            range(num_exs),\n            desc=\"Prepping data\",\n            total=num_exs,\n            disable=not self.verbose,\n        ):\n            if do_extract_mentions:\n                sample = self.extract_mentions(text_list[idx_unq], label_func)\n            else:\n                sample = extracted_examples[idx_unq]\n                # Add the unk qids and gold values\n                sample[\"qids\"] = [\"Q-1\" for _ in range(len(sample[\"aliases\"]))]\n                sample[\"gold\"] = [True for _ in range(len(sample[\"aliases\"]))]\n            total_start_exs += len(sample[\"aliases\"])\n            char_spans = self.get_char_spans(sample[\"spans\"], sample[\"sentence\"])\n\n            final_char_spans.append(char_spans)\n\n            (\n                idxs_arr,\n                aliases_to_predict_per_split,\n                spans_arr,\n                phrase_tokens_arr,\n                pos_idxs,\n            ) = sentence_utils.split_sentence(\n                max_aliases=self.config.data_config.max_aliases,\n                phrase=sample[\"sentence\"],\n                spans=sample[\"spans\"],\n                aliases=sample[\"aliases\"],\n                aliases_seen_by_model=list(range(len(sample[\"aliases\"]))),\n                seq_len=self.config.data_config.max_seq_len,\n                
is_bert=True,\n tokenizer=self.tokenizer,\n )\n aliases_arr = [\n [sample[\"aliases\"][idx] for idx in idxs] for idxs in idxs_arr\n ]\n old_spans_arr = [\n [sample[\"spans\"][idx] for idx in idxs] for idxs in idxs_arr\n ]\n qids_arr = [[sample[\"qids\"][idx] for idx in idxs] for idxs in idxs_arr]\n word_indices_arr = [\n self.tokenizer.convert_tokens_to_ids(pt) for pt in phrase_tokens_arr\n ]\n # iterate over each sample in the split\n for sub_idx in range(len(idxs_arr)):\n # ====================================================\n # GENERATE MODEL INPUTS\n # ====================================================\n aliases_to_predict_arr = aliases_to_predict_per_split[sub_idx]\n\n assert (\n len(aliases_to_predict_arr) >= 0\n ), f\"There are no aliases to predict for an example. This should not happen at this point.\"\n assert (\n len(aliases_arr[sub_idx]) <= self.config.data_config.max_aliases\n ), f\"{sample} should have no more than {self.config.data_config.max_aliases} aliases.\"\n\n example_aliases_locs_start = (\n np.ones(self.config.data_config.max_aliases) * PAD_ID\n )\n example_aliases_locs_end = (\n np.ones(self.config.data_config.max_aliases) * PAD_ID\n )\n example_alias_list_pos = (\n np.ones(self.config.data_config.max_aliases) * PAD_ID\n )\n example_true_entities = (\n np.ones(self.config.data_config.max_aliases) * PAD_ID\n )\n example_qid_cands = [\n [\n \"-1\"\n for _ in range(\n get_max_candidates(self.entity_db, self.config.data_config)\n )\n ]\n for _ in range(self.config.data_config.max_aliases)\n ]\n example_eid_cands = [\n [\n -1\n for _ in range(\n get_max_candidates(self.entity_db, self.config.data_config)\n )\n ]\n for _ in range(self.config.data_config.max_aliases)\n ]\n for mention_idx, alias in enumerate(aliases_arr[sub_idx]):\n span_start_idx, span_end_idx = spans_arr[sub_idx][mention_idx]\n # generate indexes into alias table.\n alias_qids = np.array(sample[\"cands\"][mention_idx])\n # first entry is the non candidate class (NC and eid 0) - used when train in cands is false\n # if we train in candidates, this gets overwritten\n example_qid_cands[mention_idx][0] = \"NC\"\n example_qid_cands[mention_idx][\n (not self.config.data_config.train_in_candidates) : len(\n alias_qids\n )\n + (not self.config.data_config.train_in_candidates)\n ] = sample[\"cands\"][mention_idx]\n example_eid_cands[mention_idx][0] = 0\n example_eid_cands[mention_idx][\n (not self.config.data_config.train_in_candidates) : len(\n alias_qids\n )\n + (not self.config.data_config.train_in_candidates)\n ] = [\n self.entity_db.get_eid(q) for q in sample[\"cands\"][mention_idx]\n ]\n if not qids_arr[sub_idx][mention_idx] in alias_qids:\n # assert not data_args.train_in_candidates\n if not self.config.data_config.train_in_candidates:\n # set class label to be \"not in candidate set\"\n true_entity_idx = 0\n else:\n true_entity_idx = -2\n else:\n # Here we are getting the correct class label for training.\n # Our training is \"which of the max_entities entity candidates is the right one\n # (class labels 1 to max_entities) or is it none of these (class label 0)\".\n # + (not discard_noncandidate_entities) is to ensure label 0 is\n # reserved for \"not in candidate set\" class\n true_entity_idx = np.nonzero(\n alias_qids == qids_arr[sub_idx][mention_idx]\n )[0][0] + (not self.config.data_config.train_in_candidates)\n example_aliases_locs_start[mention_idx] = span_start_idx\n # The span_idxs are [start, end). We want [start, end]. 
So subtract 1 from end idx.\n                    example_aliases_locs_end[mention_idx] = span_end_idx - 1\n                    example_alias_list_pos[mention_idx] = idxs_arr[sub_idx][mention_idx]\n                    # leave as -1 if it's not an alias we want to predict; we get these if we split a sentence\n                    # and need to only predict subsets\n                    if mention_idx in aliases_to_predict_arr:\n                        example_true_entities[mention_idx] = true_entity_idx\n\n                # get word indices\n                word_indices = word_indices_arr[sub_idx]\n\n                batch_example_qid_cands.append(example_qid_cands)\n                batch_example_eid_cands.append(example_eid_cands)\n                batch_example_aliases_locs_start.append(example_aliases_locs_start)\n                batch_example_aliases_locs_end.append(example_aliases_locs_end)\n                batch_example_alias_list_pos.append(example_alias_list_pos)\n                batch_example_true_entities.append(example_true_entities)\n                batch_word_indices.append(word_indices)\n                batch_example_aliases.append(aliases_arr[sub_idx])\n                # Add the original sample spans because spans_arr is w.r.t. BERT subword tokens\n                batch_spans_arr.append(old_spans_arr[sub_idx])\n                batch_idx_unq.append(idx_unq)\n                batch_subsplit_idx.append(sub_idx)\n\n        batch_example_eid_cands = torch.tensor(batch_example_eid_cands).long()\n        batch_example_aliases_locs_start = torch.tensor(\n            batch_example_aliases_locs_start\n        )\n        batch_example_aliases_locs_end = torch.tensor(batch_example_aliases_locs_end)\n        batch_example_true_entities = torch.tensor(batch_example_true_entities)\n        batch_word_indices = torch.tensor(batch_word_indices)\n\n        final_pred_cands = [[] for _ in range(num_exs)]\n        final_all_cands = [[] for _ in range(num_exs)]\n        final_cand_probs = [[] for _ in range(num_exs)]\n        final_pred_probs = [[] for _ in range(num_exs)]\n        final_entity_embs = [[] for _ in range(num_exs)]\n        final_entity_cand_embs = [[] for _ in range(num_exs)]\n        final_titles = [[] for _ in range(num_exs)]\n        final_spans = [[] for _ in range(num_exs)]\n        final_aliases = [[] for _ in range(num_exs)]\n        for b_i in tqdm(\n            range(0, batch_word_indices.shape[0], ebs),\n            desc=\"Evaluating model\",\n            disable=not self.verbose,\n        ):\n            start_span_idx = batch_example_aliases_locs_start[b_i : b_i + ebs]\n            end_span_idx = batch_example_aliases_locs_end[b_i : b_i + ebs]\n            word_indices = batch_word_indices[b_i : b_i + ebs]\n            eid_cands = batch_example_eid_cands[b_i : b_i + ebs]\n            x_dict = self.get_forward_batch(\n                start_span_idx, end_span_idx, word_indices, eid_cands\n            )\n            x_dict[\"guid\"] = torch.arange(b_i, b_i + ebs, device=self.torch_device)\n\n            with torch.no_grad():\n                res = self.model(  # type: ignore\n                    uids=x_dict[\"guid\"],\n                    X_dict=x_dict,\n                    Y_dict=None,\n                    task_to_label_dict=self.task_to_label_dict,\n                    return_action_outputs=self.return_embs,\n                )\n            del x_dict\n            if self.return_embs:\n                (uid_bdict, _, prob_bdict, _, out_bdict) = res\n                output_embs = out_bdict[NED_TASK][f\"{PRED_LAYER}_ent_embs\"]\n            else:\n                output_embs = None\n                (uid_bdict, _, prob_bdict, _) = res\n            # ====================================================\n            # EVALUATE MODEL OUTPUTS\n            # ====================================================\n            # recover predictions\n            probs = prob_bdict[NED_TASK]\n            max_probs = probs.max(2)\n            max_probs_indices = probs.argmax(2)\n            for ex_i in range(probs.shape[0]):\n                idx_unq = batch_idx_unq[b_i + ex_i]\n                entity_cands = batch_example_qid_cands[b_i + ex_i]\n                # batch size is 1 so we can reshape\n                probs_ex = probs[ex_i].reshape(\n                    self.config.data_config.max_aliases, probs.shape[2]\n                )\n                for alias_idx, true_entity_pos_idx in enumerate(\n                    batch_example_true_entities[b_i + ex_i]\n                ):\n                    if true_entity_pos_idx != PAD_ID:\n                        
pred_idx = max_probs_indices[ex_i][alias_idx]\n pred_prob = max_probs[ex_i][alias_idx].item()\n all_cands = entity_cands[alias_idx]\n pred_qid = all_cands[pred_idx]\n if pred_prob > self.threshold:\n final_all_cands[idx_unq].append(all_cands)\n final_cand_probs[idx_unq].append(probs_ex[alias_idx])\n final_pred_cands[idx_unq].append(pred_qid)\n final_pred_probs[idx_unq].append(pred_prob)\n if self.return_embs:\n final_entity_embs[idx_unq].append(\n output_embs[ex_i][alias_idx][pred_idx]\n )\n final_entity_cand_embs[idx_unq].append(\n output_embs[ex_i][alias_idx]\n )\n final_aliases[idx_unq].append(\n batch_example_aliases[b_i + ex_i][alias_idx]\n )\n final_spans[idx_unq].append(\n batch_spans_arr[b_i + ex_i][alias_idx]\n )\n final_titles[idx_unq].append(\n self.entity_db.get_title(pred_qid)\n if pred_qid != \"NC\"\n else \"NC\"\n )\n total_final_exs += 1\n else:\n dropped_by_thresh += 1\n assert total_final_exs + dropped_by_thresh == total_start_exs, (\n f\"Something went wrong and we have predicted fewer mentions than extracted. \"\n f\"Start {total_start_exs}, Out {total_final_exs}, No cand {dropped_by_thresh}\"\n )\n res_dict = {\n \"qids\": final_pred_cands,\n \"probs\": final_pred_probs,\n \"titles\": final_titles,\n \"cands\": final_all_cands,\n \"cand_probs\": final_cand_probs,\n \"spans\": final_spans,\n \"aliases\": final_aliases,\n }\n if self.return_embs:\n res_dict[\"embs\"] = final_entity_embs\n res_dict[\"cand_embs\"] = final_entity_cand_embs\n return res_dict\n\n def get_forward_batch(\n self, start_span_idx, end_span_idx, token_ids, entity_cand_eid\n ):\n \"\"\"Preps the forward batch for disambiguation.\n\n Args:\n start_span_idx: start span tensor\n end_span_idx: end span tensor\n token_ids: word token tensor\n eid_cands: candidate eids\n\n Returns: X_dict used in Emmental\n \"\"\"\n entity_cand_eid_mask = entity_cand_eid == -1\n entity_cand_eid_noneg = torch.where(\n entity_cand_eid >= 0,\n entity_cand_eid,\n (\n torch.ones_like(entity_cand_eid, dtype=torch.long)\n * (self.entity_db.num_entities_with_pad_and_nocand - 1)\n ),\n )\n\n kg_prepped_embs = {}\n for emb_key in self.batch_on_the_fly_embs:\n kg_adj = self.batch_on_the_fly_embs[emb_key][\"kg_adj\"]\n prep_func = self.batch_on_the_fly_embs[emb_key][\"kg_adj_process_func\"]\n batch_prep = []\n for j in range(entity_cand_eid_noneg.shape[0]):\n batch_prep.append(\n prep_func(entity_cand_eid_noneg[j].cpu(), kg_adj).reshape(1, -1)\n )\n kg_prepped_embs[emb_key] = torch.tensor(\n batch_prep, device=self.torch_device\n )\n\n X_dict = {\n \"guids\": [],\n \"start_span_idx\": start_span_idx.to(self.torch_device),\n \"end_span_idx\": end_span_idx.to(self.torch_device),\n \"token_ids\": token_ids.to(self.torch_device),\n \"entity_cand_eid\": entity_cand_eid_noneg.to(self.torch_device),\n \"entity_cand_eid_mask\": entity_cand_eid_mask.to(self.torch_device),\n \"batch_on_the_fly_kg_adj\": kg_prepped_embs,\n }\n return X_dict\n\n def get_char_spans(self, spans, text):\n \"\"\"Helper function to get character spans instead of default word\n spans.\n\n Args:\n spans: word spans\n text: text\n\n Returns: character spans\n \"\"\"\n query_toks = text.split()\n char_spans = []\n for span in spans:\n space_btwn_toks = (\n len(\" \".join(query_toks[0 : span[0] + 1]))\n - len(\" \".join(query_toks[0 : span[0]]))\n - len(query_toks[span[0]])\n )\n char_b = len(\" \".join(query_toks[0 : span[0]])) + space_btwn_toks\n char_e = char_b + len(\" \".join(query_toks[span[0] : span[1]]))\n char_spans.append([char_b, char_e])\n return 
char_spans\n", "import argparse\nimport csv\nimport json\nimport os\nimport pickle\nimport random\nimport sys\nfrom collections import OrderedDict, defaultdict\n\nimport jsonlines\nimport numpy as np\nimport pandas as pd\nimport ujson\nfrom tqdm import tqdm\n\n\ndef load_mentions(file):\n lines = []\n with jsonlines.open(file) as f:\n for line in f:\n new_line = {\n \"id\": line[\"id\"],\n \"sentence\": line[\"sentence\"],\n \"aliases\": line[\"aliases\"],\n \"spans\": line[\"spans\"],\n \"gold\": line[\"gold\"],\n \"cand_probs\": line[\"cand_probs\"],\n \"qids\": line[\"qids\"],\n \"sent_idx_unq\": line[\"sent_idx_unq\"],\n \"probs\": line[\"probs\"],\n \"ctx_emb_ids\": line[\"ctx_emb_ids\"],\n \"entity_ids\": line[\"entity_ids\"],\n }\n lines.append(new_line)\n return pd.DataFrame(lines)\n\n\ndef generate_features(bootleg_labels_df, threshold):\n ctx_emb_id_dict = {}\n ctx_emb_id_dict_first = {}\n qid_dict = {}\n qid_dict_first = {}\n\n num_removed = 0\n for ind, row in bootleg_labels_df.iterrows():\n ctx_emb_ids = row[\"ctx_emb_ids\"]\n qids = row[\"qids\"]\n spans = row[\"spans\"]\n\n # get sentence length\n example = row[\"sentence\"]\n tokens = example.split(\" \")\n length = len(tokens)\n\n # initialize result datastructures\n ctx_emb_id_result = [-1] * length\n qid_result = [\"UNK\"] * length\n\n ctx_emb_id_result_first = [-1] * length\n qid_result_first = [\"UNK\"] * length\n\n for i in range(len(spans)):\n span = spans[i]\n start, end = span[0], span[1]\n span_len = end - start\n\n prob = row[\"probs\"][i]\n if prob < threshold:\n num_removed += 1\n continue\n\n # contextual\n ctx_emb_id = ctx_emb_ids[i]\n ctx_emb_id_lst = [ctx_emb_id] * span_len\n ctx_emb_id_result[start:end] = ctx_emb_id_lst\n ctx_emb_id_result_first[start] = ctx_emb_id\n\n # qids\n qid = qids[i]\n qid_lst = [qid] * span_len\n qid_result[start:end] = qid_lst\n qid_result_first[start] = qid\n\n idx = row[\"id\"]\n if idx in ctx_emb_id_dict:\n raise ValueError(\"ERROR: duplicate indices!\")\n\n ctx_emb_id_dict[idx] = ctx_emb_id_result\n qid_dict[idx] = qid_result\n\n ctx_emb_id_dict_first[idx] = ctx_emb_id_result_first\n qid_dict_first[idx] = qid_result_first\n print(\n f\"Removed {num_removed} out of {bootleg_labels_df.shape[0]} with threshold {threshold}\"\n )\n return ctx_emb_id_dict, qid_dict, ctx_emb_id_dict_first, qid_dict_first\n\n\ndef main(bootleg_directory, base_data, threshold=0.0):\n # load the features to add\n boot_labels_file = os.path.join(bootleg_directory, \"bootleg_labels.jsonl\")\n bootleg_labels_df = load_mentions(boot_labels_file)\n print(bootleg_labels_df.columns)\n print(bootleg_labels_df.shape)\n\n # load the base tacred data\n train_file = \"{}/train.json\".format(base_data)\n with open(train_file) as train:\n df_train = json.load(train)\n df_train = pd.DataFrame.from_dict(df_train, orient=\"columns\")\n print(\"TRAIN SHAPE: \", df_train.shape)\n\n dev_file = \"{}/dev.json\".format(base_data)\n with open(dev_file) as dev:\n df_dev = json.load(dev)\n df_dev = pd.DataFrame.from_dict(df_dev, orient=\"columns\")\n print(\"DEV SHAPE: \", df_dev.shape)\n\n test_file = \"{}/test.json\".format(base_data)\n with open(test_file) as test:\n df_test = json.load(test)\n df_test = pd.DataFrame.from_dict(df_test, orient=\"columns\")\n print(\"TEST SHAPE\", df_test.shape)\n\n (\n ctx_emb_id_dict,\n qid_dict,\n ctx_emb_id_dict_first,\n qid_dict_first,\n ) = generate_features(bootleg_labels_df, threshold)\n\n # add features to the data\n dfs = [df_train, df_dev, df_test]\n for df in dfs:\n 
df[\"entity_emb_id\"] = np.nan\n df[\"entity_emb_id_first\"] = np.nan\n df[\"ent_id\"] = np.nan\n df[\"ent_id_first\"] = np.nan\n\n dict_ctx_emb_id = {}\n dict_ctx_emb_id_first = {}\n dict_qid = {}\n dict_qid_first = {}\n\n for ind, row in df.iterrows():\n idx = row[\"id\"]\n tokens = row[\"token\"]\n length = len(tokens)\n\n # initialize result datastructures\n ctx_emb_id_default = [-1] * length\n qid_default = [\"UNK\"] * length\n\n # contextual\n if idx in ctx_emb_id_dict:\n dict_ctx_emb_id[idx] = ctx_emb_id_dict[idx]\n else:\n dict_ctx_emb_id[idx] = ctx_emb_id_default\n\n if idx in ctx_emb_id_dict_first:\n dict_ctx_emb_id_first[idx] = ctx_emb_id_dict_first[idx]\n else:\n dict_ctx_emb_id_first[idx] = ctx_emb_id_default\n\n # qids\n if idx in qid_dict:\n dict_qid[idx] = qid_dict[idx]\n else:\n dict_qid[idx] = qid_default\n\n if idx in qid_dict_first:\n dict_qid_first[idx] = qid_dict_first[idx]\n else:\n dict_qid_first[idx] = qid_default\n\n assert len(dict_ctx_emb_id.keys()) == df.shape[0]\n assert len(dict_ctx_emb_id_first.keys()) == df.shape[0]\n assert len(dict_qid.keys()) == df.shape[0]\n assert len(dict_qid_first.keys()) == df.shape[0]\n df[\"entity_emb_id\"] = df[\"id\"].map(dict_ctx_emb_id)\n df[\"entity_emb_id_first\"] = df[\"id\"].map(dict_ctx_emb_id_first)\n df[\"ent_id\"] = df[\"id\"].map(dict_qid)\n df[\"ent_id_first\"] = df[\"id\"].map(dict_qid_first)\n\n # Save tacred data with Bootleg features\n train_out = df_train.to_json(\n r\"{}/train_ent.json\".format(base_data), orient=\"records\"\n )\n dev_out = df_dev.to_json(r\"{}/dev_ent.json\".format(base_data), orient=\"records\")\n test_out = df_test.to_json(r\"{}/test_ent.json\".format(base_data), orient=\"records\")\n print(\n \"Saved datasets with Bootleg features to train_ent.json, dev_ent.json, test_ent.json in\",\n base_data,\n \"directory\",\n )\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--bootleg_directory\",\n type=str,\n help=\"location where ctx_embeddings.npy and bootleg_labels.jsonl saved\",\n )\n parser.add_argument(\n \"--tacred_directory\",\n type=str,\n default=\"/dfs/scratch0/lorr1/projects/bootleg-data/downstream/tacred\",\n help=\"location where base tacred data is located\",\n )\n args = parser.parse_args()\n\n main(args.bootleg_directory, args.tacred_directory)\n" ]
[ [ "torch.nn.Softmax", "torch.cat", "torch.zeros", "torch.nn.ModuleDict", "torch.sum", "torch.zeros_like", "torch.eye", "torch.tensor", "torch.bmm" ], [ "numpy.nonzero", "torch.hub._get_torch_home", "torch.tensor", "numpy.ones", "torch.no_grad", "torch.cuda.is_available", "torch.arange", "torch.device", "numpy.array", "torch.ones_like" ], [ "pandas.DataFrame", "pandas.DataFrame.from_dict" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
lightbooster/models
[ "833900b085a353712010c26c66ae9111246b5ac7", "833900b085a353712010c26c66ae9111246b5ac7", "833900b085a353712010c26c66ae9111246b5ac7", "833900b085a353712010c26c66ae9111246b5ac7" ]
[ "official/nlp/modeling/models/bert_classifier_test.py", "official/nlp/modeling/layers/text_layers_test.py", "official/vision/detection/modeling/shapemask_model.py", "research/object_detection/core/target_assigner.py" ]
[ "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for BERT trainer network.\"\"\"\n\nfrom absl.testing import parameterized\nimport tensorflow as tf\n\nfrom tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import\nfrom official.nlp.modeling import networks\nfrom official.nlp.modeling.models import bert_classifier\n\n\n# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It\n# guarantees forward compatibility of this code for the V2 switchover.\n@keras_parameterized.run_all_keras_modes\nclass BertClassifierTest(keras_parameterized.TestCase):\n\n @parameterized.named_parameters(('single_cls', 1, False), ('3_cls', 3, False),\n ('3_cls_dictoutputs', 3, True))\n def test_bert_trainer(self, num_classes, dict_outputs):\n \"\"\"Validate that the Keras object can be created.\"\"\"\n # Build a transformer network to use within the BERT trainer.\n vocab_size = 100\n sequence_length = 512\n test_network = networks.BertEncoder(\n vocab_size=vocab_size, num_layers=2, dict_outputs=dict_outputs)\n\n # Create a BERT trainer with the created network.\n bert_trainer_model = bert_classifier.BertClassifier(\n test_network, num_classes=num_classes)\n\n # Create a set of 2-dimensional inputs (the first dimension is implicit).\n word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)\n\n # Invoke the trainer model on the inputs. This causes the layer to be built.\n cls_outs = bert_trainer_model([word_ids, mask, type_ids])\n\n # Validate that the outputs are of the expected shape.\n expected_classification_shape = [None, num_classes]\n self.assertAllEqual(expected_classification_shape, cls_outs.shape.as_list())\n\n @parameterized.parameters(1, 2)\n def test_bert_trainer_tensor_call(self, num_classes):\n \"\"\"Validate that the Keras object can be invoked.\"\"\"\n # Build a transformer network to use within the BERT trainer. (Here, we use\n # a short sequence_length for convenience.)\n test_network = networks.BertEncoder(vocab_size=100, num_layers=2)\n\n # Create a BERT trainer with the created network.\n bert_trainer_model = bert_classifier.BertClassifier(\n test_network, num_classes=num_classes)\n\n # Create a set of 2-dimensional data tensors to feed into the model.\n word_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32)\n mask = tf.constant([[1, 1], [1, 0]], dtype=tf.int32)\n type_ids = tf.constant([[1, 1], [2, 2]], dtype=tf.int32)\n\n # Invoke the trainer model on the tensors. In Eager mode, this does the\n # actual calculation. 
(We can't validate the outputs, since the network is\n # too complex: this simply ensures we're not hitting runtime errors.)\n _ = bert_trainer_model([word_ids, mask, type_ids])\n\n def test_serialize_deserialize(self):\n \"\"\"Validate that the BERT trainer can be serialized and deserialized.\"\"\"\n # Build a transformer network to use within the BERT trainer. (Here, we use\n # a short sequence_length for convenience.)\n test_network = networks.BertEncoder(\n vocab_size=100, num_layers=2, sequence_length=5)\n\n # Create a BERT trainer with the created network. (Note that all the args\n # are different, so we can catch any serialization mismatches.)\n bert_trainer_model = bert_classifier.BertClassifier(\n test_network, num_classes=4, initializer='zeros')\n\n # Create another BERT trainer via serialization and deserialization.\n config = bert_trainer_model.get_config()\n new_bert_trainer_model = bert_classifier.BertClassifier.from_config(config)\n\n # Validate that the config can be forced to JSON.\n _ = new_bert_trainer_model.to_json()\n\n # If the serialization was successful, the new config should match the old.\n self.assertAllEqual(bert_trainer_model.get_config(),\n new_bert_trainer_model.get_config())\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests bert.text_layers.\"\"\"\n\nimport os\nimport tempfile\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom sentencepiece import SentencePieceTrainer\nfrom official.nlp.modeling.layers import text_layers\n\n\nclass RoundRobinTruncatorTest(tf.test.TestCase):\n\n def test_correct_outputs(self):\n\n def test_input(start, lengths):\n return tf.ragged.constant([[start + 10*j + i for i in range(length)]\n for j, length in enumerate(lengths)],\n dtype=tf.int32)\n\n # Single segment.\n single_input = test_input(11, [4, 5, 6])\n expected_single_output = tf.ragged.constant(\n [[11, 12, 13, 14],\n [21, 22, 23, 24, 25],\n [31, 32, 33, 34, 35], # Truncated.\n ])\n\n self.assertAllEqual(\n expected_single_output,\n text_layers.round_robin_truncate_inputs(single_input, limit=5))\n # Test wrapping in a singleton list.\n actual_single_list_output = text_layers.round_robin_truncate_inputs(\n [single_input], limit=5)\n self.assertIsInstance(actual_single_list_output, list)\n self.assertAllEqual(expected_single_output, actual_single_list_output[0])\n\n # Two segments.\n input_a = test_input(111, [1, 2, 2, 3, 4, 5])\n input_b = test_input(211, [1, 3, 4, 2, 2, 5])\n expected_a = tf.ragged.constant(\n [[111],\n [121, 122],\n [131, 132],\n [141, 142, 143],\n [151, 152, 153], # Truncated.\n [161, 162, 163], # Truncated.\n ])\n expected_b = tf.ragged.constant(\n [[211],\n [221, 222, 223],\n [231, 232, 233], # Truncated.\n [241, 242],\n [251, 252],\n [261, 262], # Truncated.\n ])\n actual_a, actual_b = text_layers.round_robin_truncate_inputs(\n [input_a, input_b], limit=5)\n self.assertAllEqual(expected_a, actual_a)\n 
self.assertAllEqual(expected_b, actual_b)\n\n\n# This test covers the in-process behavior of a BertTokenizer layer.\n# For saving, restoring, and the restored behavior (incl. shape inference),\n# see nlp/tools/export_tfhub_lib_test.py.\nclass BertTokenizerTest(tf.test.TestCase):\n\n def _make_vocab_file(self, vocab, filename=\"vocab.txt\"):\n path = os.path.join(\n tempfile.mkdtemp(dir=self.get_temp_dir()), # New subdir each time.\n filename)\n with tf.io.gfile.GFile(path, \"w\") as f:\n f.write(\"\\n\".join(vocab + [\"\"]))\n return path\n\n def test_uncased(self):\n vocab_file = self._make_vocab_file(\n [\"[PAD]\", \"[UNK]\", \"[CLS]\", \"[SEP]\", \"d\", \"##ef\", \"abc\", \"xy\"])\n bert_tokenize = text_layers.BertTokenizer(\n vocab_file=vocab_file, lower_case=True)\n inputs = tf.constant([\"abc def\", \"ABC DEF d\"])\n token_ids = bert_tokenize(inputs)\n self.assertAllEqual(token_ids, tf.ragged.constant([[[6], [4, 5]],\n [[6], [4, 5], [4]]]))\n bert_tokenize.tokenize_with_offsets = True\n token_ids_2, start_offsets, limit_offsets = bert_tokenize(inputs)\n self.assertAllEqual(token_ids, token_ids_2)\n self.assertAllEqual(start_offsets, tf.ragged.constant([[[0], [4, 5]],\n [[0], [4, 5], [8]]]))\n self.assertAllEqual(limit_offsets, tf.ragged.constant([[[3], [5, 7]],\n [[3], [5, 7], [9]]]))\n self.assertEqual(bert_tokenize.vocab_size.numpy(), 8)\n\n # Repeat the above and test that case matters with lower_case=False.\n def test_cased(self):\n vocab_file = self._make_vocab_file(\n [\"[PAD]\", \"[UNK]\", \"[CLS]\", \"[SEP]\", \"d\", \"##ef\", \"abc\", \"ABC\"])\n bert_tokenize = text_layers.BertTokenizer(\n vocab_file=vocab_file, lower_case=False, tokenize_with_offsets=True)\n inputs = tf.constant([\"abc def\", \"ABC DEF\"])\n token_ids, start_offsets, limit_offsets = bert_tokenize(inputs)\n self.assertAllEqual(token_ids, tf.ragged.constant([[[6], [4, 5]],\n [[7], [1]]]))\n self.assertAllEqual(start_offsets, tf.ragged.constant([[[0], [4, 5]],\n [[0], [4]]]))\n self.assertAllEqual(limit_offsets, tf.ragged.constant([[[3], [5, 7]],\n [[3], [7]]]))\n\n def test_special_tokens_complete(self):\n vocab_file = self._make_vocab_file(\n [\"foo\", \"[PAD]\", \"[UNK]\", \"[CLS]\", \"[SEP]\", \"[MASK]\", \"xy\"])\n bert_tokenize = text_layers.BertTokenizer(\n vocab_file=vocab_file, lower_case=True)\n self.assertDictEqual(bert_tokenize.get_special_tokens_dict(),\n dict(padding_id=1,\n start_of_sequence_id=3,\n end_of_segment_id=4,\n mask_id=5,\n vocab_size=7))\n\n def test_special_tokens_partial(self):\n vocab_file = self._make_vocab_file(\n [\"[PAD]\", \"[CLS]\", \"[SEP]\"])\n bert_tokenize = text_layers.BertTokenizer(\n vocab_file=vocab_file, lower_case=True)\n self.assertDictEqual(bert_tokenize.get_special_tokens_dict(),\n dict(padding_id=0,\n start_of_sequence_id=1,\n end_of_segment_id=2,\n vocab_size=3)) # No mask_id,\n\n def test_special_tokens_in_estimator(self):\n \"\"\"Tests getting special tokens without an Eager init context.\"\"\"\n vocab_file = self._make_vocab_file(\n [\"[PAD]\", \"[UNK]\", \"[CLS]\", \"[SEP]\", \"d\", \"##ef\", \"abc\", \"xy\"])\n\n def input_fn():\n with tf.init_scope():\n self.assertFalse(tf.executing_eagerly())\n # Build a preprocessing Model.\n sentences = tf.keras.layers.Input(shape=[], dtype=tf.string)\n bert_tokenizer = text_layers.BertTokenizer(\n vocab_file=vocab_file, lower_case=True)\n special_tokens_dict = bert_tokenizer.get_special_tokens_dict()\n for k, v in special_tokens_dict.items():\n self.assertIsInstance(v, int, \"Unexpected type for {}\".format(k))\n 
tokens = bert_tokenizer(sentences)\n packed_inputs = text_layers.BertPackInputs(\n 4, special_tokens_dict=special_tokens_dict)(tokens)\n preprocessing = tf.keras.Model(sentences, packed_inputs)\n # Map the dataset.\n ds = tf.data.Dataset.from_tensors(\n (tf.constant([\"abc\", \"DEF\"]), tf.constant([0, 1])))\n ds = ds.map(lambda features, labels: (preprocessing(features), labels))\n return ds\n\n def model_fn(features, labels, mode):\n del labels # Unused.\n return tf.estimator.EstimatorSpec(mode=mode,\n predictions=features[\"input_word_ids\"])\n\n estimator = tf.estimator.Estimator(model_fn=model_fn)\n outputs = list(estimator.predict(input_fn))\n self.assertAllEqual(outputs, np.array([[2, 6, 3, 0],\n [2, 4, 5, 3]]))\n\n\n# This test covers the in-process behavior of a SentencepieceTokenizer layer.\nclass SentencepieceTokenizerTest(tf.test.TestCase):\n\n def setUp(self):\n super().setUp()\n # Make a sentencepiece model.\n tmp_dir = self.get_temp_dir()\n tempfile.mkdtemp(dir=tmp_dir)\n vocab = [\"a\", \"b\", \"c\", \"d\", \"e\", \"abc\", \"def\", \"ABC\", \"DEF\"]\n model_prefix = os.path.join(tmp_dir, \"spm_model\")\n input_text_file_path = os.path.join(tmp_dir, \"train_input.txt\")\n with tf.io.gfile.GFile(input_text_file_path, \"w\") as f:\n f.write(\" \".join(vocab + [\"\\n\"]))\n # Add 7 more tokens: <pad>, <unk>, [CLS], [SEP], [MASK], <s>, </s>.\n full_vocab_size = len(vocab) + 7\n flags = dict(\n model_prefix=model_prefix,\n model_type=\"word\",\n input=input_text_file_path,\n pad_id=0, unk_id=1, control_symbols=\"[CLS],[SEP],[MASK]\",\n vocab_size=full_vocab_size,\n bos_id=full_vocab_size-2, eos_id=full_vocab_size-1)\n SentencePieceTrainer.Train(\n \" \".join([\"--{}={}\".format(k, v) for k, v in flags.items()]))\n self._spm_path = model_prefix + \".model\"\n\n def test_uncased(self):\n sentencepiece_tokenizer = text_layers.SentencepieceTokenizer(\n model_file_path=self._spm_path, lower_case=True, nbest_size=0)\n\n inputs = tf.constant([\"abc def\", \"ABC DEF d\"])\n token_ids = sentencepiece_tokenizer(inputs)\n self.assertAllEqual(\n token_ids,\n tf.ragged.constant([[8, 12], [8, 12, 11]]))\n sentencepiece_tokenizer.tokenize_with_offsets = True\n token_ids_2, start_offsets, limit_offsets = sentencepiece_tokenizer(inputs)\n self.assertAllEqual(token_ids, token_ids_2)\n self.assertAllEqual(\n start_offsets, tf.ragged.constant([[0, 3], [0, 3, 7]]))\n self.assertAllEqual(\n limit_offsets, tf.ragged.constant([[3, 7], [3, 7, 9]]))\n self.assertEqual(sentencepiece_tokenizer.vocab_size.numpy(), 16)\n\n # Repeat the above and test that case matters with lower_case=False.\n def test_cased(self):\n sentencepiece_tokenizer = text_layers.SentencepieceTokenizer(\n model_file_path=self._spm_path,\n lower_case=False,\n nbest_size=0,\n tokenize_with_offsets=False)\n\n inputs = tf.constant([\"abc def\", \"ABC DEF d\"])\n token_ids = sentencepiece_tokenizer(inputs)\n self.assertAllEqual(\n token_ids,\n tf.ragged.constant([[8, 12], [5, 6, 11]]))\n sentencepiece_tokenizer.tokenize_with_offsets = True\n token_ids_2, start_offsets, limit_offsets = sentencepiece_tokenizer(inputs)\n self.assertAllEqual(token_ids, token_ids_2)\n self.assertAllEqual(\n start_offsets,\n tf.ragged.constant([[0, 3], [0, 3, 7]]))\n self.assertAllEqual(\n limit_offsets,\n tf.ragged.constant([[3, 7], [3, 7, 9]]))\n\n def test_special_tokens(self):\n sentencepiece_tokenizer = text_layers.SentencepieceTokenizer(\n model_file_path=self._spm_path, lower_case=True, nbest_size=0)\n 
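# The ids asserted below follow from the setUp() training flags: pad_id=0,\n # unk_id=1, and the control symbols [CLS], [SEP], [MASK] taking ids 2, 3, 4,\n # with full_vocab_size=16.\n 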
self.assertDictEqual(sentencepiece_tokenizer.get_special_tokens_dict(),\n dict(padding_id=0,\n start_of_sequence_id=2,\n end_of_segment_id=3,\n mask_id=4,\n vocab_size=16))\n\n def test_special_tokens_in_estimator(self):\n \"\"\"Tests getting special tokens without an Eager init context.\"\"\"\n\n def input_fn():\n with tf.init_scope():\n self.assertFalse(tf.executing_eagerly())\n # Build a preprocessing Model.\n sentences = tf.keras.layers.Input(shape=[], dtype=tf.string)\n sentencepiece_tokenizer = text_layers.SentencepieceTokenizer(\n model_file_path=self._spm_path, lower_case=True, nbest_size=0)\n special_tokens_dict = sentencepiece_tokenizer.get_special_tokens_dict()\n for k, v in special_tokens_dict.items():\n self.assertIsInstance(v, int, \"Unexpected type for {}\".format(k))\n tokens = sentencepiece_tokenizer(sentences)\n packed_inputs = text_layers.BertPackInputs(\n 4, special_tokens_dict=special_tokens_dict)(tokens)\n preprocessing = tf.keras.Model(sentences, packed_inputs)\n # Map the dataset.\n ds = tf.data.Dataset.from_tensors(\n (tf.constant([\"abc\", \"DEF\"]), tf.constant([0, 1])))\n ds = ds.map(lambda features, labels: (preprocessing(features), labels))\n return ds\n\n def model_fn(features, labels, mode):\n del labels # Unused.\n return tf.estimator.EstimatorSpec(mode=mode,\n predictions=features[\"input_word_ids\"])\n\n estimator = tf.estimator.Estimator(model_fn=model_fn)\n outputs = list(estimator.predict(input_fn))\n self.assertAllEqual(outputs, np.array([[2, 8, 3, 0],\n [2, 12, 3, 0]]))\n\n def test_strip_diacritics(self):\n sentencepiece_tokenizer = text_layers.SentencepieceTokenizer(\n model_file_path=self._spm_path,\n lower_case=True,\n nbest_size=0,\n strip_diacritics=True)\n inputs = tf.constant([\"a b c d e\", \"ă ḅ č ḓ é\"])\n token_ids = sentencepiece_tokenizer(inputs)\n self.assertAllEqual(\n token_ids,\n tf.ragged.constant([[7, 9, 10, 11, 13], [7, 9, 10, 11, 13]]))\n\n def test_fail_on_tokenize_with_offsets_and_strip_diacritics(self):\n # Raise an error in init().\n with self.assertRaises(ValueError):\n text_layers.SentencepieceTokenizer(\n model_file_path=self._spm_path,\n tokenize_with_offsets=True,\n lower_case=True,\n nbest_size=0,\n strip_diacritics=True)\n\n sentencepiece_tokenizer = text_layers.SentencepieceTokenizer(\n model_file_path=self._spm_path,\n lower_case=True,\n nbest_size=0,\n strip_diacritics=True)\n sentencepiece_tokenizer.tokenize_with_offsets = True\n\n # Raise an error in call():\n inputs = tf.constant([\"abc def\", \"ABC DEF d\", \"Äffin\"])\n with self.assertRaises(ValueError):\n sentencepiece_tokenizer(inputs)\n\n def test_serialize_deserialize(self):\n self.skipTest(\"b/170480226\")\n sentencepiece_tokenizer = text_layers.SentencepieceTokenizer(\n model_file_path=self._spm_path,\n lower_case=False,\n nbest_size=0,\n tokenize_with_offsets=False,\n name=\"sentencepiece_tokenizer_layer\")\n config = sentencepiece_tokenizer.get_config()\n new_tokenizer = text_layers.SentencepieceTokenizer.from_config(config)\n self.assertEqual(config, new_tokenizer.get_config())\n inputs = tf.constant([\"abc def\", \"ABC DEF d\"])\n token_ids = sentencepiece_tokenizer(inputs)\n token_ids_2 = new_tokenizer(inputs)\n self.assertAllEqual(token_ids, token_ids_2)\n\n # TODO(b/170480226): Remove once tf_hub_export_lib_test.py covers saving.\n def test_saving(self):\n sentencepiece_tokenizer = text_layers.SentencepieceTokenizer(\n model_file_path=self._spm_path, lower_case=True, nbest_size=0)\n inputs = tf.keras.layers.Input([], dtype=tf.string)\n outputs = 
sentencepiece_tokenizer(inputs)\n model = tf.keras.Model(inputs, outputs)\n export_path = tempfile.mkdtemp(dir=self.get_temp_dir())\n model.save(export_path, signatures={})\n\n\nclass BertPackInputsTest(tf.test.TestCase):\n\n def test_round_robin_correct_outputs(self):\n bpi = text_layers.BertPackInputs(\n 10,\n start_of_sequence_id=1001,\n end_of_segment_id=1002,\n padding_id=999,\n truncator=\"round_robin\")\n # Single input, rank 2.\n bert_inputs = bpi(\n tf.ragged.constant([[11, 12, 13],\n [21, 22, 23, 24, 25, 26, 27, 28, 29, 30]]))\n self.assertAllEqual(\n bert_inputs[\"input_word_ids\"],\n tf.constant([[1001, 11, 12, 13, 1002, 999, 999, 999, 999, 999],\n [1001, 21, 22, 23, 24, 25, 26, 27, 28, 1002]]))\n self.assertAllEqual(\n bert_inputs[\"input_mask\"],\n tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]))\n self.assertAllEqual(\n bert_inputs[\"input_type_ids\"],\n tf.constant([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))\n\n # Two inputs, rank 3. Truncation does not respect word boundaries.\n bert_inputs = bpi([\n tf.ragged.constant([[[111], [112, 113]],\n [[121, 122, 123], [124, 125, 126], [127, 128]]]),\n tf.ragged.constant([[[211, 212], [213]],\n [[221, 222], [223, 224, 225], [226, 227, 228]]])\n ])\n self.assertAllEqual(\n bert_inputs[\"input_word_ids\"],\n tf.constant([[1001, 111, 112, 113, 1002, 211, 212, 213, 1002, 999],\n [1001, 121, 122, 123, 124, 1002, 221, 222, 223, 1002]]))\n self.assertAllEqual(\n bert_inputs[\"input_mask\"],\n tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]))\n self.assertAllEqual(\n bert_inputs[\"input_type_ids\"],\n tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 0, 1, 1, 1, 1]]))\n\n # Three inputs are not yet supported for round_robin.\n with self.assertRaisesRegex(ValueError, \"Must pass 1 or 2 inputs\"):\n bert_inputs = bpi([\n tf.ragged.constant([[[111], [112, 113]],\n [[121, 122, 123], [124, 125, 126], [127, 128]]]),\n tf.ragged.constant([[[211, 212], [213]],\n [[221, 222], [223, 224, 225], [226, 227, 228]]]),\n tf.ragged.constant([[[311, 312], [313]],\n [[321, 322], [323, 324, 325], [326, 327, 328]]])\n ])\n\n def test_waterfall_correct_outputs(self):\n bpi = text_layers.BertPackInputs(\n 10,\n start_of_sequence_id=1001,\n end_of_segment_id=1002,\n padding_id=999,\n truncator=\"waterfall\")\n # Single input, rank 2.\n bert_inputs = bpi(\n tf.ragged.constant([[11, 12, 13],\n [21, 22, 23, 24, 25, 26, 27, 28, 29, 30]]))\n self.assertAllEqual(\n bert_inputs[\"input_word_ids\"],\n tf.constant([[1001, 11, 12, 13, 1002, 999, 999, 999, 999, 999],\n [1001, 21, 22, 23, 24, 25, 26, 27, 28, 1002]]))\n self.assertAllEqual(\n bert_inputs[\"input_mask\"],\n tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]))\n self.assertAllEqual(\n bert_inputs[\"input_type_ids\"],\n tf.constant([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))\n\n # Two inputs, rank 3. 
Truncation does not respect word boundaries.\n bert_inputs = bpi([\n tf.ragged.constant([[[111], [112, 113]],\n [[121, 122, 123], [124, 125, 126], [127, 128]]]),\n tf.ragged.constant([[[211, 212], [213]],\n [[221, 222], [223, 224, 225], [226, 227, 228]]])\n ])\n self.assertAllEqual(\n bert_inputs[\"input_word_ids\"],\n tf.constant([[1001, 111, 112, 113, 1002, 211, 212, 213, 1002, 999],\n [1001, 121, 122, 123, 124, 125, 126, 127, 1002, 1002]]))\n self.assertAllEqual(\n bert_inputs[\"input_mask\"],\n tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]))\n self.assertAllEqual(\n bert_inputs[\"input_type_ids\"],\n tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]))\n\n # Three inputs, rank 3. Truncation does not respect word boundaries.\n bert_inputs = bpi([\n tf.ragged.constant([[[111], [112, 113]],\n [[121, 122, 123], [124, 125, 126], [127, 128]]]),\n tf.ragged.constant([[[211], [212]],\n [[221, 222], [223, 224, 225], [226, 227, 228]]]),\n tf.ragged.constant([[[311, 312], [313]],\n [[321, 322], [323, 324, 325], [326, 327]]])\n ])\n self.assertAllEqual(\n bert_inputs[\"input_word_ids\"],\n tf.constant([[1001, 111, 112, 113, 1002, 211, 212, 1002, 311, 1002],\n [1001, 121, 122, 123, 124, 125, 126, 1002, 1002, 1002]]))\n self.assertAllEqual(\n bert_inputs[\"input_mask\"],\n tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]))\n self.assertAllEqual(\n bert_inputs[\"input_type_ids\"],\n tf.constant([[0, 0, 0, 0, 0, 1, 1, 1, 2, 2],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 2]]))\n\n def test_special_tokens_dict(self):\n special_tokens_dict = dict(start_of_sequence_id=1001,\n end_of_segment_id=1002,\n padding_id=999,\n extraneous_key=666)\n bpi = text_layers.BertPackInputs(10,\n special_tokens_dict=special_tokens_dict)\n bert_inputs = bpi(\n tf.ragged.constant([[11, 12, 13],\n [21, 22, 23, 24, 25, 26, 27, 28, 29, 30]]))\n self.assertAllEqual(\n bert_inputs[\"input_word_ids\"],\n tf.constant([[1001, 11, 12, 13, 1002, 999, 999, 999, 999, 999],\n [1001, 21, 22, 23, 24, 25, 26, 27, 28, 1002]]))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Model definition for the ShapeMask Model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom official.vision.detection.dataloader import anchor\nfrom official.vision.detection.dataloader import mode_keys\nfrom official.vision.detection.evaluation import factory as eval_factory\nfrom official.vision.detection.modeling import base_model\nfrom official.vision.detection.modeling import losses\nfrom official.vision.detection.modeling.architecture import factory\nfrom official.vision.detection.ops import postprocess_ops\nfrom official.vision.detection.utils import box_utils\n\n\nclass ShapeMaskModel(base_model.Model):\n \"\"\"ShapeMask model function.\"\"\"\n\n def __init__(self, params):\n super(ShapeMaskModel, self).__init__(params)\n\n self._params = params\n self._keras_model = None\n\n # Architecture generators.\n self._backbone_fn = factory.backbone_generator(params)\n self._fpn_fn = factory.multilevel_features_generator(params)\n self._retinanet_head_fn = factory.retinanet_head_generator(params)\n self._shape_prior_head_fn = factory.shapeprior_head_generator(params)\n self._coarse_mask_fn = factory.coarsemask_head_generator(params)\n self._fine_mask_fn = factory.finemask_head_generator(params)\n\n # Loss functions.\n self._cls_loss_fn = losses.RetinanetClassLoss(\n params.retinanet_loss, params.architecture.num_classes)\n self._box_loss_fn = losses.RetinanetBoxLoss(params.retinanet_loss)\n self._box_loss_weight = params.retinanet_loss.box_loss_weight\n\n # Mask loss function.\n self._shapemask_prior_loss_fn = losses.ShapemaskMseLoss()\n self._shapemask_loss_fn = losses.ShapemaskLoss()\n self._shape_prior_loss_weight = (\n params.shapemask_loss.shape_prior_loss_weight)\n self._coarse_mask_loss_weight = (\n params.shapemask_loss.coarse_mask_loss_weight)\n self._fine_mask_loss_weight = (params.shapemask_loss.fine_mask_loss_weight)\n\n # Predict function.\n self._generate_detections_fn = postprocess_ops.MultilevelDetectionGenerator(\n params.architecture.min_level, params.architecture.max_level,\n params.postprocess)\n\n def build_outputs(self, inputs, mode):\n is_training = mode == mode_keys.TRAIN\n images = inputs['image']\n\n if 'anchor_boxes' in inputs:\n anchor_boxes = inputs['anchor_boxes']\n else:\n anchor_boxes = anchor.Anchor(\n self._params.architecture.min_level,\n self._params.architecture.max_level, self._params.anchor.num_scales,\n self._params.anchor.aspect_ratios, self._params.anchor.anchor_size,\n images.get_shape().as_list()[1:3]).multilevel_boxes\n\n batch_size = tf.shape(images)[0]\n for level in anchor_boxes:\n anchor_boxes[level] = tf.tile(\n tf.expand_dims(anchor_boxes[level], 0), [batch_size, 1, 1, 1])\n\n backbone_features = self._backbone_fn(images, is_training=is_training)\n fpn_features = 
self._fpn_fn(backbone_features, is_training=is_training)\n cls_outputs, box_outputs = self._retinanet_head_fn(\n fpn_features, is_training=is_training)\n\n valid_boxes, valid_scores, valid_classes, valid_detections = (\n self._generate_detections_fn(box_outputs, cls_outputs, anchor_boxes,\n inputs['image_info'][:, 1:2, :]))\n\n image_size = images.get_shape().as_list()[1:3]\n valid_outer_boxes = box_utils.compute_outer_boxes(\n tf.reshape(valid_boxes, [-1, 4]),\n image_size,\n scale=self._params.shapemask_parser.outer_box_scale)\n valid_outer_boxes = tf.reshape(valid_outer_boxes, tf.shape(valid_boxes))\n\n # Wrapping the if/else code paths into a layer to make the checkpoint loadable\n # in prediction mode.\n class SampledBoxesLayer(tf.keras.layers.Layer):\n \"\"\"Selects groundtruth or detected boxes according to the training mode.\"\"\"\n\n def call(self, inputs, val_boxes, val_classes, val_outer_boxes, training):\n if training:\n boxes = inputs['mask_boxes']\n outer_boxes = inputs['mask_outer_boxes']\n classes = inputs['mask_classes']\n else:\n boxes = val_boxes\n classes = val_classes\n outer_boxes = val_outer_boxes\n return boxes, classes, outer_boxes\n\n boxes, classes, outer_boxes = SampledBoxesLayer()(\n inputs,\n valid_boxes,\n valid_classes,\n valid_outer_boxes,\n training=is_training)\n\n instance_features, prior_masks = self._shape_prior_head_fn(\n fpn_features, boxes, outer_boxes, classes, is_training)\n coarse_mask_logits = self._coarse_mask_fn(instance_features, prior_masks,\n classes, is_training)\n fine_mask_logits = self._fine_mask_fn(instance_features, coarse_mask_logits,\n classes, is_training)\n\n model_outputs = {\n 'cls_outputs': cls_outputs,\n 'box_outputs': box_outputs,\n 'fine_mask_logits': fine_mask_logits,\n 'coarse_mask_logits': coarse_mask_logits,\n 'prior_masks': prior_masks,\n }\n\n if not is_training:\n model_outputs.update({\n 'num_detections': valid_detections,\n 'detection_boxes': valid_boxes,\n 'detection_outer_boxes': valid_outer_boxes,\n 'detection_masks': fine_mask_logits,\n 'detection_classes': valid_classes,\n 'detection_scores': valid_scores,\n })\n\n return model_outputs\n\n def build_loss_fn(self):\n if self._keras_model is None:\n raise ValueError('build_loss_fn() must be called after build_model().')\n\n filter_fn = self.make_filter_trainable_variables_fn()\n trainable_variables = filter_fn(self._keras_model.trainable_variables)\n\n def _total_loss_fn(labels, outputs):\n cls_loss = self._cls_loss_fn(outputs['cls_outputs'],\n labels['cls_targets'],\n labels['num_positives'])\n box_loss = self._box_loss_fn(outputs['box_outputs'],\n labels['box_targets'],\n labels['num_positives'])\n\n # Adds Shapemask model losses.\n shape_prior_loss = self._shapemask_prior_loss_fn(outputs['prior_masks'],\n labels['mask_targets'],\n labels['mask_is_valid'])\n coarse_mask_loss = self._shapemask_loss_fn(outputs['coarse_mask_logits'],\n labels['mask_targets'],\n labels['mask_is_valid'])\n fine_mask_loss = self._shapemask_loss_fn(outputs['fine_mask_logits'],\n labels['fine_mask_targets'],\n labels['mask_is_valid'])\n\n model_loss = (\n cls_loss + self._box_loss_weight * box_loss +\n shape_prior_loss * self._shape_prior_loss_weight +\n coarse_mask_loss * self._coarse_mask_loss_weight +\n fine_mask_loss * self._fine_mask_loss_weight)\n\n l2_regularization_loss = self.weight_decay_loss(trainable_variables)\n total_loss = model_loss + l2_regularization_loss\n\n shapemask_losses = {\n 'total_loss': total_loss,\n 'loss': total_loss,\n 'retinanet_cls_loss': cls_loss,\n 'l2_regularization_loss': l2_regularization_loss,\n 
'retinanet_box_loss': box_loss,\n 'shapemask_prior_loss': shape_prior_loss,\n 'shapemask_coarse_mask_loss': coarse_mask_loss,\n 'shapemask_fine_mask_loss': fine_mask_loss,\n 'model_loss': model_loss,\n }\n return shapemask_losses\n\n return _total_loss_fn\n\n def build_input_layers(self, params, mode):\n is_training = mode == mode_keys.TRAIN\n input_shape = (\n params.shapemask_parser.output_size +\n [params.shapemask_parser.num_channels])\n if is_training:\n batch_size = params.train.batch_size\n input_layer = {\n 'image':\n tf.keras.layers.Input(\n shape=input_shape,\n batch_size=batch_size,\n name='image',\n dtype=tf.bfloat16 if self._use_bfloat16 else tf.float32),\n 'image_info':\n tf.keras.layers.Input(\n shape=[4, 2], batch_size=batch_size, name='image_info'),\n 'mask_classes':\n tf.keras.layers.Input(\n shape=[params.shapemask_parser.num_sampled_masks],\n batch_size=batch_size,\n name='mask_classes',\n dtype=tf.int64),\n 'mask_outer_boxes':\n tf.keras.layers.Input(\n shape=[params.shapemask_parser.num_sampled_masks, 4],\n batch_size=batch_size,\n name='mask_outer_boxes',\n dtype=tf.float32),\n 'mask_boxes':\n tf.keras.layers.Input(\n shape=[params.shapemask_parser.num_sampled_masks, 4],\n batch_size=batch_size,\n name='mask_boxes',\n dtype=tf.float32),\n }\n else:\n batch_size = params.eval.batch_size\n input_layer = {\n 'image':\n tf.keras.layers.Input(\n shape=input_shape,\n batch_size=batch_size,\n name='image',\n dtype=tf.bfloat16 if self._use_bfloat16 else tf.float32),\n 'image_info':\n tf.keras.layers.Input(\n shape=[4, 2], batch_size=batch_size, name='image_info'),\n }\n return input_layer\n\n def build_model(self, params, mode):\n if self._keras_model is None:\n input_layers = self.build_input_layers(self._params, mode)\n outputs = self.model_outputs(input_layers, mode)\n\n model = tf.keras.models.Model(\n inputs=input_layers, outputs=outputs, name='shapemask')\n assert model is not None, 'Failed to build tf.keras.Model.'\n model.optimizer = self.build_optimizer()\n self._keras_model = model\n\n return self._keras_model\n\n def post_processing(self, labels, outputs):\n required_output_fields = [\n 'num_detections', 'detection_boxes', 'detection_classes',\n 'detection_masks', 'detection_scores'\n ]\n\n for field in required_output_fields:\n if field not in outputs:\n raise ValueError(\n '\"{}\" is missing in outputs, required {} found {}'.format(\n field, required_output_fields, outputs.keys()))\n\n required_label_fields = ['image_info']\n for field in required_label_fields:\n if field not in labels:\n raise ValueError(\n '\"{}\" is missing in labels, required {} found {}'.format(\n field, required_label_fields, labels.keys()))\n\n predictions = {\n 'image_info': labels['image_info'],\n 'num_detections': outputs['num_detections'],\n 'detection_boxes': outputs['detection_boxes'],\n 'detection_outer_boxes': outputs['detection_outer_boxes'],\n 'detection_classes': outputs['detection_classes'],\n 'detection_scores': outputs['detection_scores'],\n 'detection_masks': outputs['detection_masks'],\n }\n\n if 'groundtruths' in labels:\n predictions['source_id'] = labels['groundtruths']['source_id']\n labels = labels['groundtruths']\n\n return labels, predictions\n\n def eval_metrics(self):\n return eval_factory.evaluator_generator(self._params.eval)\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Base target assigner module.\n\nThe job of a TargetAssigner is, for a given set of anchors (bounding boxes) and\ngroundtruth detections (bounding boxes), to assign classification and regression\ntargets to each anchor as well as weights to each anchor (specifying, e.g.,\nwhich anchors should not contribute to training loss).\n\nIt assigns classification/regression targets by performing the following steps:\n1) Computing pairwise similarity between anchors and groundtruth boxes using a\n provided RegionSimilarityCalculator\n2) Computing a matching based on the similarity matrix using a provided Matcher\n3) Assigning regression targets based on the matching and a provided BoxCoder\n4) Assigning classification targets based on the matching and groundtruth labels\n\nNote that TargetAssigners only operate on detections from a single\nimage at a time, so any logic for applying a TargetAssigner to multiple\nimages must be handled externally.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom six.moves import range\nfrom six.moves import zip\nimport tensorflow.compat.v1 as tf\nimport tensorflow.compat.v2 as tf2\n\nfrom object_detection.box_coders import faster_rcnn_box_coder\nfrom object_detection.box_coders import mean_stddev_box_coder\nfrom object_detection.core import box_coder\nfrom object_detection.core import box_list\nfrom object_detection.core import box_list_ops\nfrom object_detection.core import densepose_ops\nfrom object_detection.core import keypoint_ops\nfrom object_detection.core import matcher as mat\nfrom object_detection.core import region_similarity_calculator as sim_calc\nfrom object_detection.core import standard_fields as fields\nfrom object_detection.matchers import argmax_matcher\nfrom object_detection.matchers import hungarian_matcher\nfrom object_detection.utils import shape_utils\nfrom object_detection.utils import target_assigner_utils as ta_utils\nfrom object_detection.utils import tf_version\n\nif tf_version.is_tf1():\n from object_detection.matchers import bipartite_matcher # pylint: disable=g-import-not-at-top\n\nResizeMethod = tf2.image.ResizeMethod\n\n_DEFAULT_KEYPOINT_OFFSET_STD_DEV = 1.0\n\n\nclass TargetAssigner(object):\n \"\"\"Target assigner to compute classification and regression targets.\"\"\"\n\n def __init__(self,\n similarity_calc,\n matcher,\n box_coder_instance,\n negative_class_weight=1.0):\n \"\"\"Construct Object Detection Target Assigner.\n\n Args:\n similarity_calc: a RegionSimilarityCalculator\n matcher: an object_detection.core.Matcher used to match groundtruth to\n anchors.\n box_coder_instance: an object_detection.core.BoxCoder used to encode\n matching groundtruth boxes with respect to anchors.\n negative_class_weight: classification weight to be associated with negative\n anchors (default: 1.0). 
The weight must be in [0., 1.].\n\n Raises:\n ValueError: if similarity_calc is not a RegionSimilarityCalculator or\n if matcher is not a Matcher or if box_coder is not a BoxCoder\n \"\"\"\n if not isinstance(similarity_calc, sim_calc.RegionSimilarityCalculator):\n raise ValueError('similarity_calc must be a RegionSimilarityCalculator')\n if not isinstance(matcher, mat.Matcher):\n raise ValueError('matcher must be a Matcher')\n if not isinstance(box_coder_instance, box_coder.BoxCoder):\n raise ValueError('box_coder must be a BoxCoder')\n self._similarity_calc = similarity_calc\n self._matcher = matcher\n self._box_coder = box_coder_instance\n self._negative_class_weight = negative_class_weight\n\n @property\n def box_coder(self):\n return self._box_coder\n\n # TODO(rathodv): move labels, scores, and weights to groundtruth_boxes fields.\n def assign(self,\n anchors,\n groundtruth_boxes,\n groundtruth_labels=None,\n unmatched_class_label=None,\n groundtruth_weights=None):\n \"\"\"Assign classification and regression targets to each anchor.\n\n For a given set of anchors and groundtruth detections, match anchors\n to groundtruth_boxes and assign classification and regression targets to\n each anchor as well as weights based on the resulting match (specifying,\n e.g., which anchors should not contribute to training loss).\n\n Anchors that are not matched to anything are given a classification target\n of self._unmatched_cls_target which can be specified via the constructor.\n\n Args:\n anchors: a BoxList representing N anchors\n groundtruth_boxes: a BoxList representing M groundtruth boxes\n groundtruth_labels: a tensor of shape [M, d_1, ... d_k]\n with labels for each of the ground_truth boxes. The subshape\n [d_1, ... d_k] can be empty (corresponding to scalar inputs). When set\n to None, groundtruth_labels assumes a binary problem where all\n ground_truth boxes get a positive label (of 1).\n unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]\n which is consistent with the classification target for each\n anchor (and can be empty for scalar targets). This shape must thus be\n compatible with the groundtruth labels that are passed to the \"assign\"\n function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).\n If set to None, unmatched_cls_target is set to be [0] for each anchor.\n groundtruth_weights: a float tensor of shape [M] indicating the weight to\n assign to all anchors matched to a particular groundtruth box. The weights\n must be in [0., 1.]. If None, all weights are set to 1. Generally no\n groundtruth boxes with zero weight match to any anchors as matchers are\n aware of groundtruth weights. Additionally, `cls_weights` and\n `reg_weights` are calculated using groundtruth weights as an added\n safety.\n\n Returns:\n cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],\n where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels\n which has shape [num_gt_boxes, d_1, d_2, ... d_k].\n cls_weights: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],\n representing weights for each element in cls_targets.\n reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]\n reg_weights: a float32 tensor with shape [num_anchors]\n match: an int32 tensor of shape [num_anchors] containing result of anchor\n groundtruth matching. 
Each position in the tensor indicates an anchor\n and holds the following meaning:\n (1) if match[i] >= 0, anchor i is matched with groundtruth match[i].\n (2) if match[i]=-1, anchor i is marked to be background.\n (3) if match[i]=-2, anchor i is ignored since it is not background and\n does not have sufficient overlap to call it a foreground.\n\n Raises:\n ValueError: if anchors or groundtruth_boxes are not of type\n box_list.BoxList\n \"\"\"\n if not isinstance(anchors, box_list.BoxList):\n raise ValueError('anchors must be a BoxList')\n if not isinstance(groundtruth_boxes, box_list.BoxList):\n raise ValueError('groundtruth_boxes must be a BoxList')\n\n if unmatched_class_label is None:\n unmatched_class_label = tf.constant([0], tf.float32)\n\n if groundtruth_labels is None:\n groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(),\n 0))\n groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)\n\n unmatched_shape_assert = shape_utils.assert_shape_equal(\n shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[1:],\n shape_utils.combined_static_and_dynamic_shape(unmatched_class_label))\n labels_and_box_shapes_assert = shape_utils.assert_shape_equal(\n shape_utils.combined_static_and_dynamic_shape(\n groundtruth_labels)[:1],\n shape_utils.combined_static_and_dynamic_shape(\n groundtruth_boxes.get())[:1])\n\n if groundtruth_weights is None:\n num_gt_boxes = groundtruth_boxes.num_boxes_static()\n if not num_gt_boxes:\n num_gt_boxes = groundtruth_boxes.num_boxes()\n groundtruth_weights = tf.ones([num_gt_boxes], dtype=tf.float32)\n\n # set scores on the gt boxes\n scores = 1 - groundtruth_labels[:, 0]\n groundtruth_boxes.add_field(fields.BoxListFields.scores, scores)\n\n with tf.control_dependencies(\n [unmatched_shape_assert, labels_and_box_shapes_assert]):\n match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes,\n anchors)\n match = self._matcher.match(match_quality_matrix,\n valid_rows=tf.greater(groundtruth_weights, 0))\n reg_targets = self._create_regression_targets(anchors,\n groundtruth_boxes,\n match)\n cls_targets = self._create_classification_targets(groundtruth_labels,\n unmatched_class_label,\n match)\n reg_weights = self._create_regression_weights(match, groundtruth_weights)\n\n cls_weights = self._create_classification_weights(match,\n groundtruth_weights)\n # convert cls_weights from per-anchor to per-class.\n class_label_shape = tf.shape(cls_targets)[1:]\n weights_shape = tf.shape(cls_weights)\n weights_multiple = tf.concat(\n [tf.ones_like(weights_shape), class_label_shape],\n axis=0)\n for _ in range(len(cls_targets.get_shape()[1:])):\n cls_weights = tf.expand_dims(cls_weights, -1)\n cls_weights = tf.tile(cls_weights, weights_multiple)\n\n num_anchors = anchors.num_boxes_static()\n if num_anchors is not None:\n reg_targets = self._reset_target_shape(reg_targets, num_anchors)\n cls_targets = self._reset_target_shape(cls_targets, num_anchors)\n reg_weights = self._reset_target_shape(reg_weights, num_anchors)\n cls_weights = self._reset_target_shape(cls_weights, num_anchors)\n\n return (cls_targets, cls_weights, reg_targets, reg_weights,\n match.match_results)\n\n def _reset_target_shape(self, target, num_anchors):\n \"\"\"Sets the static shape of the target.\n\n Args:\n target: the target tensor. 
Its first dimension will be overwritten.\n num_anchors: the number of anchors, which is used to override the target's\n first dimension.\n\n Returns:\n A tensor with the shape info filled in.\n \"\"\"\n target_shape = target.get_shape().as_list()\n target_shape[0] = num_anchors\n target.set_shape(target_shape)\n return target\n\n def _create_regression_targets(self, anchors, groundtruth_boxes, match):\n \"\"\"Returns a regression target for each anchor.\n\n Args:\n anchors: a BoxList representing N anchors\n groundtruth_boxes: a BoxList representing M groundtruth_boxes\n match: a matcher.Match object\n\n Returns:\n reg_targets: a float32 tensor with shape [N, box_code_dimension]\n \"\"\"\n matched_gt_boxes = match.gather_based_on_match(\n groundtruth_boxes.get(),\n unmatched_value=tf.zeros(4),\n ignored_value=tf.zeros(4))\n matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)\n if groundtruth_boxes.has_field(fields.BoxListFields.keypoints):\n groundtruth_keypoints = groundtruth_boxes.get_field(\n fields.BoxListFields.keypoints)\n matched_keypoints = match.gather_based_on_match(\n groundtruth_keypoints,\n unmatched_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]),\n ignored_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]))\n matched_gt_boxlist.add_field(fields.BoxListFields.keypoints,\n matched_keypoints)\n matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors)\n match_results_shape = shape_utils.combined_static_and_dynamic_shape(\n match.match_results)\n\n # Zero out the unmatched and ignored regression targets.\n unmatched_ignored_reg_targets = tf.tile(\n self._default_regression_target(), [match_results_shape[0], 1])\n matched_anchors_mask = match.matched_column_indicator()\n reg_targets = tf.where(matched_anchors_mask,\n matched_reg_targets,\n unmatched_ignored_reg_targets)\n return reg_targets\n\n def _default_regression_target(self):\n \"\"\"Returns the default target for anchors to regress to.\n\n Default regression targets are set to zero (though in\n this implementation what these targets are set to should\n not matter as the regression weight of any box set to\n regress to the default target is zero).\n\n Returns:\n default_target: a float32 tensor with shape [1, box_code_dimension]\n \"\"\"\n return tf.constant([self._box_coder.code_size*[0]], tf.float32)\n\n def _create_classification_targets(self, groundtruth_labels,\n unmatched_class_label, match):\n \"\"\"Create classification targets for each anchor.\n\n Assign a classification target for each anchor to the matching\n groundtruth label that is provided by match. Anchors that are not matched\n to anything are given the target self._unmatched_cls_target.\n\n Args:\n groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k]\n with labels for each of the ground_truth boxes. The subshape\n [d_1, ... d_k] can be empty (corresponding to scalar labels).\n unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]\n which is consistent with the classification target for each\n anchor (and can be empty for scalar targets). This shape must thus be\n compatible with the groundtruth labels that are passed to the \"assign\"\n function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).\n match: a matcher.Match object that provides a matching between anchors\n and groundtruth boxes.\n\n Returns:\n a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the\n subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has\n shape [num_gt_boxes, d_1, d_2, ... 
d_k].\n \"\"\"\n return match.gather_based_on_match(\n groundtruth_labels,\n unmatched_value=unmatched_class_label,\n ignored_value=unmatched_class_label)\n\n def _create_regression_weights(self, match, groundtruth_weights):\n \"\"\"Set regression weight for each anchor.\n\n Only positive anchors are set to contribute to the regression loss, so this\n method returns a weight of 1 for every positive anchor and 0 for every\n negative anchor.\n\n Args:\n match: a matcher.Match object that provides a matching between anchors\n and groundtruth boxes.\n groundtruth_weights: a float tensor of shape [M] indicating the weight to\n assign to all anchors matched to a particular groundtruth box.\n\n Returns:\n a float32 tensor with shape [num_anchors] representing regression weights.\n \"\"\"\n return match.gather_based_on_match(\n groundtruth_weights, ignored_value=0., unmatched_value=0.)\n\n def _create_classification_weights(self,\n match,\n groundtruth_weights):\n \"\"\"Create classification weights for each anchor.\n\n Positive (matched) anchors are associated with a weight of\n positive_class_weight and negative (unmatched) anchors are associated with\n a weight of negative_class_weight. When anchors are ignored, weights are set\n to zero. By default, both positive/negative weights are set to 1.0,\n but they can be adjusted to handle class imbalance (which is almost always\n the case in object detection).\n\n Args:\n match: a matcher.Match object that provides a matching between anchors\n and groundtruth boxes.\n groundtruth_weights: a float tensor of shape [M] indicating the weight to\n assign to all anchors matched to a particular groundtruth box.\n\n Returns:\n a float32 tensor with shape [num_anchors] representing classification\n weights.\n \"\"\"\n return match.gather_based_on_match(\n groundtruth_weights,\n ignored_value=0.,\n unmatched_value=self._negative_class_weight)\n\n def get_box_coder(self):\n \"\"\"Get BoxCoder of this TargetAssigner.\n\n Returns:\n BoxCoder object.\n \"\"\"\n return self._box_coder\n\n\n# TODO(rathodv): This method pulls in all the implementation dependencies into\n# core. 
Therefore it's best to have this factory method outside of core.\ndef create_target_assigner(reference, stage=None,\n negative_class_weight=1.0, use_matmul_gather=False):\n \"\"\"Factory function for creating standard target assigners.\n\n Args:\n reference: string referencing the type of TargetAssigner.\n stage: string denoting stage: {proposal, detection}.\n negative_class_weight: classification weight to be associated with negative\n anchors (default: 1.0)\n use_matmul_gather: whether to use matrix multiplication based gather, which\n is better suited for TPUs.\n\n Returns:\n TargetAssigner: desired target assigner.\n\n Raises:\n ValueError: if combination reference+stage is invalid.\n \"\"\"\n if reference == 'Multibox' and stage == 'proposal':\n if tf_version.is_tf2():\n raise ValueError('GreedyBipartiteMatcher is not supported in TF 2.X.')\n similarity_calc = sim_calc.NegSqDistSimilarity()\n matcher = bipartite_matcher.GreedyBipartiteMatcher()\n box_coder_instance = mean_stddev_box_coder.MeanStddevBoxCoder()\n\n elif reference == 'FasterRCNN' and stage == 'proposal':\n similarity_calc = sim_calc.IouSimilarity()\n matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.7,\n unmatched_threshold=0.3,\n force_match_for_each_row=True,\n use_matmul_gather=use_matmul_gather)\n box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder(\n scale_factors=[10.0, 10.0, 5.0, 5.0])\n\n elif reference == 'FasterRCNN' and stage == 'detection':\n similarity_calc = sim_calc.IouSimilarity()\n # Uses all proposals with IOU < 0.5 as candidate negatives.\n matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,\n negatives_lower_than_unmatched=True,\n use_matmul_gather=use_matmul_gather)\n box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder(\n scale_factors=[10.0, 10.0, 5.0, 5.0])\n\n elif reference == 'FastRCNN':\n similarity_calc = sim_calc.IouSimilarity()\n matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,\n unmatched_threshold=0.1,\n force_match_for_each_row=False,\n negatives_lower_than_unmatched=False,\n use_matmul_gather=use_matmul_gather)\n box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder()\n\n else:\n raise ValueError('No valid combination of reference and stage.')\n\n return TargetAssigner(similarity_calc, matcher, box_coder_instance,\n negative_class_weight=negative_class_weight)\n\n\ndef batch_assign(target_assigner,\n anchors_batch,\n gt_box_batch,\n gt_class_targets_batch,\n unmatched_class_label=None,\n gt_weights_batch=None):\n \"\"\"Batched assignment of classification and regression targets.\n\n Args:\n target_assigner: a target assigner.\n anchors_batch: BoxList representing N box anchors or list of BoxList objects\n with length batch_size representing anchor sets.\n gt_box_batch: a list of BoxList objects with length batch_size\n representing groundtruth boxes for each image in the batch\n gt_class_targets_batch: a list of tensors with length batch_size, where\n each tensor has shape [num_gt_boxes_i, classification_target_size] and\n num_gt_boxes_i is the number of boxes in the ith boxlist of\n gt_box_batch.\n unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]\n which is consistent with the classification target for each\n anchor (and can be empty for scalar targets). 
This shape must thus be\n compatible with the groundtruth labels that are passed to the \"assign\"\n function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).\n gt_weights_batch: A list of 1-D tf.float32 tensors of shape\n [num_boxes] containing weights for groundtruth boxes.\n\n Returns:\n batch_cls_targets: a tensor with shape [batch_size, num_anchors,\n num_classes],\n batch_cls_weights: a tensor with shape [batch_size, num_anchors,\n num_classes],\n batch_reg_targets: a tensor with shape [batch_size, num_anchors,\n box_code_dimension]\n batch_reg_weights: a tensor with shape [batch_size, num_anchors],\n match: an int32 tensor of shape [batch_size, num_anchors] containing result\n of anchor groundtruth matching. Each position in the tensor indicates an\n anchor and holds the following meaning:\n (1) if match[x, i] >= 0, anchor i is matched with groundtruth match[x, i].\n (2) if match[x, i]=-1, anchor i is marked to be background .\n (3) if match[x, i]=-2, anchor i is ignored since it is not background and\n does not have sufficient overlap to call it a foreground.\n\n Raises:\n ValueError: if input list lengths are inconsistent, i.e.,\n batch_size == len(gt_box_batch) == len(gt_class_targets_batch)\n and batch_size == len(anchors_batch) unless anchors_batch is a single\n BoxList.\n \"\"\"\n if not isinstance(anchors_batch, list):\n anchors_batch = len(gt_box_batch) * [anchors_batch]\n if not all(\n isinstance(anchors, box_list.BoxList) for anchors in anchors_batch):\n raise ValueError('anchors_batch must be a BoxList or list of BoxLists.')\n if not (len(anchors_batch)\n == len(gt_box_batch)\n == len(gt_class_targets_batch)):\n raise ValueError('batch size incompatible with lengths of anchors_batch, '\n 'gt_box_batch and gt_class_targets_batch.')\n cls_targets_list = []\n cls_weights_list = []\n reg_targets_list = []\n reg_weights_list = []\n match_list = []\n if gt_weights_batch is None:\n gt_weights_batch = [None] * len(gt_class_targets_batch)\n for anchors, gt_boxes, gt_class_targets, gt_weights in zip(\n anchors_batch, gt_box_batch, gt_class_targets_batch, gt_weights_batch):\n (cls_targets, cls_weights,\n reg_targets, reg_weights, match) = target_assigner.assign(\n anchors, gt_boxes, gt_class_targets, unmatched_class_label,\n gt_weights)\n cls_targets_list.append(cls_targets)\n cls_weights_list.append(cls_weights)\n reg_targets_list.append(reg_targets)\n reg_weights_list.append(reg_weights)\n match_list.append(match)\n batch_cls_targets = tf.stack(cls_targets_list)\n batch_cls_weights = tf.stack(cls_weights_list)\n batch_reg_targets = tf.stack(reg_targets_list)\n batch_reg_weights = tf.stack(reg_weights_list)\n batch_match = tf.stack(match_list)\n return (batch_cls_targets, batch_cls_weights, batch_reg_targets,\n batch_reg_weights, batch_match)\n\n\n# Assign an alias to avoid large refactor of existing users.\nbatch_assign_targets = batch_assign\n\n\ndef batch_get_targets(batch_match, groundtruth_tensor_list,\n groundtruth_weights_list, unmatched_value,\n unmatched_weight):\n \"\"\"Returns targets based on anchor-groundtruth box matching results.\n\n Args:\n batch_match: An int32 tensor of shape [batch, num_anchors] containing the\n result of target assignment returned by TargetAssigner.assign(..).\n groundtruth_tensor_list: A list of groundtruth tensors of shape\n [num_groundtruth, d_1, d_2, ..., d_k]. 
The tensors can be of any type.\n groundtruth_weights_list: A list of weights, one per groundtruth tensor, of\n shape [num_groundtruth].\n unmatched_value: A tensor of shape [d_1, d_2, ..., d_k] of the same type as\n groundtruth tensor containing target value for anchors that remain\n unmatched.\n unmatched_weight: Scalar weight to assign to anchors that remain unmatched.\n\n Returns:\n targets: A tensor of shape [batch, num_anchors, d_1, d_2, ..., d_k]\n containing targets for anchors.\n weights: A float tensor of shape [batch, num_anchors] containing the weights\n to assign to each target.\n \"\"\"\n match_list = tf.unstack(batch_match)\n targets_list = []\n weights_list = []\n for match_tensor, groundtruth_tensor, groundtruth_weight in zip(\n match_list, groundtruth_tensor_list, groundtruth_weights_list):\n match_object = mat.Match(match_tensor)\n targets = match_object.gather_based_on_match(\n groundtruth_tensor,\n unmatched_value=unmatched_value,\n ignored_value=unmatched_value)\n targets_list.append(targets)\n weights = match_object.gather_based_on_match(\n groundtruth_weight,\n unmatched_value=unmatched_weight,\n ignored_value=tf.zeros_like(unmatched_weight))\n weights_list.append(weights)\n return tf.stack(targets_list), tf.stack(weights_list)\n\n\ndef batch_assign_confidences(target_assigner,\n anchors_batch,\n gt_box_batch,\n gt_class_confidences_batch,\n gt_weights_batch=None,\n unmatched_class_label=None,\n include_background_class=True,\n implicit_class_weight=1.0):\n \"\"\"Batched assignment of classification and regression targets.\n\n These are the differences between batch_assign_confidences and\n batch_assign_targets:\n - 'batch_assign_targets' supports scalar (agnostic), vector (multiclass) and\n tensor (high-dimensional) targets. 'batch_assign_confidences' only supports\n scalar (agnostic) and vector (multiclass) targets.\n - 'batch_assign_targets' assumes the input class tensor uses a binary\n one/K-hot encoding. 'batch_assign_confidences' takes the class confidence\n scores as the input, where 1 means positive classes, 0 means implicit\n negative classes, and -1 means explicit negative classes.\n - 'batch_assign_confidences' assigns the targets in a similar way to\n 'batch_assign_targets' except that it gives different weights for implicit\n and explicit classes. This allows the user to control how negative gradients\n are pushed for implicit and explicit examples during training.\n\n Args:\n target_assigner: a target assigner.\n anchors_batch: BoxList representing N box anchors or list of BoxList objects\n with length batch_size representing anchor sets.\n gt_box_batch: a list of BoxList objects with length batch_size\n representing groundtruth boxes for each image in the batch\n gt_class_confidences_batch: a list of tensors with length batch_size, where\n each tensor has shape [num_gt_boxes_i, classification_target_size] and\n num_gt_boxes_i is the number of boxes in the ith boxlist of\n gt_box_batch. Note that in this tensor, 1 means explicit positive class,\n -1 means explicit negative class, and 0 means implicit negative class.\n gt_weights_batch: A list of 1-D tf.float32 tensors of shape\n [num_gt_boxes_i] containing weights for groundtruth boxes.\n unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]\n which is consistent with the classification target for each\n anchor (and can be empty for scalar targets). 
This shape must thus be\n compatible with the groundtruth labels that are passed to the \"assign\"\n function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).\n include_background_class: whether or not gt_class_confidences_batch includes\n the background class.\n implicit_class_weight: the weight assigned to implicit examples.\n\n Returns:\n batch_cls_targets: a tensor with shape [batch_size, num_anchors,\n num_classes],\n batch_cls_weights: a tensor with shape [batch_size, num_anchors,\n num_classes],\n batch_reg_targets: a tensor with shape [batch_size, num_anchors,\n box_code_dimension]\n batch_reg_weights: a tensor with shape [batch_size, num_anchors],\n match: an int32 tensor of shape [batch_size, num_anchors] containing result\n of anchor groundtruth matching. Each position in the tensor indicates an\n anchor and holds the following meaning:\n (1) if match[x, i] >= 0, anchor i is matched with groundtruth match[x, i].\n (2) if match[x, i]=-1, anchor i is marked to be background .\n (3) if match[x, i]=-2, anchor i is ignored since it is not background and\n does not have sufficient overlap to call it a foreground.\n\n Raises:\n ValueError: if input list lengths are inconsistent, i.e.,\n batch_size == len(gt_box_batch) == len(gt_class_targets_batch)\n and batch_size == len(anchors_batch) unless anchors_batch is a single\n BoxList, or if any element in gt_class_confidences_batch has rank > 2.\n \"\"\"\n if not isinstance(anchors_batch, list):\n anchors_batch = len(gt_box_batch) * [anchors_batch]\n if not all(\n isinstance(anchors, box_list.BoxList) for anchors in anchors_batch):\n raise ValueError('anchors_batch must be a BoxList or list of BoxLists.')\n if not (len(anchors_batch)\n == len(gt_box_batch)\n == len(gt_class_confidences_batch)):\n raise ValueError('batch size incompatible with lengths of anchors_batch, '\n 'gt_box_batch and gt_class_confidences_batch.')\n\n cls_targets_list = []\n cls_weights_list = []\n reg_targets_list = []\n reg_weights_list = []\n match_list = []\n if gt_weights_batch is None:\n gt_weights_batch = [None] * len(gt_class_confidences_batch)\n for anchors, gt_boxes, gt_class_confidences, gt_weights in zip(\n anchors_batch, gt_box_batch, gt_class_confidences_batch,\n gt_weights_batch):\n\n if (gt_class_confidences is not None and\n len(gt_class_confidences.get_shape().as_list()) > 2):\n raise ValueError('The shape of the class target is not supported. 
',\n gt_class_confidences.get_shape())\n\n cls_targets, _, reg_targets, _, match = target_assigner.assign(\n anchors, gt_boxes, gt_class_confidences, unmatched_class_label,\n groundtruth_weights=gt_weights)\n\n if include_background_class:\n cls_targets_without_background = tf.slice(\n cls_targets, [0, 1], [-1, -1])\n else:\n cls_targets_without_background = cls_targets\n\n positive_mask = tf.greater(cls_targets_without_background, 0.0)\n negative_mask = tf.less(cls_targets_without_background, 0.0)\n explicit_example_mask = tf.logical_or(positive_mask, negative_mask)\n positive_anchors = tf.reduce_any(positive_mask, axis=-1)\n\n regression_weights = tf.cast(positive_anchors, dtype=tf.float32)\n regression_targets = (\n reg_targets * tf.expand_dims(regression_weights, axis=-1))\n regression_weights_expanded = tf.expand_dims(regression_weights, axis=-1)\n\n cls_targets_without_background = (\n cls_targets_without_background *\n (1 - tf.cast(negative_mask, dtype=tf.float32)))\n cls_weights_without_background = ((1 - implicit_class_weight) * tf.cast(\n explicit_example_mask, dtype=tf.float32) + implicit_class_weight)\n\n if include_background_class:\n cls_weights_background = (\n (1 - implicit_class_weight) * regression_weights_expanded\n + implicit_class_weight)\n classification_weights = tf.concat(\n [cls_weights_background, cls_weights_without_background], axis=-1)\n cls_targets_background = 1 - regression_weights_expanded\n classification_targets = tf.concat(\n [cls_targets_background, cls_targets_without_background], axis=-1)\n else:\n classification_targets = cls_targets_without_background\n classification_weights = cls_weights_without_background\n\n cls_targets_list.append(classification_targets)\n cls_weights_list.append(classification_weights)\n reg_targets_list.append(regression_targets)\n reg_weights_list.append(regression_weights)\n match_list.append(match)\n batch_cls_targets = tf.stack(cls_targets_list)\n batch_cls_weights = tf.stack(cls_weights_list)\n batch_reg_targets = tf.stack(reg_targets_list)\n batch_reg_weights = tf.stack(reg_weights_list)\n batch_match = tf.stack(match_list)\n return (batch_cls_targets, batch_cls_weights, batch_reg_targets,\n batch_reg_weights, batch_match)\n\n\ndef _smallest_positive_root(a, b, c):\n \"\"\"Returns the smallest positive root of a quadratic equation.\"\"\"\n\n discriminant = tf.sqrt(b ** 2 - 4 * a * c)\n\n # TODO(vighneshb) We are currently using the slightly incorrect\n # CenterNet implementation. The commented lines implement the fixed version\n # in https://github.com/princeton-vl/CornerNet. Change the implementation\n # after verifying it has no negative impact.\n # root1 = (-b - discriminant) / (2 * a)\n # root2 = (-b + discriminant) / (2 * a)\n\n # return tf.where(tf.less(root1, 0), root2, root1)\n\n return (-b + discriminant) / (2.0)\n\n\ndef max_distance_for_overlap(height, width, min_iou):\n \"\"\"Computes how far apart bbox corners can lie while maintaining the iou.\n\n Given a bounding box size, this function returns a lower bound on how far\n apart the corners of another box can lie while still maintaining the given\n IoU. 
The implementation is based on the `gaussian_radius` function in the\n Objects as Points github repo: https://github.com/xingyizhou/CenterNet\n\n Args:\n height: A 1-D float Tensor representing height of the ground truth boxes.\n width: A 1-D float Tensor representing width of the ground truth boxes.\n min_iou: A float representing the minimum IoU desired.\n\n Returns:\n distance: A 1-D Tensor of distances, of the same length as the input\n height and width tensors.\n \"\"\"\n\n # Given that the detected box is displaced at a distance `d`, the exact\n # IoU value will depend on the angle at which each corner is displaced.\n # We simplify our computation by assuming that each corner is displaced by\n # a distance `d` in both x and y direction. This gives us a lower IoU than\n # what is actually realizable and ensures that any box with corners less\n # than `d` distance apart will always have an IoU greater than or equal\n # to `min_iou`\n\n # The following 3 cases can be worked on geometrically and come down to\n # solving a quadratic inequality. In each case, to ensure `min_iou` we use\n # the smallest positive root of the equation.\n\n # Case where detected box is offset from ground truth and no box completely\n # contains the other.\n\n distance_detection_offset = _smallest_positive_root(\n a=1, b=-(height + width),\n c=width * height * ((1 - min_iou) / (1 + min_iou))\n )\n\n # Case where detection is smaller than ground truth and completely contained\n # in it.\n distance_detection_in_gt = _smallest_positive_root(\n a=4, b=-2 * (height + width),\n c=(1 - min_iou) * width * height\n )\n\n # Case where ground truth is smaller than detection and completely contained\n # in it.\n distance_gt_in_detection = _smallest_positive_root(\n a=4 * min_iou, b=(2 * min_iou) * (width + height),\n c=(min_iou - 1) * width * height\n )\n\n return tf.reduce_min([distance_detection_offset,\n distance_gt_in_detection,\n distance_detection_in_gt], axis=0)\n\n\ndef get_batch_predictions_from_indices(batch_predictions, indices):\n \"\"\"Gets the values of predictions in a batch at the given indices.\n\n The indices are expected to come from the offset targets generation functions\n in this library. The returned value is intended to be used inside a loss\n function.\n\n Args:\n batch_predictions: A tensor of shape [batch_size, height, width, channels]\n or [batch_size, height, width, class, channels] for class-specific\n features (e.g. 
keypoint joint offsets).\n indices: A tensor of shape [num_instances, 3] for single class features or\n [num_instances, 4] for multi-class features.\n\n Returns:\n values: A tensor of shape [num_instances, channels] holding the predicted\n values at the given indices.\n \"\"\"\n return tf.gather_nd(batch_predictions, indices)\n\n\ndef _compute_std_dev_from_box_size(boxes_height, boxes_width, min_overlap):\n \"\"\"Computes the standard deviation of the Gaussian kernel from box size.\n\n Args:\n boxes_height: A 1D tensor with shape [num_instances] representing the height\n of each box.\n boxes_width: A 1D tensor with shape [num_instances] representing the width\n of each box.\n min_overlap: The minimum IOU overlap that boxes need to have to not be\n penalized.\n\n Returns:\n A 1D tensor with shape [num_instances] representing the computed Gaussian\n sigma for each of the boxes.\n \"\"\"\n # We are dividing by 3 so that points closer than the computed\n # distance have a >99% CDF.\n sigma = max_distance_for_overlap(boxes_height, boxes_width, min_overlap)\n sigma = (2 * tf.math.maximum(tf.math.floor(sigma), 0.0) + 1) / 6.0\n return sigma\n\n\ndef _preprocess_keypoints_and_weights(out_height, out_width, keypoints,\n class_onehot, class_weights,\n keypoint_weights, class_id,\n keypoint_indices):\n \"\"\"Preprocesses the keypoints and the corresponding keypoint weights.\n\n This function performs several common steps to preprocess the keypoints and\n keypoint weights features, including:\n 1) Select the subset of keypoints based on the keypoint indices, fill the\n keypoint NaN values with zeros and convert to absolute coordinates.\n 2) Generate the weights of the keypoints using the following information:\n a. The class of the instance.\n b. The NaN value of the keypoint coordinates.\n c. The provided keypoint weights.\n\n Args:\n out_height: An integer or an integer tensor indicating the output height\n of the model.\n out_width: An integer or an integer tensor indicating the output width of\n the model.\n keypoints: A float tensor of shape [num_instances, num_total_keypoints, 2]\n representing the original groundtruth keypoint coordinates.\n class_onehot: A float tensor of shape [num_instances, num_classes]\n containing the class targets with the 0th index assumed to map to the\n first non-background class.\n class_weights: A float tensor of shape [num_instances] containing weights\n for groundtruth instances.\n keypoint_weights: A float tensor of shape\n [num_instances, num_total_keypoints] representing the weights of each\n keypoint.\n class_id: int, the ID of the class (0-indexed) that contains the target\n keypoints to consider in this task.\n keypoint_indices: A list of integers representing the indices of the\n keypoints to be considered in this task. 
def _preprocess_keypoints_and_weights(out_height, out_width, keypoints,
                                      class_onehot, class_weights,
                                      keypoint_weights, class_id,
                                      keypoint_indices):
  """Preprocesses the keypoints and the corresponding keypoint weights.

  This function performs several common steps to preprocess the keypoints and
  keypoint weights features, including:
    1) Select the subset of keypoints based on the keypoint indices, fill the
       keypoint NaN values with zeros and convert to absolute coordinates.
    2) Generate the weights of the keypoints using the following information:
       a. The class of the instance.
       b. The NaN value of the keypoint coordinates.
       c. The provided keypoint weights.

  Args:
    out_height: An integer or an integer tensor indicating the output height
      of the model.
    out_width: An integer or an integer tensor indicating the output width of
      the model.
    keypoints: A float tensor of shape [num_instances, num_total_keypoints, 2]
      representing the original keypoint groundtruth coordinates.
    class_onehot: A float tensor of shape [num_instances, num_classes]
      containing the class targets with the 0th index assumed to map to the
      first non-background class.
    class_weights: A float tensor of shape [num_instances] containing weights
      for groundtruth instances.
    keypoint_weights: A float tensor of shape
      [num_instances, num_total_keypoints] representing the weight of each
      keypoint.
    class_id: int, the ID of the class (0-indexed) that contains the target
      keypoints to consider in this task.
    keypoint_indices: A list of integers representing the indices of the
      keypoints to be considered in this task. This is used to retrieve the
      subset of the keypoints that should be considered in this task.

  Returns:
    A tuple of two tensors:
      keypoints_absolute: A float tensor of shape
        [num_instances, num_keypoints, 2] which is the selected and updated
        keypoint coordinates.
      keypoint_weights: A float tensor of shape [num_instances, num_keypoints]
        representing the updated weight of each keypoint.
  """
  # Select the target keypoints by their type ids and generate the mask
  # of valid elements.
  valid_mask, keypoints = ta_utils.get_valid_keypoint_mask_for_class(
      keypoint_coordinates=keypoints,
      class_id=class_id,
      class_onehot=class_onehot,
      class_weights=class_weights,
      keypoint_indices=keypoint_indices)
  # Keypoint coordinates in the absolute coordinate system.
  # The shape of the tensors: [num_instances, num_keypoints, 2].
  keypoints_absolute = keypoint_ops.to_absolute_coordinates(
      keypoints, out_height, out_width)
  # Assign default weights for the keypoints.
  if keypoint_weights is None:
    keypoint_weights = tf.ones_like(keypoints[:, :, 0])
  else:
    keypoint_weights = tf.gather(
        keypoint_weights, indices=keypoint_indices, axis=1)
  keypoint_weights = keypoint_weights * valid_mask
  return keypoints_absolute, keypoint_weights
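# Illustrative usage sketch (added for exposition; not part of the original
# module). Selects 2 keypoint types (indices 3 and 4) out of 5 for class id 1;
# the helper name and toy values are hypothetical.
def _example_preprocess_keypoints():
  """Demonstrates `_preprocess_keypoints_and_weights` on toy data."""
  # One instance with 5 keypoint types; type 2 is missing (NaN).
  keypoints = tf.constant([[[0.1, 0.2], [0.3, 0.4], [float('nan'), 0.5],
                            [0.6, 0.7], [0.8, 0.9]]])
  class_onehot = tf.constant([[0.0, 1.0]])  # The instance has class id 1.
  class_weights = tf.constant([1.0])
  keypoints_absolute, keypoint_weights = _preprocess_keypoints_and_weights(
      out_height=32, out_width=32, keypoints=keypoints,
      class_onehot=class_onehot, class_weights=class_weights,
      keypoint_weights=None, class_id=1, keypoint_indices=[3, 4])
  # keypoints_absolute: [1, 2, 2] in output-pixel coordinates.
  # keypoint_weights: [1, 2]; entries are zeroed for NaN or wrong-class
  # keypoints (none among the selected types here).
  return keypoints_absolute, keypoint_weights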
class CenterNetCenterHeatmapTargetAssigner(object):
  """Wrapper to compute the object center heatmap."""

  def __init__(self,
               stride,
               min_overlap=0.7,
               compute_heatmap_sparse=False,
               keypoint_class_id=None,
               keypoint_indices=None,
               keypoint_weights_for_center=None):
    """Initializes the target assigner.

    Args:
      stride: int, the stride of the network in output pixels.
      min_overlap: The minimum IOU overlap that boxes need to have to not be
        penalized.
      compute_heatmap_sparse: bool, indicating whether or not to use the sparse
        version of the Op that computes the heatmap. The sparse version scales
        better with the number of classes, but in some cases is known to cause
        an OOM error. See (b/170989061).
      keypoint_class_id: int, the ID of the class (0-indexed) that contains the
        target keypoints to consider in this task.
      keypoint_indices: A list of integers representing the indices of the
        keypoints to be considered in this task. This is used to retrieve the
        subset of the keypoints from gt_keypoints that should be considered in
        this task.
      keypoint_weights_for_center: The keypoint weights used for calculating
        the location of the object center. The number of weights needs to be
        the same as the number of keypoints. The object center is calculated
        by the weighted mean of the keypoint locations. If not provided, the
        object center is determined by the center of the bounding box (default
        behavior).
    """

    self._stride = stride
    self._min_overlap = min_overlap
    self._compute_heatmap_sparse = compute_heatmap_sparse
    self._keypoint_class_id = keypoint_class_id
    self._keypoint_indices = keypoint_indices
    self._keypoint_weights_for_center = keypoint_weights_for_center

  def assign_center_targets_from_boxes(self,
                                       height,
                                       width,
                                       gt_boxes_list,
                                       gt_classes_list,
                                       gt_weights_list=None):
    """Computes the object center heatmap target.

    Args:
      height: int, height of input to the model. This is used to
        determine the height of the output.
      width: int, width of the input to the model. This is used to
        determine the width of the output.
      gt_boxes_list: A list of float tensors with shape [num_boxes, 4]
        representing the groundtruth detection bounding boxes for each sample
        in the batch. The box coordinates are expected in normalized
        coordinates.
      gt_classes_list: A list of float tensors with shape [num_boxes,
        num_classes] representing the one-hot encoded class labels for each box
        in the gt_boxes_list.
      gt_weights_list: A list of float tensors with shape [num_boxes]
        representing the weight of each groundtruth detection box.

    Returns:
      heatmap: A Tensor of size [batch_size, output_height, output_width,
        num_classes] representing the per class center heatmap. output_height
        and output_width are computed by dividing the input height and width by
        the stride specified during initialization.
    """

    out_height = tf.cast(height // self._stride, tf.float32)
    out_width = tf.cast(width // self._stride, tf.float32)
    # Compute the yx-grid to be used to generate the heatmap. Each returned
    # tensor has shape of [out_height, out_width].
    (y_grid, x_grid) = ta_utils.image_shape_to_grids(out_height, out_width)

    heatmaps = []
    if gt_weights_list is None:
      gt_weights_list = [None] * len(gt_boxes_list)
    # TODO(vighneshb) Replace the for loop with a batch version.
    for boxes, class_targets, weights in zip(gt_boxes_list, gt_classes_list,
                                             gt_weights_list):
      boxes = box_list.BoxList(boxes)
      # Convert the box coordinates to absolute output image dimension space.
      boxes = box_list_ops.to_absolute_coordinates(boxes,
                                                   height // self._stride,
                                                   width // self._stride)
      # Get the box center coordinates. Each returned tensor has the shape of
      # [num_instances].
      (y_center, x_center, boxes_height,
       boxes_width) = boxes.get_center_coordinates_and_sizes()

      # Compute the sigma from box size. The tensor shape: [num_instances].
      sigma = _compute_std_dev_from_box_size(boxes_height, boxes_width,
                                             self._min_overlap)
      # Apply the Gaussian kernel to the center coordinates. Returned heatmap
      # has shape of [out_height, out_width, num_classes].
      heatmap = ta_utils.coordinates_to_heatmap(
          y_grid=y_grid,
          x_grid=x_grid,
          y_coordinates=y_center,
          x_coordinates=x_center,
          sigma=sigma,
          channel_onehot=class_targets,
          channel_weights=weights,
          sparse=self._compute_heatmap_sparse)
      heatmaps.append(heatmap)

    # Return the stacked heatmaps over the batch.
    return tf.stack(heatmaps, axis=0)

  def assign_center_targets_from_keypoints(self,
                                           height,
                                           width,
                                           gt_classes_list,
                                           gt_keypoints_list,
                                           gt_weights_list=None,
                                           gt_keypoints_weights_list=None):
    """Computes the object center heatmap target using keypoint locations.

    Args:
      height: int, height of input to the model. This is used to
        determine the height of the output.
      width: int, width of the input to the model. This is used to
        determine the width of the output.
      gt_classes_list: A list of float tensors with shape [num_instances,
        num_classes] representing the one-hot encoded class labels for each
        instance.
      gt_keypoints_list: A list of float tensors with shape [num_instances,
        num_total_keypoints, 2] representing the groundtruth keypoint
        coordinates for each sample in the batch. The coordinates are expected
        to be normalized and missing keypoints should be encoded as NaN.
      gt_weights_list: A list of float tensors with shape [num_instances]
        representing the weight of each groundtruth instance.
      gt_keypoints_weights_list: [Optional] a list of 2D tf.float32 tensors of
        shape [num_instances, num_total_keypoints] representing the weight of
        each keypoint. 
If not provided, then all not NaN keypoints will be\n equally weighted.\n\n Returns:\n heatmap: A Tensor of size [batch_size, output_height, output_width,\n num_classes] representing the per class center heatmap. output_height\n and output_width are computed by dividing the input height and width by\n the stride specified during initialization.\n \"\"\"\n assert (self._keypoint_weights_for_center is not None and\n self._keypoint_class_id is not None and\n self._keypoint_indices is not None)\n out_height = tf.cast(height // self._stride, tf.float32)\n out_width = tf.cast(width // self._stride, tf.float32)\n # Compute the yx-grid to be used to generate the heatmap. Each returned\n # tensor has shape of [out_height, out_width]\n (y_grid, x_grid) = ta_utils.image_shape_to_grids(out_height, out_width)\n\n heatmaps = []\n if gt_weights_list is None:\n gt_weights_list = [None] * len(gt_classes_list)\n if gt_keypoints_weights_list is None:\n gt_keypoints_weights_list = [None] * len(gt_keypoints_list)\n\n for keypoints, classes, kp_weights, weights in zip(\n gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list,\n gt_weights_list):\n\n keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights(\n out_height=out_height,\n out_width=out_width,\n keypoints=keypoints,\n class_onehot=classes,\n class_weights=weights,\n keypoint_weights=kp_weights,\n class_id=self._keypoint_class_id,\n keypoint_indices=self._keypoint_indices)\n # _, num_keypoints, _ = (\n # shape_utils.combined_static_and_dynamic_shape(keypoints_absolute))\n\n # Update the keypoint weights by the specified keypoints weights.\n kp_loc_weights = tf.constant(\n self._keypoint_weights_for_center, dtype=tf.float32)\n updated_kp_weights = kp_weights * kp_loc_weights[tf.newaxis, :]\n\n # Obtain the sum of the weights for each instance.\n # instance_weight_sum has shape: [num_instance].\n instance_weight_sum = tf.reduce_sum(updated_kp_weights, axis=1)\n\n # Weight the keypoint coordinates by updated_kp_weights.\n # weighted_keypoints has shape: [num_instance, num_keypoints, 2]\n weighted_keypoints = keypoints_absolute * tf.expand_dims(\n updated_kp_weights, axis=2)\n\n # Compute the mean of the keypoint coordinates over the weighted\n # keypoints.\n # keypoint_mean has shape: [num_instance, 2]\n keypoint_mean = tf.math.divide(\n tf.reduce_sum(weighted_keypoints, axis=1),\n tf.expand_dims(instance_weight_sum, axis=-1))\n\n # Replace the NaN values (due to divided by zeros in the above operation)\n # by 0.0 where the sum of instance weight is zero.\n # keypoint_mean has shape: [num_instance, 2]\n keypoint_mean = tf.where(\n tf.stack([instance_weight_sum, instance_weight_sum], axis=1) > 0.0,\n keypoint_mean, tf.zeros_like(keypoint_mean))\n\n # Compute the distance from each keypoint to the mean location using\n # broadcasting and weighted by updated_kp_weights.\n # keypoint_dist has shape: [num_instance, num_keypoints]\n keypoint_mean = tf.expand_dims(keypoint_mean, axis=1)\n keypoint_dist = tf.math.sqrt(\n tf.reduce_sum(\n tf.math.square(keypoints_absolute - keypoint_mean), axis=2))\n keypoint_dist = keypoint_dist * updated_kp_weights\n\n # Compute the average of the distances from each keypoint to the mean\n # location and update the average value by zero when the instance weight\n # is zero.\n # avg_radius has shape: [num_instance]\n avg_radius = tf.math.divide(\n tf.reduce_sum(keypoint_dist, axis=1), instance_weight_sum)\n avg_radius = tf.where(\n instance_weight_sum > 0.0, avg_radius, tf.zeros_like(avg_radius))\n\n # Update the 
class instance weight. If the instance doesn't contain enough
      # valid keypoint values (i.e. instance_weight_sum == 0.0), then set the
      # instance weight to zero.
      # updated_class_weights has shape: [num_instances]
      updated_class_weights = tf.where(
          instance_weight_sum > 0.0, weights, tf.zeros_like(weights))

      # Compute the sigma from the average distance. We use 2 * average
      # distance to approximate the width/height of the bounding box.
      # sigma has shape: [num_instances].
      sigma = _compute_std_dev_from_box_size(2 * avg_radius, 2 * avg_radius,
                                             self._min_overlap)

      # Apply the Gaussian kernel to the center coordinates. Returned heatmap
      # has shape of [out_height, out_width, num_classes].
      heatmap = ta_utils.coordinates_to_heatmap(
          y_grid=y_grid,
          x_grid=x_grid,
          y_coordinates=keypoint_mean[:, 0, 0],
          x_coordinates=keypoint_mean[:, 0, 1],
          sigma=sigma,
          channel_onehot=classes,
          channel_weights=updated_class_weights,
          sparse=self._compute_heatmap_sparse)
      heatmaps.append(heatmap)

    # Return the stacked heatmaps over the batch.
    return tf.stack(heatmaps, axis=0)
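# Illustrative usage sketch (added for exposition; not part of the original
# module). Builds the center heatmap target for one image with two boxes of
# the same class; the helper name and concrete numbers are hypothetical.
def _example_center_heatmap_targets():
  """Demonstrates CenterNetCenterHeatmapTargetAssigner with box centers."""
  assigner = CenterNetCenterHeatmapTargetAssigner(stride=4)
  # Two normalized [ymin, xmin, ymax, xmax] boxes in a single image.
  gt_boxes_list = [tf.constant([[0.1, 0.1, 0.5, 0.5],
                                [0.4, 0.4, 0.9, 0.9]])]
  # One-hot class labels: both boxes belong to class 0 out of 3 classes.
  gt_classes_list = [tf.one_hot([0, 0], depth=3)]
  heatmap = assigner.assign_center_targets_from_boxes(
      height=128, width=128, gt_boxes_list=gt_boxes_list,
      gt_classes_list=gt_classes_list)
  # With stride 4, `heatmap` has shape [1, 32, 32, 3].
  return heatmap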
class CenterNetBoxTargetAssigner(object):
  """Wrapper to compute target tensors for the object detection task.

  This class has methods that take as input a batch of ground truth tensors
  (in the form of a list) and return the targets required to train the object
  detection task.
  """

  def __init__(self, stride):
    """Initializes the target assigner.

    Args:
      stride: int, the stride of the network in output pixels.
    """

    self._stride = stride

  def assign_size_and_offset_targets(self,
                                     height,
                                     width,
                                     gt_boxes_list,
                                     gt_weights_list=None):
    """Returns the box height/width and center offset targets and their indices.

    The returned values are expected to be used with predicted tensors
    of size (batch_size, height//self._stride, width//self._stride, 2). The
    predicted values at the relevant indices can be retrieved with the
    get_batch_predictions_from_indices function.

    Args:
      height: int, height of input to the model. This is used to determine the
        height of the output.
      width: int, width of the input to the model. This is used to determine
        the width of the output.
      gt_boxes_list: A list of float tensors with shape [num_boxes, 4]
        representing the groundtruth detection bounding boxes for each sample
        in the batch. The coordinates are expected in normalized coordinates.
      gt_weights_list: A list of tensors with shape [num_boxes] corresponding
        to the weight of each groundtruth detection box.

    Returns:
      batch_indices: an integer tensor of shape [num_boxes, 3] holding the
        indices inside the predicted tensor which should be penalized. The
        first column indicates the index along the batch dimension and the
        second and third columns indicate the index along the y and x
        dimensions respectively.
      batch_box_height_width: a float tensor of shape [num_boxes, 2] holding
        the expected height and width of each box in the output space.
      batch_offsets: a float tensor of shape [num_boxes, 2] holding the
        expected y and x offset of each box in the output space.
      batch_weights: a float tensor of shape [num_boxes] indicating the
        weight of each prediction.
    """

    if gt_weights_list is None:
      gt_weights_list = [None] * len(gt_boxes_list)

    batch_indices = []
    batch_box_height_width = []
    batch_weights = []
    batch_offsets = []

    for i, (boxes, weights) in enumerate(zip(gt_boxes_list, gt_weights_list)):
      boxes = box_list.BoxList(boxes)
      boxes = box_list_ops.to_absolute_coordinates(boxes,
                                                   height // self._stride,
                                                   width // self._stride)
      # Get the box center coordinates. Each returned tensor has the shape of
      # [num_boxes].
      (y_center, x_center, boxes_height,
       boxes_width) = boxes.get_center_coordinates_and_sizes()
      num_boxes = tf.shape(x_center)

      # Compute the offsets and indices of the box centers. Shape:
      #   offsets: [num_boxes, 2]
      #   indices: [num_boxes, 2]
      (offsets, indices) = ta_utils.compute_floor_offsets_with_indices(
          y_source=y_center, x_source=x_center)

      # Assign ones if weights are not provided.
      if weights is None:
        weights = tf.ones(num_boxes, dtype=tf.float32)

      # Shape of [num_boxes, 1] integer tensor filled with current batch index.
      batch_index = i * tf.ones_like(indices[:, 0:1], dtype=tf.int32)
      batch_indices.append(tf.concat([batch_index, indices], axis=1))
      batch_box_height_width.append(
          tf.stack([boxes_height, boxes_width], axis=1))
      batch_weights.append(weights)
      batch_offsets.append(offsets)

    batch_indices = tf.concat(batch_indices, axis=0)
    batch_box_height_width = tf.concat(batch_box_height_width, axis=0)
    batch_weights = tf.concat(batch_weights, axis=0)
    batch_offsets = tf.concat(batch_offsets, axis=0)
    return (batch_indices, batch_box_height_width, batch_offsets,
            batch_weights)
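# Illustrative usage sketch (added for exposition; not part of the original
# module). Box size/offset targets for one image; the helper name
# `_example_box_size_and_offset_targets` and the concrete numbers are
# hypothetical.
def _example_box_size_and_offset_targets():
  """Demonstrates CenterNetBoxTargetAssigner on a single normalized box."""
  assigner = CenterNetBoxTargetAssigner(stride=4)
  gt_boxes_list = [tf.constant([[0.0, 0.0, 0.5, 0.5]])]
  (batch_indices, batch_box_height_width, batch_offsets,
   batch_weights) = assigner.assign_size_and_offset_targets(
       height=128, width=128, gt_boxes_list=gt_boxes_list)
  # A [1, 32, 32, 2] size (or offset) prediction could now be penalized only
  # at `batch_indices`, via get_batch_predictions_from_indices above.
  return batch_indices, batch_box_height_width, batch_offsets, batch_weights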
# TODO(yuhuic): Update this class to handle the instance/keypoint weights.
# Currently those weights are used as "mask" to indicate whether an
# instance/keypoint should be considered or not (expecting only either 0 or 1
# value). In reality, the weights can be any value and this class should handle
# those values properly.
class CenterNetKeypointTargetAssigner(object):
  """Wrapper to compute target tensors for the CenterNet keypoint estimation.

  This class has methods that take as input a batch of groundtruth tensors
  (in the form of a list) and returns the targets required to train the
  CenterNet model for keypoint estimation. Specifically, the class methods
  expect the groundtruth in the following formats (consistent with the
  standard Object Detection API). Note that usually the groundtruth tensors are
  packed with a list which represents the batch dimension:

  gt_classes_list: [Required] a list of 2D tf.float32 one-hot
    (or k-hot) tensors of shape [num_instances, num_classes] containing the
    class targets with the 0th index assumed to map to the first non-background
    class.
  gt_keypoints_list: [Required] a list of 3D tf.float32 tensors of
    shape [num_instances, num_total_keypoints, 2] containing keypoint
    coordinates. Note that the "num_total_keypoints" should be the sum of the
    num_keypoints over all possible keypoint types, e.g. human pose, face.
    For example, if a dataset contains both 17 human pose keypoints and 5 face
    keypoints, then num_total_keypoints = 17 + 5 = 22.
    If an instance contains only a subset of keypoints (e.g. human pose
    keypoints but not face keypoints), the face keypoints will be filled with
    zeros.
    Also note that keypoints are assumed to be provided in normalized
    coordinates and missing keypoints should be encoded as NaN.
  gt_keypoints_weights_list: [Optional] a list of 2D tf.float32 tensors of
    shape [num_instances, num_total_keypoints] representing the weight of each
    keypoint. If not provided, then all non-NaN keypoints will be equally
    weighted.
  gt_boxes_list: [Optional] a list of 2D tf.float32 tensors of shape
    [num_instances, 4] containing coordinates of the groundtruth boxes.
    Groundtruth boxes are provided in [y_min, x_min, y_max, x_max] format and
    assumed to be normalized and clipped relative to the image window with
    y_min <= y_max and x_min <= x_max.
    Note that the boxes are only used to compute the center targets but are not
    considered as a required output of the keypoint task. If the boxes were not
    provided, the center targets will be inferred from the keypoints
    [not implemented yet].
  gt_weights_list: [Optional] A list of 1D tf.float32 tensors of shape
    [num_instances] containing weights for groundtruth boxes. Only useful when
    gt_boxes_list is also provided.
  """

  def __init__(self,
               stride,
               class_id,
               keypoint_indices,
               keypoint_std_dev=None,
               per_keypoint_offset=False,
               peak_radius=0,
               compute_heatmap_sparse=False):
    """Initializes a CenterNet keypoints target assigner.

    Args:
      stride: int, the stride of the network in output pixels.
      class_id: int, the ID of the class (0-indexed) that contains the target
        keypoints to consider in this task. For example, if the task is human
        pose estimation, the class id should correspond to the "human" class.
      keypoint_indices: A list of integers representing the indices of the
        keypoints to be considered in this task. This is used to retrieve the
        subset of the keypoints from gt_keypoints that should be considered in
        this task.
      keypoint_std_dev: A list of floats representing the standard deviation
        of the Gaussian kernel used to generate the keypoint heatmap (in the
        unit of output pixels). It provides the flexibility of using different
        sizes of Gaussian kernel for each keypoint type. If not provided, then
        all standard deviations will be the same as the default value (10.0 in
        the output pixel space). If provided, the length of keypoint_std_dev
        needs to be the same as the length of keypoint_indices, indicating the
        standard deviation of each keypoint type.
      per_keypoint_offset: boolean, indicating whether to assign offsets for
        each keypoint channel. If set False, the output offset target will have
        the shape [batch_size, out_height, out_width, 2]. If set True, the
        output offset target will have the shape [batch_size, out_height,
        out_width, 2 * num_keypoints].
      peak_radius: int, the radius (in the unit of output pixels) around each
        heatmap peak to assign the offset targets.
      compute_heatmap_sparse: bool, indicating whether or not to use the sparse
        version of the Op that computes the heatmap. The sparse version scales
        better with the number of keypoint types, but in some cases is known to
        cause an OOM error. See (b/170989061).
    """

    self._stride = stride
    self._class_id = class_id
    self._keypoint_indices = keypoint_indices
    self._per_keypoint_offset = per_keypoint_offset
    self._peak_radius = peak_radius
    self._compute_heatmap_sparse = compute_heatmap_sparse
    if keypoint_std_dev is None:
      self._keypoint_std_dev = ([_DEFAULT_KEYPOINT_OFFSET_STD_DEV] *
                                len(keypoint_indices))
    else:
      assert len(keypoint_indices) == len(keypoint_std_dev)
      self._keypoint_std_dev = keypoint_std_dev
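  # Illustrative usage note (added for exposition; not part of the original
  # class). A typical construction for COCO-style human pose, assuming the
  # 17 pose keypoints are the first 17 keypoint types and "person" is class 0;
  # both assumptions are hypothetical here:
  #
  #   assigner = CenterNetKeypointTargetAssigner(
  #       stride=4, class_id=0, keypoint_indices=list(range(17)))
  #   heatmap, num_instances, valid_mask = (
  #       assigner.assign_keypoint_heatmap_targets(
  #           height=128, width=128,
  #           gt_keypoints_list=gt_keypoints_list,
  #           gt_classes_list=gt_classes_list))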
  def assign_keypoint_heatmap_targets(self,
                                      height,
                                      width,
                                      gt_keypoints_list,
                                      gt_classes_list,
                                      gt_keypoints_weights_list=None,
                                      gt_weights_list=None,
                                      gt_boxes_list=None):
    """Returns the keypoint heatmap targets for the CenterNet model.

    Args:
      height: int, height of input to the CenterNet model. This is used to
        determine the height of the output.
      width: int, width of the input to the CenterNet model. This is used to
        determine the width of the output.
      gt_keypoints_list: A list of float tensors with shape [num_instances,
        num_total_keypoints, 2]. See class-level description for more detail.
      gt_classes_list: A list of float tensors with shape [num_instances,
        num_classes]. See class-level description for more detail.
      gt_keypoints_weights_list: A list of tensors with shape [num_instances,
        num_total_keypoints] corresponding to the weight of each keypoint.
      gt_weights_list: A list of float tensors with shape [num_instances]. See
        class-level description for more detail.
      gt_boxes_list: A list of float tensors with shape [num_instances, 4]. See
        class-level description for more detail. If provided, the keypoint
        standard deviations will be scaled based on the box sizes.

    Returns:
      heatmap: A float tensor of shape [batch_size, output_height,
        output_width, num_keypoints] representing the per keypoint type center
        heatmap. output_height and output_width are computed by dividing the
        input height and width by the stride specified during initialization.
        Note that the "num_keypoints" is defined by the length of
        keypoint_indices, which is not necessarily equal to
        "num_total_keypoints".
      num_instances_batch: A 2D int tensor of shape
        [batch_size, num_keypoints] representing the number of instances for
        each keypoint type.
      valid_mask: A float tensor with shape [batch_size, output_height,
        output_width] where all values within the regions of the blackout boxes
        are 0.0 and 1.0 elsewhere.
    """
    out_width = tf.cast(width // self._stride, tf.float32)
    out_height = tf.cast(height // self._stride, tf.float32)
    # Compute the yx-grid to be used to generate the heatmap. 
Each returned\n # tensor has shape of [out_height, out_width]\n y_grid, x_grid = ta_utils.image_shape_to_grids(out_height, out_width)\n\n if gt_keypoints_weights_list is None:\n gt_keypoints_weights_list = [None] * len(gt_keypoints_list)\n if gt_weights_list is None:\n gt_weights_list = [None] * len(gt_classes_list)\n if gt_boxes_list is None:\n gt_boxes_list = [None] * len(gt_keypoints_list)\n\n heatmaps = []\n num_instances_list = []\n valid_mask_list = []\n for keypoints, classes, kp_weights, weights, boxes in zip(\n gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list,\n gt_weights_list, gt_boxes_list):\n keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights(\n out_height=out_height,\n out_width=out_width,\n keypoints=keypoints,\n class_onehot=classes,\n class_weights=weights,\n keypoint_weights=kp_weights,\n class_id=self._class_id,\n keypoint_indices=self._keypoint_indices)\n num_instances, num_keypoints, _ = (\n shape_utils.combined_static_and_dynamic_shape(keypoints_absolute))\n\n # A tensor of shape [num_instances, num_keypoints] with\n # each element representing the type dimension for each corresponding\n # keypoint:\n # [[0, 1, ..., k-1],\n # [0, 1, ..., k-1],\n # :\n # [0, 1, ..., k-1]]\n keypoint_types = tf.tile(\n input=tf.expand_dims(tf.range(num_keypoints), axis=0),\n multiples=[num_instances, 1])\n\n # A tensor of shape [num_instances, num_keypoints] with\n # each element representing the sigma of the Gaussian kernel for each\n # keypoint.\n keypoint_std_dev = tf.tile(\n input=tf.expand_dims(tf.constant(self._keypoint_std_dev), axis=0),\n multiples=[num_instances, 1])\n\n # If boxes is not None, then scale the standard deviation based on the\n # size of the object bounding boxes similar to object center heatmap.\n if boxes is not None:\n boxes = box_list.BoxList(boxes)\n # Convert the box coordinates to absolute output image dimension space.\n boxes = box_list_ops.to_absolute_coordinates(boxes,\n height // self._stride,\n width // self._stride)\n # Get the box height and width. Each returned tensors have the shape\n # of [num_instances]\n (_, _, boxes_height,\n boxes_width) = boxes.get_center_coordinates_and_sizes()\n\n # Compute the sigma from box size. The tensor shape: [num_instances].\n sigma = _compute_std_dev_from_box_size(boxes_height, boxes_width, 0.7)\n keypoint_std_dev = keypoint_std_dev * tf.stack(\n [sigma] * num_keypoints, axis=1)\n\n # Generate the valid region mask to ignore regions with target class but\n # no corresponding keypoints.\n # Shape: [num_instances].\n blackout = tf.logical_and(classes[:, self._class_id] > 0,\n tf.reduce_max(kp_weights, axis=1) < 1e-3)\n valid_mask = ta_utils.blackout_pixel_weights_by_box_regions(\n out_height, out_width, boxes.get(), blackout)\n valid_mask_list.append(valid_mask)\n\n # Apply the Gaussian kernel to the keypoint coordinates. 
Returned heatmap\n # has shape of [out_height, out_width, num_keypoints].\n heatmap = ta_utils.coordinates_to_heatmap(\n y_grid=y_grid,\n x_grid=x_grid,\n y_coordinates=tf.keras.backend.flatten(keypoints_absolute[:, :, 0]),\n x_coordinates=tf.keras.backend.flatten(keypoints_absolute[:, :, 1]),\n sigma=tf.keras.backend.flatten(keypoint_std_dev),\n channel_onehot=tf.one_hot(\n tf.keras.backend.flatten(keypoint_types), depth=num_keypoints),\n channel_weights=tf.keras.backend.flatten(kp_weights))\n num_instances_list.append(\n tf.cast(tf.reduce_sum(kp_weights, axis=0), dtype=tf.int32))\n heatmaps.append(heatmap)\n return (tf.stack(heatmaps, axis=0), tf.stack(num_instances_list, axis=0),\n tf.stack(valid_mask_list, axis=0))\n\n def _get_keypoint_types(self, num_instances, num_keypoints, num_neighbors):\n \"\"\"Gets keypoint type index tensor.\n\n The function prepares the tensor of keypoint indices with shape\n [num_instances, num_keypoints, num_neighbors]. Each element represents the\n keypoint type index for each corresponding keypoint and tiled along the 3rd\n axis:\n [[0, 1, ..., num_keypoints - 1],\n [0, 1, ..., num_keypoints - 1],\n :\n [0, 1, ..., num_keypoints - 1]]\n\n Args:\n num_instances: int, the number of instances, used to define the 1st\n dimension.\n num_keypoints: int, the number of keypoint types, used to define the 2nd\n dimension.\n num_neighbors: int, the number of neighborhood pixels to consider for each\n keypoint, used to define the 3rd dimension.\n\n Returns:\n A integer tensor of shape [num_instances, num_keypoints, num_neighbors].\n \"\"\"\n keypoint_types = tf.range(num_keypoints)[tf.newaxis, :, tf.newaxis]\n tiled_keypoint_types = tf.tile(keypoint_types,\n multiples=[num_instances, 1, num_neighbors])\n return tiled_keypoint_types\n\n def assign_keypoints_offset_targets(self,\n height,\n width,\n gt_keypoints_list,\n gt_classes_list,\n gt_keypoints_weights_list=None,\n gt_weights_list=None):\n \"\"\"Returns the offsets and indices of the keypoints for location refinement.\n\n The returned values are used to refine the location of each keypoints in the\n heatmap. The predicted values at the relevant indices can be retrieved with\n the get_batch_predictions_from_indices function.\n\n Args:\n height: int, height of input to the CenterNet model. This is used to\n determine the height of the output.\n width: int, width of the input to the CenterNet model. This is used to\n determine the width of the output.\n gt_keypoints_list: A list of tensors with shape [num_instances,\n num_total_keypoints]. See class-level description for more detail.\n gt_classes_list: A list of tensors with shape [num_instances,\n num_classes]. See class-level description for more detail.\n gt_keypoints_weights_list: A list of tensors with shape [num_instances,\n num_total_keypoints] corresponding to the weight of each keypoint.\n gt_weights_list: A list of float tensors with shape [num_instances]. See\n class-level description for more detail.\n\n Returns:\n batch_indices: an integer tensor of shape [num_total_instances, 3] (or\n [num_total_instances, 4] if 'per_keypoint_offset' is set True) holding\n the indices inside the predicted tensor which should be penalized. The\n first column indicates the index along the batch dimension and the\n second and third columns indicate the index along the y and x\n dimensions respectively. 
The fourth column corresponds to the channel\n dimension (if 'per_keypoint_offset' is set True).\n batch_offsets: a float tensor of shape [num_total_instances, 2] holding\n the expected y and x offset of each box in the output space.\n batch_weights: a float tensor of shape [num_total_instances] indicating\n the weight of each prediction.\n Note that num_total_instances = batch_size * num_instances *\n num_keypoints * num_neighbors\n \"\"\"\n\n batch_indices = []\n batch_offsets = []\n batch_weights = []\n\n if gt_keypoints_weights_list is None:\n gt_keypoints_weights_list = [None] * len(gt_keypoints_list)\n if gt_weights_list is None:\n gt_weights_list = [None] * len(gt_classes_list)\n for i, (keypoints, classes, kp_weights, weights) in enumerate(\n zip(gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list,\n gt_weights_list)):\n keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights(\n out_height=height // self._stride,\n out_width=width // self._stride,\n keypoints=keypoints,\n class_onehot=classes,\n class_weights=weights,\n keypoint_weights=kp_weights,\n class_id=self._class_id,\n keypoint_indices=self._keypoint_indices)\n num_instances, num_keypoints, _ = (\n shape_utils.combined_static_and_dynamic_shape(keypoints_absolute))\n\n # [num_instances * num_keypoints]\n y_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 0])\n x_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 1])\n\n # All keypoint coordinates and their neighbors:\n # [num_instance * num_keypoints, num_neighbors]\n (y_source_neighbors, x_source_neighbors,\n valid_sources) = ta_utils.get_surrounding_grids(height // self._stride,\n width // self._stride,\n y_source, x_source,\n self._peak_radius)\n _, num_neighbors = shape_utils.combined_static_and_dynamic_shape(\n y_source_neighbors)\n\n # Update the valid keypoint weights.\n # [num_instance * num_keypoints, num_neighbors]\n valid_keypoints = tf.cast(\n valid_sources, dtype=tf.float32) * tf.stack(\n [tf.keras.backend.flatten(kp_weights)] * num_neighbors, axis=-1)\n\n # Compute the offsets and indices of the box centers. 
Shape:\n # offsets: [num_instances * num_keypoints, num_neighbors, 2]\n # indices: [num_instances * num_keypoints, num_neighbors, 2]\n offsets, indices = ta_utils.compute_floor_offsets_with_indices(\n y_source=y_source_neighbors,\n x_source=x_source_neighbors,\n y_target=y_source,\n x_target=x_source)\n # Reshape to:\n # offsets: [num_instances * num_keypoints * num_neighbors, 2]\n # indices: [num_instances * num_keypoints * num_neighbors, 2]\n offsets = tf.reshape(offsets, [-1, 2])\n indices = tf.reshape(indices, [-1, 2])\n\n # Prepare the batch indices to be prepended.\n batch_index = tf.fill(\n [num_instances * num_keypoints * num_neighbors, 1], i)\n if self._per_keypoint_offset:\n tiled_keypoint_types = self._get_keypoint_types(\n num_instances, num_keypoints, num_neighbors)\n batch_indices.append(\n tf.concat([batch_index, indices,\n tf.reshape(tiled_keypoint_types, [-1, 1])], axis=1))\n else:\n batch_indices.append(tf.concat([batch_index, indices], axis=1))\n batch_offsets.append(offsets)\n batch_weights.append(tf.keras.backend.flatten(valid_keypoints))\n\n # Concatenate the tensors in the batch in the first dimension:\n # shape: [batch_size * num_instances * num_keypoints * num_neighbors, 3] or\n # [batch_size * num_instances * num_keypoints * num_neighbors, 4] if\n # 'per_keypoint_offset' is set to True.\n batch_indices = tf.concat(batch_indices, axis=0)\n # shape: [batch_size * num_instances * num_keypoints * num_neighbors]\n batch_weights = tf.concat(batch_weights, axis=0)\n # shape: [batch_size * num_instances * num_keypoints * num_neighbors, 2]\n batch_offsets = tf.concat(batch_offsets, axis=0)\n return (batch_indices, batch_offsets, batch_weights)\n\n def assign_keypoints_depth_targets(self,\n height,\n width,\n gt_keypoints_list,\n gt_classes_list,\n gt_keypoint_depths_list,\n gt_keypoint_depth_weights_list,\n gt_keypoints_weights_list=None,\n gt_weights_list=None):\n \"\"\"Returns the target depths of the keypoints.\n\n The returned values are the relative depth information of each keypoints.\n\n Args:\n height: int, height of input to the CenterNet model. This is used to\n determine the height of the output.\n width: int, width of the input to the CenterNet model. This is used to\n determine the width of the output.\n gt_keypoints_list: A list of tensors with shape [num_instances,\n num_total_keypoints, 2]. See class-level description for more detail.\n gt_classes_list: A list of tensors with shape [num_instances,\n num_classes]. See class-level description for more detail.\n gt_keypoint_depths_list: A list of tensors with shape [num_instances,\n num_total_keypoints] corresponding to the relative depth of the\n keypoints.\n gt_keypoint_depth_weights_list: A list of tensors with shape\n [num_instances, num_total_keypoints] corresponding to the weights of\n the relative depth.\n gt_keypoints_weights_list: A list of tensors with shape [num_instances,\n num_total_keypoints] corresponding to the weight of each keypoint.\n gt_weights_list: A list of float tensors with shape [num_instances]. See\n class-level description for more detail.\n\n Returns:\n batch_indices: an integer tensor of shape [num_total_instances, 3] (or\n [num_total_instances, 4] if 'per_keypoint_offset' is set True) holding\n the indices inside the predicted tensor which should be penalized. The\n first column indicates the index along the batch dimension and the\n second and third columns indicate the index along the y and x\n dimensions respectively. 
The fourth column corresponds to the channel\n dimension (if 'per_keypoint_offset' is set True).\n batch_depths: a float tensor of shape [num_total_instances, 1] indicating\n the target depth of each keypoint.\n batch_weights: a float tensor of shape [num_total_instances] indicating\n the weight of each prediction.\n Note that num_total_instances = batch_size * num_instances *\n num_keypoints * num_neighbors\n \"\"\"\n\n batch_indices = []\n batch_weights = []\n batch_depths = []\n\n if gt_keypoints_weights_list is None:\n gt_keypoints_weights_list = [None] * len(gt_keypoints_list)\n if gt_weights_list is None:\n gt_weights_list = [None] * len(gt_classes_list)\n if gt_keypoint_depths_list is None:\n gt_keypoint_depths_list = [None] * len(gt_classes_list)\n for i, (keypoints, classes, kp_weights, weights,\n keypoint_depths, keypoint_depth_weights) in enumerate(\n zip(gt_keypoints_list, gt_classes_list,\n gt_keypoints_weights_list, gt_weights_list,\n gt_keypoint_depths_list, gt_keypoint_depth_weights_list)):\n keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights(\n out_height=height // self._stride,\n out_width=width // self._stride,\n keypoints=keypoints,\n class_onehot=classes,\n class_weights=weights,\n keypoint_weights=kp_weights,\n class_id=self._class_id,\n keypoint_indices=self._keypoint_indices)\n num_instances, num_keypoints, _ = (\n shape_utils.combined_static_and_dynamic_shape(keypoints_absolute))\n\n # [num_instances * num_keypoints]\n y_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 0])\n x_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 1])\n\n # All keypoint coordinates and their neighbors:\n # [num_instance * num_keypoints, num_neighbors]\n (y_source_neighbors, x_source_neighbors,\n valid_sources) = ta_utils.get_surrounding_grids(height // self._stride,\n width // self._stride,\n y_source, x_source,\n self._peak_radius)\n _, num_neighbors = shape_utils.combined_static_and_dynamic_shape(\n y_source_neighbors)\n\n # Update the valid keypoint weights.\n # [num_instance * num_keypoints, num_neighbors]\n valid_keypoints = tf.cast(\n valid_sources, dtype=tf.float32) * tf.stack(\n [tf.keras.backend.flatten(kp_weights)] * num_neighbors, axis=-1)\n\n # Compute the offsets and indices of the box centers. 
Shape:\n # indices: [num_instances * num_keypoints, num_neighbors, 2]\n _, indices = ta_utils.compute_floor_offsets_with_indices(\n y_source=y_source_neighbors,\n x_source=x_source_neighbors,\n y_target=y_source,\n x_target=x_source)\n # Reshape to:\n # indices: [num_instances * num_keypoints * num_neighbors, 2]\n indices = tf.reshape(indices, [-1, 2])\n\n # Gather the keypoint depth from corresponding keypoint indices:\n # [num_instances, num_keypoints]\n keypoint_depths = tf.gather(\n keypoint_depths, self._keypoint_indices, axis=1)\n # Tile the depth target to surrounding pixels.\n # [num_instances, num_keypoints, num_neighbors]\n tiled_keypoint_depths = tf.tile(\n tf.expand_dims(keypoint_depths, axis=-1),\n multiples=[1, 1, num_neighbors])\n\n # [num_instances, num_keypoints]\n keypoint_depth_weights = tf.gather(\n keypoint_depth_weights, self._keypoint_indices, axis=1)\n # [num_instances, num_keypoints, num_neighbors]\n keypoint_depth_weights = tf.tile(\n tf.expand_dims(keypoint_depth_weights, axis=-1),\n multiples=[1, 1, num_neighbors])\n # Update the weights of keypoint depth by the weights of the keypoints.\n # A keypoint depth target is valid only if its corresponding keypoint\n # target is also valid.\n # [num_instances, num_keypoints, num_neighbors]\n tiled_depth_weights = (\n tf.reshape(valid_keypoints,\n [num_instances, num_keypoints, num_neighbors]) *\n keypoint_depth_weights)\n invalid_depths = tf.logical_or(\n tf.math.is_nan(tiled_depth_weights),\n tf.math.is_nan(tiled_keypoint_depths))\n # Assign zero values and weights to NaN values.\n final_keypoint_depths = tf.where(invalid_depths,\n tf.zeros_like(tiled_keypoint_depths),\n tiled_keypoint_depths)\n final_keypoint_depth_weights = tf.where(\n invalid_depths,\n tf.zeros_like(tiled_depth_weights),\n tiled_depth_weights)\n # [num_instances * num_keypoints * num_neighbors, 1]\n batch_depths.append(tf.reshape(final_keypoint_depths, [-1, 1]))\n\n # Prepare the batch indices to be prepended.\n batch_index = tf.fill(\n [num_instances * num_keypoints * num_neighbors, 1], i)\n if self._per_keypoint_offset:\n tiled_keypoint_types = self._get_keypoint_types(\n num_instances, num_keypoints, num_neighbors)\n batch_indices.append(\n tf.concat([batch_index, indices,\n tf.reshape(tiled_keypoint_types, [-1, 1])], axis=1))\n else:\n batch_indices.append(tf.concat([batch_index, indices], axis=1))\n batch_weights.append(\n tf.keras.backend.flatten(final_keypoint_depth_weights))\n\n # Concatenate the tensors in the batch in the first dimension:\n # shape: [batch_size * num_instances * num_keypoints * num_neighbors, 3] or\n # [batch_size * num_instances * num_keypoints * num_neighbors, 4] if\n # 'per_keypoint_offset' is set to True.\n batch_indices = tf.concat(batch_indices, axis=0)\n # shape: [batch_size * num_instances * num_keypoints * num_neighbors]\n batch_weights = tf.concat(batch_weights, axis=0)\n # shape: [batch_size * num_instances * num_keypoints * num_neighbors, 1]\n batch_depths = tf.concat(batch_depths, axis=0)\n return (batch_indices, batch_depths, batch_weights)\n\n def assign_joint_regression_targets(self,\n height,\n width,\n gt_keypoints_list,\n gt_classes_list,\n gt_boxes_list=None,\n gt_keypoints_weights_list=None,\n gt_weights_list=None):\n \"\"\"Returns the joint regression from center grid to keypoints.\n\n The joint regression is used as the grouping cue from the estimated\n keypoints to instance center. 
The offsets are the vectors from the floored\n object center coordinates to the keypoint coordinates.\n\n Args:\n height: int, height of input to the CenterNet model. This is used to\n determine the height of the output.\n width: int, width of the input to the CenterNet model. This is used to\n determine the width of the output.\n gt_keypoints_list: A list of float tensors with shape [num_instances,\n num_total_keypoints]. See class-level description for more detail.\n gt_classes_list: A list of float tensors with shape [num_instances,\n num_classes]. See class-level description for more detail.\n gt_boxes_list: A list of float tensors with shape [num_instances, 4]. See\n class-level description for more detail. If provided, then the center\n targets will be computed based on the center of the boxes.\n gt_keypoints_weights_list: A list of float tensors with shape\n [num_instances, num_total_keypoints] representing to the weight of each\n keypoint.\n gt_weights_list: A list of float tensors with shape [num_instances]. See\n class-level description for more detail.\n\n Returns:\n batch_indices: an integer tensor of shape [num_instances, 4] holding the\n indices inside the predicted tensor which should be penalized. The\n first column indicates the index along the batch dimension and the\n second and third columns indicate the index along the y and x\n dimensions respectively, the last dimension refers to the keypoint type\n dimension.\n batch_offsets: a float tensor of shape [num_instances, 2] holding the\n expected y and x offset of each box in the output space.\n batch_weights: a float tensor of shape [num_instances] indicating the\n weight of each prediction.\n Note that num_total_instances = batch_size * num_instances * num_keypoints\n\n Raises:\n NotImplementedError: currently the object center coordinates need to be\n computed from groundtruth bounding boxes. The functionality of\n generating the object center coordinates from keypoints is not\n implemented yet.\n \"\"\"\n\n batch_indices = []\n batch_offsets = []\n batch_weights = []\n batch_size = len(gt_keypoints_list)\n if gt_keypoints_weights_list is None:\n gt_keypoints_weights_list = [None] * batch_size\n if gt_boxes_list is None:\n gt_boxes_list = [None] * batch_size\n if gt_weights_list is None:\n gt_weights_list = [None] * len(gt_classes_list)\n for i, (keypoints, classes, boxes, kp_weights, weights) in enumerate(\n zip(gt_keypoints_list, gt_classes_list,\n gt_boxes_list, gt_keypoints_weights_list, gt_weights_list)):\n keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights(\n out_height=height // self._stride,\n out_width=width // self._stride,\n keypoints=keypoints,\n class_onehot=classes,\n class_weights=weights,\n keypoint_weights=kp_weights,\n class_id=self._class_id,\n keypoint_indices=self._keypoint_indices)\n num_instances, num_keypoints, _ = (\n shape_utils.combined_static_and_dynamic_shape(keypoints_absolute))\n\n # If boxes are provided, compute the joint center from it.\n if boxes is not None:\n # Compute joint center from boxes.\n boxes = box_list.BoxList(boxes)\n boxes = box_list_ops.to_absolute_coordinates(boxes,\n height // self._stride,\n width // self._stride)\n y_center, x_center, _, _ = boxes.get_center_coordinates_and_sizes()\n else:\n # TODO(yuhuic): Add the logic to generate object centers from keypoints.\n raise NotImplementedError((\n 'The functionality of generating object centers from keypoints is'\n ' not implemented yet. 
Please provide groundtruth bounding boxes.'\n ))\n\n # Tile the yx center coordinates to be the same shape as keypoints.\n y_center_tiled = tf.tile(\n tf.reshape(y_center, shape=[num_instances, 1]),\n multiples=[1, num_keypoints])\n x_center_tiled = tf.tile(\n tf.reshape(x_center, shape=[num_instances, 1]),\n multiples=[1, num_keypoints])\n # [num_instance * num_keypoints, num_neighbors]\n (y_source_neighbors, x_source_neighbors,\n valid_sources) = ta_utils.get_surrounding_grids(\n height // self._stride, width // self._stride,\n tf.keras.backend.flatten(y_center_tiled),\n tf.keras.backend.flatten(x_center_tiled), self._peak_radius)\n\n _, num_neighbors = shape_utils.combined_static_and_dynamic_shape(\n y_source_neighbors)\n valid_keypoints = tf.cast(\n valid_sources, dtype=tf.float32) * tf.stack(\n [tf.keras.backend.flatten(kp_weights)] * num_neighbors, axis=-1)\n\n # Compute the offsets and indices of the box centers. Shape:\n # offsets: [num_instances * num_keypoints, 2]\n # indices: [num_instances * num_keypoints, 2]\n (offsets, indices) = ta_utils.compute_floor_offsets_with_indices(\n y_source=y_source_neighbors,\n x_source=x_source_neighbors,\n y_target=tf.keras.backend.flatten(keypoints_absolute[:, :, 0]),\n x_target=tf.keras.backend.flatten(keypoints_absolute[:, :, 1]))\n # Reshape to:\n # offsets: [num_instances * num_keypoints * num_neighbors, 2]\n # indices: [num_instances * num_keypoints * num_neighbors, 2]\n offsets = tf.reshape(offsets, [-1, 2])\n indices = tf.reshape(indices, [-1, 2])\n\n # keypoint type tensor: [num_instances, num_keypoints, num_neighbors].\n tiled_keypoint_types = self._get_keypoint_types(\n num_instances, num_keypoints, num_neighbors)\n\n batch_index = tf.fill(\n [num_instances * num_keypoints * num_neighbors, 1], i)\n batch_indices.append(\n tf.concat([batch_index, indices,\n tf.reshape(tiled_keypoint_types, [-1, 1])], axis=1))\n batch_offsets.append(offsets)\n batch_weights.append(tf.keras.backend.flatten(valid_keypoints))\n\n # Concatenate the tensors in the batch in the first dimension:\n # shape: [batch_size * num_instances * num_keypoints, 4]\n batch_indices = tf.concat(batch_indices, axis=0)\n # shape: [batch_size * num_instances * num_keypoints]\n batch_weights = tf.concat(batch_weights, axis=0)\n # shape: [batch_size * num_instances * num_keypoints, 2]\n batch_offsets = tf.concat(batch_offsets, axis=0)\n return (batch_indices, batch_offsets, batch_weights)\n\n\ndef _resize_masks(masks, height, width, method):\n # Resize segmentation masks to conform to output dimensions. Use TF2\n # image resize because TF1's version is buggy:\n # https://yaqs.corp.google.com/eng/q/4970450458378240\n masks = tf2.image.resize(\n masks[:, :, :, tf.newaxis],\n size=(height, width),\n method=method)\n return masks[:, :, :, 0]\n\n\nclass CenterNetMaskTargetAssigner(object):\n \"\"\"Wrapper to compute targets for segmentation masks.\"\"\"\n\n def __init__(self, stride):\n self._stride = stride\n\n def assign_segmentation_targets(\n self, gt_masks_list, gt_classes_list,\n mask_resize_method=ResizeMethod.BILINEAR):\n \"\"\"Computes the segmentation targets.\n\n This utility produces a semantic segmentation mask for each class, starting\n with whole image instance segmentation masks. 
Effectively, each per-class\n segmentation target is the union of all masks from that class.\n\n Args:\n gt_masks_list: A list of float tensors with shape [num_boxes,\n input_height, input_width] with values in {0, 1} representing instance\n masks for each object.\n gt_classes_list: A list of float tensors with shape [num_boxes,\n num_classes] representing the one-hot encoded class labels for each box\n in the gt_boxes_list.\n mask_resize_method: A `tf.compat.v2.image.ResizeMethod`. The method to use\n when resizing masks from input resolution to output resolution.\n\n Returns:\n segmentation_targets: An int32 tensor of size [batch_size, output_height,\n output_width, num_classes] representing the class of each location in\n the output space.\n \"\"\"\n # TODO(ronnyvotel): Handle groundtruth weights.\n _, num_classes = shape_utils.combined_static_and_dynamic_shape(\n gt_classes_list[0])\n\n _, input_height, input_width = (\n shape_utils.combined_static_and_dynamic_shape(gt_masks_list[0]))\n output_height = input_height // self._stride\n output_width = input_width // self._stride\n\n segmentation_targets_list = []\n for gt_masks, gt_classes in zip(gt_masks_list, gt_classes_list):\n gt_masks = _resize_masks(gt_masks, output_height, output_width,\n mask_resize_method)\n gt_masks = gt_masks[:, :, :, tf.newaxis]\n gt_classes_reshaped = tf.reshape(gt_classes, [-1, 1, 1, num_classes])\n # Shape: [h, w, num_classes].\n segmentations_for_image = tf.reduce_max(\n gt_masks * gt_classes_reshaped, axis=0)\n # Avoid the case where max of an empty array is -inf.\n segmentations_for_image = tf.maximum(segmentations_for_image, 0.0)\n segmentation_targets_list.append(segmentations_for_image)\n\n segmentation_target = tf.stack(segmentation_targets_list, axis=0)\n return segmentation_target\n\n\nclass CenterNetDensePoseTargetAssigner(object):\n \"\"\"Wrapper to compute targets for DensePose task.\"\"\"\n\n def __init__(self, stride, num_parts=24):\n self._stride = stride\n self._num_parts = num_parts\n\n def assign_part_and_coordinate_targets(self,\n height,\n width,\n gt_dp_num_points_list,\n gt_dp_part_ids_list,\n gt_dp_surface_coords_list,\n gt_weights_list=None):\n \"\"\"Returns the DensePose part_id and coordinate targets and their indices.\n\n The returned values are expected to be used with predicted tensors\n of size (batch_size, height//self._stride, width//self._stride, 2). The\n predicted values at the relevant indices can be retrieved with the\n get_batch_predictions_from_indices function.\n\n Args:\n height: int, height of input to the model. This is used to determine the\n height of the output.\n width: int, width of the input to the model. This is used to determine the\n width of the output.\n gt_dp_num_points_list: a list of 1-D tf.int32 tensors of shape [num_boxes]\n containing the number of DensePose sampled points per box.\n gt_dp_part_ids_list: a list of 2-D tf.int32 tensors of shape\n [num_boxes, max_sampled_points] containing the DensePose part ids\n (0-indexed) for each sampled point. Note that there may be padding, as\n boxes may contain a different number of sampled points.\n gt_dp_surface_coords_list: a list of 3-D tf.float32 tensors of shape\n [num_boxes, max_sampled_points, 4] containing the DensePose surface\n coordinates (normalized) for each sampled point. 
Note that there may be\n padding.\n gt_weights_list: A list of 1-D tensors with shape [num_boxes]\n corresponding to the weight of each groundtruth detection box.\n\n Returns:\n batch_indices: an integer tensor of shape [num_total_points, 4] holding\n the indices inside the predicted tensor which should be penalized. The\n first column indicates the index along the batch dimension and the\n second and third columns indicate the index along the y and x\n dimensions respectively. The fourth column is the part index.\n batch_part_ids: an int tensor of shape [num_total_points, num_parts]\n holding 1-hot encodings of parts for each sampled point.\n batch_surface_coords: a float tensor of shape [num_total_points, 2]\n holding the expected (v, u) coordinates for each sampled point.\n batch_weights: a float tensor of shape [num_total_points] indicating the\n weight of each prediction.\n Note that num_total_points = batch_size * num_boxes * max_sampled_points.\n \"\"\"\n\n if gt_weights_list is None:\n gt_weights_list = [None] * len(gt_dp_num_points_list)\n\n batch_indices = []\n batch_part_ids = []\n batch_surface_coords = []\n batch_weights = []\n\n for i, (num_points, part_ids, surface_coords, weights) in enumerate(\n zip(gt_dp_num_points_list, gt_dp_part_ids_list,\n gt_dp_surface_coords_list, gt_weights_list)):\n num_boxes, max_sampled_points = (\n shape_utils.combined_static_and_dynamic_shape(part_ids))\n part_ids_flattened = tf.reshape(part_ids, [-1])\n part_ids_one_hot = tf.one_hot(part_ids_flattened, depth=self._num_parts)\n # Get DensePose coordinates in the output space.\n surface_coords_abs = densepose_ops.to_absolute_coordinates(\n surface_coords, height // self._stride, width // self._stride)\n surface_coords_abs = tf.reshape(surface_coords_abs, [-1, 4])\n # Each tensor has shape [num_boxes * max_sampled_points].\n yabs, xabs, v, u = tf.unstack(surface_coords_abs, axis=-1)\n\n # Get the indices (in output space) for the DensePose coordinates. Note\n # that if self._stride is larger than 1, this will have the effect of\n # reducing spatial resolution of the groundtruth points.\n indices_y = tf.cast(yabs, tf.int32)\n indices_x = tf.cast(xabs, tf.int32)\n\n # Assign ones if weights are not provided.\n if weights is None:\n weights = tf.ones(num_boxes, dtype=tf.float32)\n # Create per-point weights.\n weights_per_point = tf.reshape(\n tf.tile(weights[:, tf.newaxis], multiples=[1, max_sampled_points]),\n shape=[-1])\n # Mask out invalid (i.e. 
padded) DensePose points.
      num_points_tiled = tf.tile(num_points[:, tf.newaxis],
                                 multiples=[1, max_sampled_points])
      range_tiled = tf.tile(tf.range(max_sampled_points)[tf.newaxis, :],
                            multiples=[num_boxes, 1])
      valid_points = tf.math.less(range_tiled, num_points_tiled)
      valid_points = tf.cast(tf.reshape(valid_points, [-1]), dtype=tf.float32)
      weights_per_point = weights_per_point * valid_points

      # Shape of [num_boxes * max_sampled_points] integer tensor filled with
      # current batch index.
      batch_index = i * tf.ones_like(indices_y, dtype=tf.int32)
      batch_indices.append(
          tf.stack([batch_index, indices_y, indices_x, part_ids_flattened],
                   axis=1))
      batch_part_ids.append(part_ids_one_hot)
      batch_surface_coords.append(tf.stack([v, u], axis=1))
      batch_weights.append(weights_per_point)

    batch_indices = tf.concat(batch_indices, axis=0)
    batch_part_ids = tf.concat(batch_part_ids, axis=0)
    batch_surface_coords = tf.concat(batch_surface_coords, axis=0)
    batch_weights = tf.concat(batch_weights, axis=0)
    return batch_indices, batch_part_ids, batch_surface_coords, batch_weights
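# Illustrative usage sketch (added for exposition; not part of the original
# module). DensePose targets for one image containing a single box that
# carries two valid sampled points, padded to max_sampled_points=3; the helper
# name and numbers are hypothetical.
def _example_densepose_targets():
  """Demonstrates CenterNetDensePoseTargetAssigner on padded points."""
  assigner = CenterNetDensePoseTargetAssigner(stride=4)
  gt_dp_num_points_list = [tf.constant([2], dtype=tf.int32)]
  gt_dp_part_ids_list = [tf.constant([[3, 17, 0]], dtype=tf.int32)]
  # Each sampled point is (y, x, v, u); (y, x) are normalized to the image.
  gt_dp_surface_coords_list = [tf.constant(
      [[[0.2, 0.3, 0.1, 0.9], [0.6, 0.7, 0.4, 0.5], [0.0, 0.0, 0.0, 0.0]]])]
  # The third (padded) point receives zero weight via the validity mask.
  return assigner.assign_part_and_coordinate_targets(
      height=128, width=128,
      gt_dp_num_points_list=gt_dp_num_points_list,
      gt_dp_part_ids_list=gt_dp_part_ids_list,
      gt_dp_surface_coords_list=gt_dp_surface_coords_list)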
class CenterNetTrackTargetAssigner(object):
  """Wrapper to compute targets for the tracking task.

  Reference paper: A Simple Baseline for Multi-Object Tracking [1]
  [1]: https://arxiv.org/abs/2004.01888
  """

  def __init__(self, stride, num_track_ids):
    self._stride = stride
    self._num_track_ids = num_track_ids

  def assign_track_targets(self,
                           height,
                           width,
                           gt_track_ids_list,
                           gt_boxes_list,
                           gt_weights_list=None):
    """Computes the track ID targets.

    Args:
      height: int, height of input to the model. This is used to determine the
        height of the output.
      width: int, width of the input to the model. This is used to determine
        the width of the output.
      gt_track_ids_list: A list of 1-D tensors with shape [num_boxes]
        corresponding to the track ID of each groundtruth detection box.
      gt_boxes_list: A list of float tensors with shape [num_boxes, 4]
        representing the groundtruth detection bounding boxes for each sample
        in the batch. The coordinates are expected in normalized coordinates.
      gt_weights_list: A list of 1-D tensors with shape [num_boxes]
        corresponding to the weight of each groundtruth detection box.

    Returns:
      batch_indices: an integer tensor of shape [batch_size, num_boxes, 3]
        holding the indices inside the predicted tensor which should be
        penalized. The first column indicates the index along the batch
        dimension and the second and third columns indicate the index
        along the y and x dimensions respectively.
      batch_weights: a float tensor of shape [batch_size, num_boxes] indicating
        the weight of each prediction.
      track_id_targets: An int32 tensor of size [batch_size, num_boxes,
        num_track_ids] containing the one-hot track ID vector of each
        groundtruth detection box.
    """
    track_id_targets = tf.one_hot(
        gt_track_ids_list, depth=self._num_track_ids, axis=-1)

    if gt_weights_list is None:
      gt_weights_list = [None] * len(gt_boxes_list)

    batch_indices = []
    batch_weights = []

    for i, (boxes, weights) in enumerate(zip(gt_boxes_list, gt_weights_list)):
      boxes = box_list.BoxList(boxes)
      boxes = box_list_ops.to_absolute_coordinates(boxes,
                                                   height // self._stride,
                                                   width // self._stride)
      # Get the box center coordinates. Each returned tensor has the shape of
      # [num_boxes].
      (y_center, x_center, _, _) = boxes.get_center_coordinates_and_sizes()
      num_boxes = tf.shape(x_center)

      # Compute the indices of the box centers. Shape:
      #   indices: [num_boxes, 2]
      (_, indices) = ta_utils.compute_floor_offsets_with_indices(
          y_source=y_center, x_source=x_center)

      # Assign ones if weights are not provided.
      if weights is None:
        weights = tf.ones(num_boxes, dtype=tf.float32)

      # Shape of [num_boxes, 1] integer tensor filled with current batch index.
      batch_index = i * tf.ones_like(indices[:, 0:1], dtype=tf.int32)
      batch_indices.append(tf.concat([batch_index, indices], axis=1))
      batch_weights.append(weights)

    batch_indices = tf.stack(batch_indices, axis=0)
    batch_weights = tf.stack(batch_weights, axis=0)

    return batch_indices, batch_weights, track_id_targets


def filter_mask_overlap_min_area(masks):
  """If a pixel belongs to 2 instances, remove it from the larger instance."""

  num_instances = tf.shape(masks)[0]
  def _filter_min_area():
    """Helper function to filter non-empty masks."""
    areas = tf.reduce_sum(masks, axis=[1, 2], keepdims=True)
    per_pixel_area = masks * areas
    # Make sure background is ignored in argmin.
    per_pixel_area = (masks * per_pixel_area +
                      (1 - masks) * per_pixel_area.dtype.max)
    min_index = tf.cast(tf.argmin(per_pixel_area, axis=0), tf.int32)

    filtered_masks = (
        tf.range(num_instances)[:, tf.newaxis, tf.newaxis]
        ==
        min_index[tf.newaxis, :, :]
    )

    return tf.cast(filtered_masks, tf.float32) * masks

  return tf.cond(num_instances > 0, _filter_min_area,
                 lambda: masks)


def filter_mask_overlap(masks, method='min_area'):

  if method == 'min_area':
    return filter_mask_overlap_min_area(masks)
  else:
    raise ValueError('Unknown mask overlap filter type - {}'.format(method))
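# Illustrative usage sketch (added for exposition; not part of the original
# module). Demonstrates min-area overlap resolution: a pixel covered by both
# a small and a large instance mask is kept only in the smaller instance.
def _example_filter_mask_overlap():
  """Demonstrates `filter_mask_overlap` on two overlapping masks."""
  big = tf.ones([4, 4], dtype=tf.float32)            # Area 16, covers all.
  small = tf.pad(tf.ones([2, 2], dtype=tf.float32),  # Area 4, top-left patch.
                 [[0, 2], [0, 2]])
  masks = tf.stack([big, small], axis=0)  # Shape: [2, 4, 4].
  filtered = filter_mask_overlap(masks, method='min_area')
  # The top-left 2x2 patch now belongs exclusively to the smaller mask.
  return filtered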
The coordinates are expected in normalized coordinates.\n gt_masks_list: A list of float tensors with shape [num_boxes,\n input_height, input_width] with values in {0, 1} representing instance\n masks for each object.\n\n Returns:\n corner_offsets: A float tensor of shape [batch_size, height, width, 4]\n containing, in order, the (y, x) offsets to the top left corner and\n the (y, x) offsets to the bottom right corner for each foregroung pixel\n foreground: A float tensor of shape [batch_size, height, width] in which\n each pixel is set to 1 if it is a part of any instance's foreground\n (and thus contains valid corner offsets) and 0 otherwise.\n\n \"\"\"\n _, input_height, input_width = (\n shape_utils.combined_static_and_dynamic_shape(gt_masks_list[0]))\n output_height = input_height // self._stride\n output_width = input_width // self._stride\n y_grid, x_grid = tf.meshgrid(\n tf.range(output_height), tf.range(output_width),\n indexing='ij')\n y_grid, x_grid = tf.cast(y_grid, tf.float32), tf.cast(x_grid, tf.float32)\n\n corner_targets = []\n foreground_targets = []\n for gt_masks, gt_boxes in zip(gt_masks_list, gt_boxes_list):\n gt_masks = _resize_masks(gt_masks, output_height, output_width,\n method=ResizeMethod.NEAREST_NEIGHBOR)\n gt_masks = filter_mask_overlap(gt_masks, self._overlap_resolution)\n\n ymin, xmin, ymax, xmax = tf.unstack(gt_boxes, axis=1)\n ymin, ymax = ymin * output_height, ymax * output_height\n xmin, xmax = xmin * output_width, xmax * output_width\n\n top_y = ymin[:, tf.newaxis, tf.newaxis] - y_grid[tf.newaxis]\n left_x = xmin[:, tf.newaxis, tf.newaxis] - x_grid[tf.newaxis]\n bottom_y = ymax[:, tf.newaxis, tf.newaxis] - y_grid[tf.newaxis]\n right_x = xmax[:, tf.newaxis, tf.newaxis] - x_grid[tf.newaxis]\n\n foreground_target = tf.cast(tf.reduce_sum(gt_masks, axis=0) > 0.5,\n tf.float32)\n foreground_targets.append(foreground_target)\n\n corner_target = tf.stack([\n tf.reduce_sum(top_y * gt_masks, axis=0),\n tf.reduce_sum(left_x * gt_masks, axis=0),\n tf.reduce_sum(bottom_y * gt_masks, axis=0),\n tf.reduce_sum(right_x * gt_masks, axis=0),\n ], axis=2)\n\n corner_targets.append(corner_target)\n\n return (tf.stack(corner_targets, axis=0),\n tf.stack(foreground_targets, axis=0))\n\n\nclass CenterNetTemporalOffsetTargetAssigner(object):\n \"\"\"Wrapper to compute target tensors for the temporal offset task.\n\n This class has methods that take as input a batch of ground truth tensors\n (in the form of a list) and returns the targets required to train the\n temporal offset task.\n \"\"\"\n\n def __init__(self, stride):\n \"\"\"Initializes the target assigner.\n\n Args:\n stride: int, the stride of the network in output pixels.\n \"\"\"\n\n self._stride = stride\n\n def assign_temporal_offset_targets(self,\n height,\n width,\n gt_boxes_list,\n gt_offsets_list,\n gt_match_list,\n gt_weights_list=None):\n \"\"\"Returns the temporal offset targets and their indices.\n\n For each ground truth box, this function assigns it the corresponding\n temporal offset to train the model.\n\n Args:\n height: int, height of input to the model. This is used to determine the\n height of the output.\n width: int, width of the input to the model. This is used to determine the\n width of the output.\n gt_boxes_list: A list of float tensors with shape [num_boxes, 4]\n representing the groundtruth detection bounding boxes for each sample in\n the batch. 
The coordinates are expected in normalized coordinates.\n gt_offsets_list: A list of 2-D tf.float32 tensors of shape [num_boxes, 2]\n containing the spatial offsets of objects' centers compared with the\n previous frame.\n gt_match_list: A list of 1-D tf.float32 tensors of shape [num_boxes]\n containing flags that indicate if an object has existed in the\n previous frame.\n gt_weights_list: A list of tensors with shape [num_boxes] corresponding to\n the weight of each groundtruth detection box.\n\n Returns:\n batch_indices: an integer tensor of shape [num_boxes, 3] holding the\n indices inside the predicted tensor which should be penalized. The\n first column indicates the index along the batch dimension and the\n second and third columns indicate the index along the y and x\n dimensions respectively.\n batch_temporal_offsets: a float tensor of shape [num_boxes, 2] of the\n expected y and x temporal offset of each object center in the\n output space.\n batch_weights: a float tensor of shape [num_boxes] indicating the\n weight of each prediction.\n \"\"\"\n\n if gt_weights_list is None:\n gt_weights_list = [None] * len(gt_boxes_list)\n\n batch_indices = []\n batch_weights = []\n batch_temporal_offsets = []\n\n for i, (boxes, offsets, match_flags, weights) in enumerate(zip(\n gt_boxes_list, gt_offsets_list, gt_match_list, gt_weights_list)):\n boxes = box_list.BoxList(boxes)\n boxes = box_list_ops.to_absolute_coordinates(boxes,\n height // self._stride,\n width // self._stride)\n # Get the box center coordinates. Each returned tensors have the shape of\n # [num_boxes]\n (y_center, x_center, _, _) = boxes.get_center_coordinates_and_sizes()\n num_boxes = tf.shape(x_center)\n\n # Compute the offsets and indices of the box centers. Shape:\n # offsets: [num_boxes, 2]\n # indices: [num_boxes, 2]\n (_, indices) = ta_utils.compute_floor_offsets_with_indices(\n y_source=y_center, x_source=x_center)\n\n # Assign ones if weights are not provided.\n # if an object is not matched, its weight becomes zero.\n if weights is None:\n weights = tf.ones(num_boxes, dtype=tf.float32)\n weights *= match_flags\n\n # Shape of [num_boxes, 1] integer tensor filled with current batch index.\n batch_index = i * tf.ones_like(indices[:, 0:1], dtype=tf.int32)\n batch_indices.append(tf.concat([batch_index, indices], axis=1))\n batch_weights.append(weights)\n batch_temporal_offsets.append(offsets)\n\n batch_indices = tf.concat(batch_indices, axis=0)\n batch_weights = tf.concat(batch_weights, axis=0)\n batch_temporal_offsets = tf.concat(batch_temporal_offsets, axis=0)\n return (batch_indices, batch_temporal_offsets, batch_weights)\n\n\nclass DETRTargetAssigner(object):\n \"\"\"Target assigner for DETR (https://arxiv.org/abs/2005.12872).\n\n Detection Transformer (DETR) matches predicted boxes to groundtruth directly\n to determine targets instead of matching anchors to groundtruth. 
Hence, the\n new target assigner.\n \"\"\"\n\n def __init__(self):\n \"\"\"Construct Object Detection Target Assigner.\"\"\"\n self._similarity_calc = sim_calc.DETRSimilarity()\n self._matcher = hungarian_matcher.HungarianBipartiteMatcher()\n\n def batch_assign(self,\n pred_box_batch,\n gt_box_batch,\n pred_class_batch,\n gt_class_targets_batch,\n gt_weights_batch=None,\n unmatched_class_label_batch=None):\n \"\"\"Batched assignment of classification and regression targets.\n\n Args:\n pred_box_batch: a tensor of shape [batch_size, num_queries, 4]\n representing predicted bounding boxes.\n gt_box_batch: a tensor of shape [batch_size, num_queries, 4]\n representing groundtruth bounding boxes.\n pred_class_batch: A list of tensors with length batch_size, where each\n each tensor has shape [num_queries, num_classes] to be used\n by certain similarity calculators.\n gt_class_targets_batch: a list of tensors with length batch_size, where\n each tensor has shape [num_gt_boxes_i, num_classes] and\n num_gt_boxes_i is the number of boxes in the ith boxlist of\n gt_box_batch.\n gt_weights_batch: A list of 1-D tf.float32 tensors of shape\n [num_boxes] containing weights for groundtruth boxes.\n unmatched_class_label_batch: a float32 tensor with shape\n [d_1, d_2, ..., d_k] which is consistent with the classification target\n for each anchor (and can be empty for scalar targets). This shape must\n thus be compatible with the `gt_class_targets_batch`.\n\n Returns:\n batch_cls_targets: a tensor with shape [batch_size, num_pred_boxes,\n num_classes],\n batch_cls_weights: a tensor with shape [batch_size, num_pred_boxes,\n num_classes],\n batch_reg_targets: a tensor with shape [batch_size, num_pred_boxes,\n box_code_dimension]\n batch_reg_weights: a tensor with shape [batch_size, num_pred_boxes].\n \"\"\"\n pred_box_batch = [\n box_list.BoxList(pred_box)\n for pred_box in tf.unstack(pred_box_batch)]\n gt_box_batch = [\n box_list.BoxList(gt_box)\n for gt_box in tf.unstack(gt_box_batch)]\n\n cls_targets_list = []\n cls_weights_list = []\n reg_targets_list = []\n reg_weights_list = []\n if gt_weights_batch is None:\n gt_weights_batch = [None] * len(gt_class_targets_batch)\n if unmatched_class_label_batch is None:\n unmatched_class_label_batch = [None] * len(gt_class_targets_batch)\n pred_class_batch = tf.unstack(pred_class_batch)\n for (pred_boxes, gt_boxes, pred_class_batch, gt_class_targets, gt_weights,\n unmatched_class_label) in zip(pred_box_batch, gt_box_batch,\n pred_class_batch, gt_class_targets_batch,\n gt_weights_batch,\n unmatched_class_label_batch):\n (cls_targets, cls_weights, reg_targets,\n reg_weights) = self.assign(pred_boxes, gt_boxes, pred_class_batch,\n gt_class_targets, gt_weights,\n unmatched_class_label)\n cls_targets_list.append(cls_targets)\n cls_weights_list.append(cls_weights)\n reg_targets_list.append(reg_targets)\n reg_weights_list.append(reg_weights)\n batch_cls_targets = tf.stack(cls_targets_list)\n batch_cls_weights = tf.stack(cls_weights_list)\n batch_reg_targets = tf.stack(reg_targets_list)\n batch_reg_weights = tf.stack(reg_weights_list)\n return (batch_cls_targets, batch_cls_weights, batch_reg_targets,\n batch_reg_weights)\n\n def assign(self,\n pred_boxes,\n gt_boxes,\n pred_classes,\n gt_labels,\n gt_weights=None,\n unmatched_class_label=None):\n \"\"\"Assign classification and regression targets to each box_pred.\n\n For a given set of pred_boxes and groundtruth detections, match pred_boxes\n to gt_boxes and assign classification and regression targets to\n each box_pred 
as well as weights based on the resulting match (specifying,\n e.g., which pred_boxes should not contribute to training loss).\n\n pred_boxes that are not matched to anything are given a classification\n target of `unmatched_cls_target`.\n\n Args:\n pred_boxes: a BoxList representing N pred_boxes\n gt_boxes: a BoxList representing M groundtruth boxes\n pred_classes: A tensor with shape [max_num_boxes, num_classes]\n to be used by certain similarity calculators.\n gt_labels: a tensor of shape [M, num_classes]\n with labels for each of the ground_truth boxes. The subshape\n [num_classes] can be empty (corresponding to scalar inputs). When set\n to None, gt_labels assumes a binary problem where all\n ground_truth boxes get a positive label (of 1).\n gt_weights: a float tensor of shape [M] indicating the weight to\n assign to all pred_boxes match to a particular groundtruth box. The\n weights must be in [0., 1.]. If None, all weights are set to 1.\n Generally no groundtruth boxes with zero weight match to any pred_boxes\n as matchers are aware of groundtruth weights. Additionally,\n `cls_weights` and `reg_weights` are calculated using groundtruth\n weights as an added safety.\n unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]\n which is consistent with the classification target for each\n anchor (and can be empty for scalar targets). This shape must thus be\n compatible with the groundtruth labels that are passed to the \"assign\"\n function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).\n\n Returns:\n cls_targets: a float32 tensor with shape [num_pred_boxes, num_classes],\n where the subshape [num_classes] is compatible with gt_labels\n which has shape [num_gt_boxes, num_classes].\n cls_weights: a float32 tensor with shape [num_pred_boxes, num_classes],\n representing weights for each element in cls_targets.\n reg_targets: a float32 tensor with shape [num_pred_boxes,\n box_code_dimension]\n reg_weights: a float32 tensor with shape [num_pred_boxes]\n\n \"\"\"\n if not unmatched_class_label:\n unmatched_class_label = tf.constant(\n [1] + [0] * (gt_labels.shape[1] - 1), tf.float32)\n\n if gt_weights is None:\n num_gt_boxes = gt_boxes.num_boxes_static()\n if not num_gt_boxes:\n num_gt_boxes = gt_boxes.num_boxes()\n gt_weights = tf.ones([num_gt_boxes], dtype=tf.float32)\n\n gt_boxes.add_field(fields.BoxListFields.classes, gt_labels)\n pred_boxes.add_field(fields.BoxListFields.classes, pred_classes)\n\n match_quality_matrix = self._similarity_calc.compare(\n gt_boxes,\n pred_boxes)\n match = self._matcher.match(match_quality_matrix,\n valid_rows=tf.greater(gt_weights, 0))\n\n matched_gt_boxes = match.gather_based_on_match(\n gt_boxes.get(),\n unmatched_value=tf.zeros(4),\n ignored_value=tf.zeros(4))\n matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)\n ty, tx, th, tw = matched_gt_boxlist.get_center_coordinates_and_sizes()\n reg_targets = tf.transpose(tf.stack([ty, tx, th, tw]))\n cls_targets = match.gather_based_on_match(\n gt_labels,\n unmatched_value=unmatched_class_label,\n ignored_value=unmatched_class_label)\n reg_weights = match.gather_based_on_match(\n gt_weights,\n ignored_value=0.,\n unmatched_value=0.)\n cls_weights = match.gather_based_on_match(\n gt_weights,\n ignored_value=0.,\n unmatched_value=1)\n\n # convert cls_weights from per-box_pred to per-class.\n class_label_shape = tf.shape(cls_targets)[1:]\n weights_multiple = tf.concat(\n [tf.constant([1]), class_label_shape],\n axis=0)\n cls_weights = tf.expand_dims(cls_weights, -1)\n cls_weights = 
tf.tile(cls_weights, weights_multiple)\n\n return (cls_targets, cls_weights, reg_targets, reg_weights)\n" ]
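The DETR assigner above delegates matching to a HungarianBipartiteMatcher. As a rough illustration of what a minimum-cost bipartite matching produces, here is a sketch using scipy's linear_sum_assignment on a made-up cost matrix (the toy costs and the use of scipy are ours for illustration; the source relies on its own matcher):

  import numpy as np
  from scipy.optimize import linear_sum_assignment

  # Toy cost matrix: rows are groundtruth boxes, columns are predicted queries.
  # Lower cost means a better (groundtruth, query) pairing.
  cost = np.array([[0.1, 0.9, 0.8],
                   [0.7, 0.2, 0.9]])
  gt_idx, query_idx = linear_sum_assignment(cost)
  # gt_idx == [0, 1], query_idx == [0, 1]: the globally cheapest one-to-one
  # assignment. The leftover query (index 2) stays unmatched and would be
  # trained against the unmatched_class_label target.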
[ [ "tensorflow.constant", "tensorflow.test.main", "tensorflow.keras.Input" ], [ "tensorflow.constant", "tensorflow.estimator.Estimator", "tensorflow.executing_eagerly", "tensorflow.ragged.constant", "tensorflow.io.gfile.GFile", "tensorflow.test.main", "tensorflow.keras.Model", "tensorflow.init_scope", "tensorflow.estimator.EstimatorSpec", "numpy.array", "tensorflow.keras.layers.Input" ], [ "tensorflow.keras.models.Model", "tensorflow.shape", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.keras.layers.Input" ], [ "tensorflow.compat.v1.concat", "tensorflow.compat.v1.sqrt", "tensorflow.compat.v1.shape", "tensorflow.compat.v1.logical_or", "tensorflow.compat.v1.constant", "tensorflow.compat.v1.math.square", "tensorflow.compat.v1.ones", "tensorflow.compat.v1.reduce_sum", "tensorflow.compat.v1.reshape", "tensorflow.compat.v1.one_hot", "tensorflow.compat.v1.math.floor", "tensorflow.compat.v1.maximum", "tensorflow.compat.v1.zeros_like", "tensorflow.compat.v1.unstack", "tensorflow.compat.v1.gather_nd", "tensorflow.compat.v1.where", "tensorflow.compat.v1.ones_like", "tensorflow.compat.v1.argmin", "tensorflow.compat.v1.reduce_any", "tensorflow.compat.v1.less", "tensorflow.compat.v1.fill", "tensorflow.compat.v1.zeros", "tensorflow.compat.v1.tile", "tensorflow.compat.v1.keras.backend.flatten", "tensorflow.compat.v1.cond", "tensorflow.compat.v1.cast", "tensorflow.compat.v1.reduce_max", "tensorflow.compat.v2.image.resize", "tensorflow.compat.v1.stack", "tensorflow.compat.v1.expand_dims", "tensorflow.compat.v1.math.is_nan", "tensorflow.compat.v1.control_dependencies", "tensorflow.compat.v1.gather", "tensorflow.compat.v1.slice", "tensorflow.compat.v1.reduce_min", "tensorflow.compat.v1.range", "tensorflow.compat.v1.math.less", "tensorflow.compat.v1.greater" ] ]
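The apis field above records fully qualified call sites as dotted strings. A quick, hypothetical helper for resolving such a path back to a live Python object (the helper name is ours, not part of the dataset):

  import importlib

  def resolve_api(path):
    """Resolves a dotted path like 'tensorflow.constant' to the named object."""
    module_name, _, attr = path.rpartition('.')
    return getattr(importlib.import_module(module_name), attr)

  constant_fn = resolve_api('tensorflow.constant')  # same style as the entries above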
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
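Stepping back to the CenterNet assigners in the code above: the (batch, y, x) index triples they emit are typically consumed with tf.gather_nd to read per-object predictions out of a dense output map. A minimal sketch with toy shapes (ours, not from the source):

  import tensorflow as tf

  # Toy prediction map: batch of 1, a 4x4 output grid, 3 channels.
  preds = tf.reshape(tf.range(48, dtype=tf.float32), [1, 4, 4, 3])

  # Two groundtruth centers as (batch, y, x) triples, mirroring the
  # batch_indices layout returned by e.g. assign_temporal_offset_targets.
  batch_indices = tf.constant([[0, 1, 2],
                               [0, 3, 0]])

  per_object_preds = tf.gather_nd(preds, batch_indices)  # shape [2, 3]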
MathMachado/tensorflow
[ "56afda20b15f234c23e8393f7e337e7dd2659c2d", "56afda20b15f234c23e8393f7e337e7dd2659c2d", "56afda20b15f234c23e8393f7e337e7dd2659c2d", "56afda20b15f234c23e8393f7e337e7dd2659c2d", "56afda20b15f234c23e8393f7e337e7dd2659c2d", "56afda20b15f234c23e8393f7e337e7dd2659c2d", "56afda20b15f234c23e8393f7e337e7dd2659c2d", "56afda20b15f234c23e8393f7e337e7dd2659c2d", "56afda20b15f234c23e8393f7e337e7dd2659c2d" ]
[ "tensorflow/python/ops/math_grad.py", "tensorflow/python/debug/cli/analyzer_cli_test.py", "tensorflow/examples/speech_commands/input_data_test.py", "tensorflow/contrib/distributions/python/ops/logistic.py", "tensorflow/python/feature_column/sequence_feature_column_integration_test.py", "tensorflow/python/data/experimental/ops/shuffle_ops.py", "tensorflow/python/ops/linalg/linear_operator_identity.py", "tensorflow/contrib/eager/python/examples/revnet/revnet.py", "tensorflow/contrib/timeseries/examples/multivariate.py" ]
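The first of the files listed above, tensorflow/python/ops/math_grad.py, follows. Every gradient function in it is wired up through the ops.RegisterGradient decorator; a minimal sketch of that registration pattern, using a hypothetical op name:

  from tensorflow.python.framework import ops

  @ops.RegisterGradient("MyPassThrough")  # hypothetical op name
  def _MyPassThroughGrad(op, grad):
    """A pass-through op's gradient is the incoming gradient, unchanged."""
    del op  # Unused: this gradient does not depend on the forward inputs.
    return grad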
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Gradients for operators defined in math_ops.py.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python import pywrap_tensorflow as c_api\nfrom tensorflow.python.compat import compat\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.util import object_identity\n\n\ndef _safe_shape_div(x, y):\n  \"\"\"Divides `x / y` assuming `x, y >= 0`, treating `0 / 0 = 0`.\"\"\"\n  return x // math_ops.maximum(y, 1)\n\n\n@ops.RegisterGradient(\"ArgMax\")\ndef _ArgMaxGrad(op, grad):\n  del op, grad\n  return [None, None]\n\n\n@ops.RegisterGradient(\"ArgMin\")\ndef _ArgMinGrad(op, grad):\n  del op, grad\n  return [None, None]\n\n\n# TODO(rmlarsen): Implement gradient.\nops.NotDifferentiable(\"EuclideanNorm\")\n\n\ndef SmartBroadcastGradientArgs(x, y, grad):\n  \"\"\"Optimized version of `broadcast_gradient_args` that caches results.\n\n  This implementation avoids creating `broadcast_gradient_args` ops in the case\n  that the input shapes are fully defined, and provides hints to the calling\n  code that can be used to avoid creating reduction and reshaping ops.\n\n  Args:\n    x: The left input tensor to a broadcasting binary op.\n    y: The right input tensor to a broadcasting binary op.\n    grad: The incoming gradient tensor for a broadcasting binary op.\n\n  Returns:\n    A pair of tuples, containing:\n      * A 3-tuple of broadcast information for x, containing:\n        * The shape of x (as a tuple or Tensor).\n        * The reduction indices for x (as a tuple or Tensor).\n        * A boolean, which if True, indicates that x's shape differs from grad's\n          shape (and so x's gradient must be reduced and/or reshaped).\n      * A 3-tuple of broadcast information for y, containing the respective\n        details for y.\n  \"\"\"\n  # NOTE: It may be productive to apply these optimizations in the eager case\n  # as well.\n  if context.executing_eagerly() or not (\n      isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor)\n      and isinstance(grad, ops.Tensor)):\n    sx = array_ops.shape(x)\n    sy = array_ops.shape(y)\n    rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n    return (sx, rx, True), (sy, ry, True)\n\n  # pylint: disable=protected-access\n  x_shape_tuple = x._shape_tuple()\n  y_shape_tuple = y._shape_tuple()\n  grad_shape_tuple = grad._shape_tuple()\n  # pylint: enable=protected-access\n\n  if (x_shape_tuple is None or None in x_shape_tuple or\n      y_shape_tuple is None or None in 
y_shape_tuple):\n    sx = array_ops.shape_internal(x, optimize=False)\n    sy = array_ops.shape_internal(y, optimize=False)\n    rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n    return (sx, rx, True), (sy, ry, True)\n\n  x_needs_reduction = x_shape_tuple != grad_shape_tuple\n  y_needs_reduction = y_shape_tuple != grad_shape_tuple\n\n  # Get the default graph rather than relying on `x.graph`, `y.graph`, or\n  # `grad.graph`, because these may be eager tensors.\n  g = ops.get_default_graph()\n\n  try:\n    rx, ry = g._bcast_grad_args_cache[(x_shape_tuple, y_shape_tuple)]  # pylint: disable=protected-access\n    return (x_shape_tuple, rx, x_needs_reduction), (\n        y_shape_tuple, ry, y_needs_reduction)\n  except KeyError:\n    rx, ry = array_ops.broadcast_gradient_args(x_shape_tuple, y_shape_tuple)\n    # TODO(mrry): If this becomes a bottleneck, add a multi-output version of\n    # `TF_TryEvaluateConstant()`.\n    rx_value = tuple(c_api.TF_TryEvaluateConstant_wrapper(\n        rx.graph._c_graph, rx._as_tf_output()))  # pylint: disable=protected-access\n    assert rx_value is not None\n    ry_value = tuple(c_api.TF_TryEvaluateConstant_wrapper(\n        ry.graph._c_graph, ry._as_tf_output()))  # pylint: disable=protected-access\n    assert ry_value is not None\n    g._bcast_grad_args_cache[(x_shape_tuple, y_shape_tuple)] = (  # pylint: disable=protected-access\n        rx_value, ry_value)\n\n    return (x_shape_tuple, rx_value, x_needs_reduction), (\n        y_shape_tuple, ry_value, y_needs_reduction)\n\n\n_empty_tuple = ()\n\n\ndef _IsScalar(x):\n  return x._shape_tuple() is _empty_tuple  # pylint: disable=protected-access\n\n\n@ops.RegisterGradient(\"Sum\")\ndef _SumGrad(op, grad):\n  \"\"\"Gradient for Sum.\"\"\"\n  # Fast path for when reducing to a scalar and ndims is known: adds only\n  # Reshape and Tile ops (and possibly a Shape).\n  input_0_shape = op.inputs[0]._shape_tuple()  # pylint: disable=protected-access\n  if input_0_shape is not None:\n    axes = tensor_util.constant_value(op.inputs[1])\n    if axes is not None:\n      rank = len(input_0_shape)\n      if np.array_equal(axes, np.arange(rank)):  # Reduce all dims.\n        if context.executing_eagerly():\n          ctx = context.context()\n          new_shape = ctx.ones_rank_cache().get(rank)\n          if new_shape is None:\n            new_shape = constant_op.constant([1] * rank, dtype=dtypes.int32)\n            ctx.ones_rank_cache().put(rank, new_shape)\n        else:\n          new_shape = [1] * rank\n        grad = array_ops.reshape(grad, new_shape)\n        # If shape is not fully defined (but rank is), we use Shape.\n        if None not in input_0_shape:\n          input_shape = constant_op.constant(input_0_shape, dtype=dtypes.int32)\n        else:\n          input_shape = array_ops.shape(op.inputs[0])\n        return [array_ops.tile(grad, input_shape), None]\n      elif None not in input_0_shape and not context.executing_eagerly():\n        # The shape and reduction indices are statically known, so we use a\n        # graph-level cache to avoid recomputing `reduced_shape()` for each\n        # invocation.\n        graph = ops.get_default_graph()\n\n        # Canonicalize `axes` to be a tuple of indices. 
The incoming\n # value may be a scalar or a vector, and may include negative indices.\n axes = tuple(axes.reshape(-1))\n\n try:\n output_shape_kept_dims, tile_scaling = graph._reduced_shape_cache[ # pylint: disable=protected-access\n (input_0_shape, axes)]\n except KeyError:\n\n # Compute and cache `output_shape_kept_dims` and `tile_scaling`.\n def EvaluateAsTuple(t):\n value = c_api.TF_TryEvaluateConstant_wrapper(\n t.graph._c_graph, t._as_tf_output()) # pylint: disable=protected-access\n assert value is not None\n return tuple(value)\n\n output_shape_kept_dims = EvaluateAsTuple(\n math_ops.reduced_shape(input_0_shape, axes))\n tile_scaling = EvaluateAsTuple(\n _safe_shape_div(input_0_shape, output_shape_kept_dims))\n graph._reduced_shape_cache[(input_0_shape, axes)] = ( # pylint:disable=protected-access\n output_shape_kept_dims, tile_scaling)\n\n grad = array_ops.reshape(grad, output_shape_kept_dims)\n return [array_ops.tile(grad, tile_scaling), None]\n\n input_shape = array_ops.shape(op.inputs[0])\n\n if compat.forward_compatible(2019, 9, 23):\n if not op.get_attr(\"keep_dims\"):\n with ops.colocate_with(input_shape):\n # TODO(apassos) remove this once device placement for eager ops makes\n # more sense.\n output_shape_kept_dims = math_ops.reduced_shape(input_shape,\n op.inputs[1])\n grad = array_ops.reshape(grad, output_shape_kept_dims)\n return [array_ops.broadcast_to(grad, input_shape), None]\n with ops.colocate_with(input_shape):\n output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])\n tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)\n grad = array_ops.reshape(grad, output_shape_kept_dims)\n return [array_ops.tile(grad, tile_scaling), None]\n\n\ndef _MinOrMaxGrad(op, grad):\n \"\"\"Gradient for Min or Max. Amazingly it's precisely the same code.\"\"\"\n input_shape = array_ops.shape(op.inputs[0])\n y = op.outputs[0]\n if not op.get_attr(\"keep_dims\"):\n output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])\n y = array_ops.reshape(y, output_shape_kept_dims)\n grad = array_ops.reshape(grad, output_shape_kept_dims)\n else:\n output_shape_kept_dims = array_ops.shape(y)\n\n # Compute the number of selected (maximum or minimum) elements in each\n # reduction dimension. 
If there are multiple minimum or maximum elements\n # then the gradient will be divided between them.\n indicators = math_ops.cast(math_ops.equal(y, op.inputs[0]), grad.dtype)\n num_selected = array_ops.reshape(\n math_ops.reduce_sum(indicators, op.inputs[1]), output_shape_kept_dims)\n\n return [math_ops.divide(indicators, num_selected) * grad, None]\n\n\[email protected](\"Max\")\ndef _MaxGrad(op, grad):\n \"\"\"Gradient for Max.\"\"\"\n return _MinOrMaxGrad(op, grad)\n\n\[email protected](\"Min\")\ndef _MinGrad(op, grad):\n return _MinOrMaxGrad(op, grad)\n\n\[email protected](\"Mean\")\ndef _MeanGrad(op, grad):\n \"\"\"Gradient for Mean.\"\"\"\n sum_grad = _SumGrad(op, grad)[0]\n input_shape = op.inputs[0]._shape_tuple() # pylint: disable=protected-access\n output_shape = op.outputs[0]._shape_tuple() # pylint: disable=protected-access\n if (input_shape is not None and output_shape is not None and\n None not in input_shape and None not in output_shape):\n input_size = np.prod(input_shape)\n output_size = np.prod(output_shape)\n factor = input_size // max(output_size, 1)\n factor = constant_op.constant(factor, dtype=sum_grad.dtype)\n else:\n input_shape = array_ops.shape(op.inputs[0])\n output_shape = array_ops.shape(op.outputs[0])\n factor = _safe_shape_div(\n math_ops.reduce_prod(input_shape), math_ops.reduce_prod(output_shape))\n return math_ops.truediv(sum_grad, math_ops.cast(factor, sum_grad.dtype)), None\n\n\[email protected](\"Prod\")\ndef _ProdGrad(op, grad):\n \"\"\"Gradient for Prod.\"\"\"\n # The gradient can be expressed by dividing the product by each entry of the\n # input tensor, but this approach can't deal with zeros in the input.\n # Here, we avoid this problem by composing the output as a product of two\n # cumprod operations.\n\n input_shape = array_ops.shape(op.inputs[0])\n # Reshape reduction indices for the case where the parameter is a scalar\n reduction_indices = array_ops.reshape(op.inputs[1], [-1])\n\n if compat.forward_compatible(2019, 9, 23):\n # Expand grad to full input shape\n if not op.get_attr(\"keep_dims\"):\n output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])\n grad = array_ops.reshape(grad, output_shape_kept_dims)\n\n grad = array_ops.broadcast_to(grad, input_shape)\n else:\n output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])\n tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)\n grad = array_ops.reshape(grad, output_shape_kept_dims)\n grad = array_ops.tile(grad, tile_scaling)\n\n # Pack all reduced dimensions into a single one, so we can perform the\n # cumprod ops. If the reduction dims list is empty, it defaults to float32,\n # so we need to cast here. 
We put all the shape-related ops on CPU to avoid\n # copying back and forth, and since listdiff is CPU only.\n with ops.device(\"/cpu:0\"):\n rank = array_ops.rank(op.inputs[0])\n reduction_indices = (reduction_indices + rank) % rank\n reduced = math_ops.cast(reduction_indices, dtypes.int32)\n idx = math_ops.range(0, rank)\n other, _ = array_ops.setdiff1d(idx, reduced)\n perm = array_ops.concat([reduced, other], 0)\n reduced_num = math_ops.reduce_prod(array_ops.gather(input_shape, reduced))\n other_num = math_ops.reduce_prod(array_ops.gather(input_shape, other))\n permuted = array_ops.transpose(op.inputs[0], perm)\n permuted_shape = array_ops.shape(permuted)\n reshaped = array_ops.reshape(permuted, (reduced_num, other_num))\n\n # Calculate product, leaving out the current entry\n left = math_ops.cumprod(reshaped, axis=0, exclusive=True)\n right = math_ops.cumprod(reshaped, axis=0, exclusive=True, reverse=True)\n # For complex inputs, the gradient is in the conjugate direction.\n y = array_ops.reshape(\n math_ops.conj(left) * math_ops.conj(right), permuted_shape)\n\n # Invert the transpose and reshape operations.\n # Make sure to set the statically known shape information through a reshape.\n out = grad * array_ops.transpose(y, array_ops.invert_permutation(perm))\n return array_ops.reshape(out, input_shape), None\n\n\[email protected](\"SegmentSum\")\ndef _SegmentSumGrad(op, grad):\n \"\"\"Gradient for SegmentSum.\"\"\"\n return array_ops.gather(grad, op.inputs[1]), None\n\n\[email protected](\"SegmentMean\")\ndef _SegmentMeanGrad(op, grad):\n \"\"\"Gradient for SegmentMean.\"\"\"\n input_rank = array_ops.rank(op.inputs[0])\n ones_shape = array_ops.concat([\n array_ops.shape(op.inputs[1]),\n array_ops.fill(array_ops.expand_dims(input_rank - 1, 0), 1)\n ], 0)\n ones = array_ops.fill(ones_shape, constant_op.constant(1, dtype=grad.dtype))\n scaled_grad = math_ops.divide(grad, math_ops.segment_sum(ones, op.inputs[1]))\n return array_ops.gather(scaled_grad, op.inputs[1]), None\n\n\[email protected](\"SparseSegmentSum\")\ndef _SparseSegmentSumGrad(op, grad):\n \"\"\"Gradient for SparseSegmentSum.\"\"\"\n input_rows = array_ops.shape(op.inputs[0])[0]\n return (math_ops.unsorted_segment_sum(\n array_ops.gather(grad, op.inputs[2]), op.inputs[1], input_rows), None,\n None)\n\n\[email protected](\"SparseSegmentSumWithNumSegments\")\ndef _SparseSegmentSumWithNumSegmentsGrad(op, grad):\n \"\"\"Gradient for SparseSegmentSumWithNumSegments.\"\"\"\n input_rows = array_ops.shape(op.inputs[0])[0]\n return (math_ops.unsorted_segment_sum(\n array_ops.gather(grad, op.inputs[2]), op.inputs[1], input_rows), None,\n None, None)\n\n\[email protected](\"SparseSegmentMean\")\ndef _SparseSegmentMeanGrad(op, grad):\n \"\"\"Gradient for SparseSegmentMean.\"\"\"\n dim0 = array_ops.shape(op.inputs[0])[0]\n return (math_ops.sparse_segment_mean_grad(grad, op.inputs[1], op.inputs[2],\n dim0), None, None)\n\n\[email protected](\"SparseSegmentMeanWithNumSegments\")\ndef _SparseSegmentMeanWithNumSegmentsGrad(op, grad):\n \"\"\"Gradient for SparseSegmentMeanWithNumSegments.\"\"\"\n dim0 = array_ops.shape(op.inputs[0])[0]\n return (math_ops.sparse_segment_mean_grad(grad, op.inputs[1], op.inputs[2],\n dim0), None, None, None)\n\n\[email protected](\"SparseSegmentSqrtN\")\ndef _SparseSegmentSqrtNGrad(op, grad):\n \"\"\"Gradient for SparseSegmentSqrtN.\"\"\"\n dim0 = array_ops.shape(op.inputs[0])[0]\n return (math_ops.sparse_segment_sqrt_n_grad(grad, op.inputs[1], op.inputs[2],\n dim0), None, None)\n\n\[email 
protected](\"SparseSegmentSqrtNWithNumSegments\")\ndef _SparseSegmentSqrtNWithNumSegmentsGrad(op, grad):\n \"\"\"Gradient for SparseSegmentSqrtNWithNumSegments.\"\"\"\n dim0 = array_ops.shape(op.inputs[0])[0]\n return (math_ops.sparse_segment_sqrt_n_grad(grad, op.inputs[1], op.inputs[2],\n dim0), None, None, None)\n\n\ndef _SegmentMinOrMaxGrad(op, grad):\n \"\"\" Gradient for SegmentMin and SegmentMax. \"\"\"\n zeros = array_ops.zeros_like(op.inputs[0], dtype=op.inputs[0].dtype)\n # Get the number of selected (minimum or maximum) elements in each segment.\n gathered_outputs = array_ops.gather(op.outputs[0], op.inputs[1])\n is_selected = math_ops.equal(op.inputs[0], gathered_outputs)\n num_selected = math_ops.segment_sum(\n math_ops.cast(is_selected, grad.dtype), op.inputs[1])\n # Compute the gradient for each segment. The gradient for the ith segment is\n # divided evenly among the selected elements in that segment.\n weighted_grads = math_ops.divide(grad, num_selected)\n gathered_grads = array_ops.gather(weighted_grads, op.inputs[1])\n return array_ops.where(is_selected, gathered_grads, zeros), None\n\n\[email protected](\"SegmentMin\")\ndef _SegmentMinGrad(op, grad):\n \"\"\"Gradient for SegmentMin.\"\"\"\n return _SegmentMinOrMaxGrad(op, grad)\n\n\[email protected](\"SegmentMax\")\ndef _SegmentMaxGrad(op, grad):\n \"\"\"Gradient for SegmentMax.\"\"\"\n return _SegmentMinOrMaxGrad(op, grad)\n\n\ndef _GatherDropNegatives(params,\n ids,\n zero_clipped_indices=None,\n is_positive=None):\n \"\"\" Helper function for unsorted segment ops.\n\n Gathers params for\n positive segment ids and gathers 0 for inputs with negative segment id.\n Also returns the clipped indices and a boolean mask with the same shape\n as ids where a positive id is masked as true. With this, the latter two\n can be passed as arguments to this function to reuse them.\n \"\"\"\n if zero_clipped_indices is None:\n zero_clipped_indices = math_ops.maximum(ids, array_ops.zeros_like(ids))\n gathered = array_ops.gather(params, zero_clipped_indices)\n if is_positive is None:\n is_positive = math_ops.greater_equal(ids, 0)\n # tf.where(condition, x, y) requires condition to have the same shape as x\n # and y.\n # todo(philjd): remove this if tf.where supports broadcasting (#9284)\n for _ in range(gathered.shape.ndims - is_positive.shape.ndims):\n is_positive = array_ops.expand_dims(is_positive, -1)\n is_positive = (\n is_positive & array_ops.ones_like(gathered, dtype=dtypes.bool))\n # replace gathered params of negative indices with 0\n zero_slice = array_ops.zeros_like(gathered)\n return (array_ops.where(is_positive, gathered, zero_slice),\n zero_clipped_indices, is_positive)\n\n\ndef _UnsortedSegmentMinOrMaxGrad(op, grad):\n \"\"\" Gradient for UnsortedSegmentMin and UnsortedSegmentMax. \"\"\"\n # Get the number of selected (minimum or maximum) elements in each segment.\n gathered_outputs, zero_clipped_indices, is_positive = \\\n _GatherDropNegatives(op.outputs[0], op.inputs[1])\n is_selected = math_ops.equal(op.inputs[0], gathered_outputs)\n is_selected = math_ops.logical_and(is_selected, is_positive)\n num_selected = math_ops.unsorted_segment_sum(\n math_ops.cast(is_selected, grad.dtype), op.inputs[1], op.inputs[2])\n # Compute the gradient for each segment. 
The gradient for the ith segment is\n # divided evenly among the selected elements in that segment.\n weighted_grads = math_ops.divide(grad, num_selected)\n gathered_grads, _, _ = _GatherDropNegatives(weighted_grads, None,\n zero_clipped_indices, is_positive)\n zeros = array_ops.zeros_like(gathered_grads)\n return array_ops.where(is_selected, gathered_grads, zeros), None, None\n\n\[email protected](\"UnsortedSegmentSum\")\ndef _UnsortedSegmentSumGrad(op, grad):\n \"\"\"Gradient for UnsortedSegmentSum.\"\"\"\n return _GatherDropNegatives(grad, op.inputs[1])[0], None, None\n\n\[email protected](\"UnsortedSegmentMax\")\ndef _UnsortedSegmentMaxGrad(op, grad):\n \"\"\" Gradient for UnsortedSegmentMax. \"\"\"\n return _UnsortedSegmentMinOrMaxGrad(op, grad)\n\n\[email protected](\"UnsortedSegmentMin\")\ndef _UnsortedSegmentMinGrad(op, grad):\n \"\"\" Gradient for UnsortedSegmentMin. \"\"\"\n return _UnsortedSegmentMinOrMaxGrad(op, grad)\n\n\[email protected](\"UnsortedSegmentProd\")\ndef _UnsortedSegmentProdGrad(op, grad):\n \"\"\" Gradient for UnsortedSegmentProd.\n\n The gradient can be expressed for each segment by dividing the segment's\n product by each element of the segment input tensor, but this approach can't\n deal with zeros in the input.\n Unlike reduce_prod we can't use cumsum here as individual segments may have\n a different number of elements. Therefore we consider three cases:\n 1) A segment input contains no zeros and we can safely divide by the input\n tensor.\n 2) A segment contains exactly one zero. Then the gradient of each input of\n the segment is zero except for the 0-input, there the gradient is\n the product of the remaining segment entries.\n 3) A segment contains at least two zeros. The gradient is zero for all\n segment inputs.\n \"\"\"\n # Note that unsorted_segment_sum will filter out the negative indices,\n # so we don't need to do a logical_and with is_positive here\n is_zero = math_ops.equal(op.inputs[0], 0)\n num_zeros = gen_math_ops.unsorted_segment_sum(\n math_ops.cast(is_zero, dtype=dtypes.int32), op.inputs[1], op.inputs[2])\n # handle case 3 and set the gradient to 0 for segments with more than one\n # 0 as input\n grad = array_ops.where(\n math_ops.greater(num_zeros, 1), array_ops.zeros_like(grad), grad)\n # replace all zeros with ones and compute the unsorted_segment_prod\n non_zero_data = array_ops.where(is_zero, array_ops.ones_like(op.inputs[0]),\n op.inputs[0])\n non_zero_prod = gen_math_ops.unsorted_segment_prod(non_zero_data,\n op.inputs[1], op.inputs[2])\n # clip the indices for gather to be positive\n zero_clipped_indices = math_ops.maximum(op.inputs[1],\n array_ops.zeros_like(op.inputs[1]))\n gathered_prod = array_ops.gather(op.outputs[0], zero_clipped_indices)\n gathered_non_zero_prod = array_ops.gather(non_zero_prod, zero_clipped_indices)\n prod_divided_by_el = gathered_prod / op.inputs[0] # May contain nan/inf.\n # Now fetch the individual results for segments containing 0 and those that\n # don't. 
is_zero will also fetch results for entries with negative index\n # but the following gather_drop_negatives sets the corresponding entry in\n # grad to 0 for these\n partial_derivative = array_ops.where(is_zero, gathered_non_zero_prod,\n prod_divided_by_el)\n gathered_grad = _GatherDropNegatives(grad, op.inputs[1],\n zero_clipped_indices)[0]\n return gathered_grad * partial_derivative, None, None\n\n\[email protected](\"Abs\")\ndef _AbsGrad(op, grad):\n x = op.inputs[0]\n return grad * math_ops.sign(x)\n\n\[email protected](\"Neg\")\ndef _NegGrad(_, grad):\n \"\"\"Returns -grad.\"\"\"\n return -grad\n\n\[email protected](\"Inv\")\ndef _InvGrad(op, grad):\n \"\"\"Returns -grad * (1 / x^2).\"\"\"\n y = op.outputs[0] # y = 1 / x\n return gen_math_ops.reciprocal_grad(y, grad)\n\n\[email protected](\"Reciprocal\")\ndef _ReciprocalGrad(op, grad):\n \"\"\"Returns -grad * (1 / x^2).\"\"\"\n y = op.outputs[0] # y = 1 / x\n return gen_math_ops.reciprocal_grad(y, grad)\n\n\[email protected](\"InvGrad\")\ndef _InvGradGrad(op, grad):\n b = op.inputs[1]\n # op.output[0]: y = -b * conj(a)^2\n with ops.control_dependencies([grad]):\n ca = math_ops.conj(op.inputs[0])\n cg = math_ops.conj(grad)\n return cg * -2.0 * b * ca, gen_math_ops.reciprocal_grad(ca, grad)\n\n\[email protected](\"ReciprocalGrad\")\ndef _ReciprocalGradGrad(op, grad):\n b = op.inputs[1]\n # op.output[0]: y = -b * conj(a)^2\n with ops.control_dependencies([grad]):\n ca = math_ops.conj(op.inputs[0])\n cg = math_ops.conj(grad)\n return cg * -2.0 * b * ca, gen_math_ops.reciprocal_grad(ca, grad)\n\n\[email protected](\"Square\")\ndef _SquareGrad(op, grad):\n x = op.inputs[0]\n # Added control dependencies to prevent 2*x from being computed too early.\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n y = constant_op.constant(2.0, dtype=x.dtype)\n return math_ops.multiply(grad, math_ops.multiply(x, y))\n\n\[email protected](\"Sqrt\")\ndef _SqrtGrad(op, grad):\n y = op.outputs[0] # y = x^(1/2)\n return gen_math_ops.sqrt_grad(y, grad)\n\n\[email protected](\"SqrtGrad\")\ndef _SqrtGradGrad(op, grad):\n a = op.inputs[0]\n y = op.outputs[0] # y = 0.5 * b / conj(a)\n with ops.control_dependencies([grad]):\n if compat.forward_compatible(2019, 9, 14):\n ga = gen_math_ops.xdivy(grad, a)\n return -gen_math_ops.mul_no_nan(y, math_ops.conj(ga)), 0.5 * ga\n else:\n ga = grad / a\n return -math_ops.conj(ga) * y, 0.5 * ga\n\n\[email protected](\"Rsqrt\")\ndef _RsqrtGrad(op, grad):\n \"\"\"Returns -0.5 * grad * conj(y)^3.\"\"\"\n y = op.outputs[0] # y = x^(-1/2)\n return gen_math_ops.rsqrt_grad(y, grad)\n\n\[email protected](\"RsqrtGrad\")\ndef _RsqrtGradGrad(op, grad):\n \"\"\"Returns backprop gradient for f(a,b) = -0.5 * b * conj(a)^3.\"\"\"\n a = op.inputs[0] # a = x^{-1/2}\n b = op.inputs[1] # backprop gradient for a\n with ops.control_dependencies([grad]):\n ca = math_ops.conj(a)\n cg = math_ops.conj(grad)\n grad_a = -1.5 * cg * b * math_ops.square(ca)\n grad_b = gen_math_ops.rsqrt_grad(ca, grad)\n return grad_a, grad_b\n\n\[email protected](\"Exp\")\ndef _ExpGrad(op, grad):\n \"\"\"Returns grad * exp(x).\"\"\"\n y = op.outputs[0] # y = e^x\n with ops.control_dependencies([grad]):\n y = math_ops.conj(y)\n if compat.forward_compatible(2019, 9, 14):\n return math_ops.mul_no_nan(y, grad)\n else:\n return grad * y\n\n\[email protected](\"Expm1\")\ndef _Expm1Grad(op, grad):\n \"\"\"Returns grad * exp(x).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n y = math_ops.exp(x)\n if 
compat.forward_compatible(2019, 9, 14):\n return math_ops.mul_no_nan(y, grad)\n else:\n return grad * y\n\n\[email protected](\"Log\")\ndef _LogGrad(op, grad):\n \"\"\"Returns grad * (1/x).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n if compat.forward_compatible(2019, 9, 14):\n return gen_math_ops.xdivy(grad, x)\n else:\n return grad * math_ops.reciprocal(x)\n\n\[email protected](\"Log1p\")\ndef _Log1pGrad(op, grad):\n \"\"\"Returns grad * (1/(1 + x)).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n if compat.forward_compatible(2019, 9, 14):\n return gen_math_ops.xdivy(grad, 1 + x)\n else:\n return grad * math_ops.reciprocal(1 + x)\n\n\[email protected](\"Xlogy\")\ndef _XLogyGrad(op, grad):\n \"\"\"Returns gradient of xlogy(x, y) with respect to x and y.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n with ops.control_dependencies([grad]):\n not_zero_x = math_ops.cast(\n math_ops.not_equal(x, math_ops.cast(0., dtype=x.dtype)), dtype=x.dtype)\n partial_x = gen_math_ops.xlogy(not_zero_x, y)\n partial_y = gen_math_ops.xdivy(x, y)\n return (array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx),\n array_ops.reshape(math_ops.reduce_sum(partial_y * grad, ry), sy))\n\n\[email protected](\"Xdivy\")\ndef _XDivyGrad(op, grad):\n \"\"\"Returns gradient of xdivy(x, y) with respect to x and y.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n with ops.control_dependencies([grad]):\n not_zero_x = math_ops.cast(\n math_ops.not_equal(x, math_ops.cast(0., dtype=x.dtype)), dtype=x.dtype)\n partial_x = gen_math_ops.xdivy(not_zero_x, y)\n partial_y = gen_math_ops.xdivy(math_ops.negative(x), y**2)\n return (array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx),\n array_ops.reshape(math_ops.reduce_sum(partial_y * grad, ry), sy))\n\n\[email protected](\"Sinh\")\ndef _SinhGrad(op, grad):\n \"\"\"Returns grad * cosh(x).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n return grad * math_ops.cosh(x)\n\n\[email protected](\"Cosh\")\ndef _CoshGrad(op, grad):\n \"\"\"Returns grad * sinh(x).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n return grad * math_ops.sinh(x)\n\n\[email protected](\"Tanh\")\ndef _TanhGrad(op, grad):\n \"\"\"Returns grad * (1 - tanh(x) * tanh(x)).\"\"\"\n y = op.outputs[0] # y = tanh(x)\n with ops.control_dependencies([grad]):\n y = math_ops.conj(y)\n return gen_math_ops.tanh_grad(y, grad)\n\n\[email protected](\"Asinh\")\ndef _AsinhGrad(op, grad):\n \"\"\"Returns grad * 1/cosh(y).\"\"\"\n y = op.outputs[0]\n with ops.control_dependencies([grad]):\n y = math_ops.conj(y)\n return grad / math_ops.cosh(y)\n\n\[email protected](\"Acosh\")\ndef _AcoshGrad(op, grad):\n \"\"\"Returns grad * 1/sinh(y).\"\"\"\n y = op.outputs[0]\n with ops.control_dependencies([grad]):\n y = math_ops.conj(y)\n if compat.forward_compatible(2019, 9, 14):\n return math_ops.xdivy(grad, math_ops.sinh(y))\n else:\n return grad / math_ops.sinh(y)\n\n\[email protected](\"Atanh\")\ndef _AtanhGrad(op, grad):\n \"\"\"Returns grad * 1/ (1 - x^2).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n x2 = math_ops.square(x)\n one = constant_op.constant(1, dtype=grad.dtype)\n inv = 
math_ops.reciprocal(math_ops.subtract(one, x2))\n return grad * inv\n\n\[email protected](\"TanhGrad\")\ndef _TanhGradGrad(op, grad):\n with ops.control_dependencies([grad]):\n a = math_ops.conj(op.inputs[0])\n b = math_ops.conj(op.inputs[1])\n return grad * -2.0 * b * a, gen_math_ops.tanh_grad(a, grad)\n\n\[email protected](\"Erf\")\ndef _ErfGrad(op, grad):\n \"\"\"Returns grad * 2/sqrt(pi) * exp(-x**2).\"\"\"\n x = op.inputs[0]\n two_over_root_pi = constant_op.constant(2 / np.sqrt(np.pi), dtype=grad.dtype)\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n return grad * two_over_root_pi * math_ops.exp(-math_ops.square(x))\n\n\[email protected](\"Erfc\")\ndef _ErfcGrad(op, grad):\n \"\"\"Returns -grad * 2/sqrt(pi) * exp(-x**2).\"\"\"\n x = op.inputs[0]\n minus_two_over_root_pi = constant_op.constant(\n -2 / np.sqrt(np.pi), dtype=grad.dtype)\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n return grad * minus_two_over_root_pi * math_ops.exp(-math_ops.square(x))\n\n\[email protected](\"Lgamma\")\ndef _LgammaGrad(op, grad):\n \"\"\"Returns grad * digamma(x).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n if compat.forward_compatible(2019, 9, 14):\n return math_ops.mul_no_nan(math_ops.digamma(x), grad)\n else:\n return grad * math_ops.digamma(x)\n\n\[email protected](\"Digamma\")\ndef _DigammaGrad(op, grad):\n \"\"\"Compute gradient of the digamma function with respect to its argument.\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n partial_x = math_ops.polygamma(array_ops.constant(1, dtype=x.dtype), x)\n if compat.forward_compatible(2019, 9, 14):\n return math_ops.mul_no_nan(partial_x, grad)\n else:\n return grad * partial_x\n\n\[email protected](\"BesselI0e\")\ndef _BesselI0eGrad(op, grad):\n \"\"\"Compute gradient of bessel_i0e(x) with respect to its argument.\"\"\"\n x = op.inputs[0]\n y = op.outputs[0]\n with ops.control_dependencies([grad]):\n partial_x = (math_ops.bessel_i1e(x) - math_ops.sign(x) * y)\n if compat.forward_compatible(2019, 9, 14):\n return math_ops.mul_no_nan(partial_x, grad)\n else:\n return grad * partial_x\n\n\[email protected](\"BesselI1e\")\ndef _BesselI1eGrad(op, grad):\n \"\"\"Compute gradient of bessel_i1e(x) with respect to its argument.\"\"\"\n x = op.inputs[0]\n y = op.outputs[0]\n with ops.control_dependencies([grad]):\n # For x = 0, the correct gradient is 0.5.\n # However, the main branch gives NaN because of the division by x, so\n # we impute the gradient manually.\n # An alternative solution is to express the gradient via bessel_i0e and\n # bessel_i2e, but the latter is not yet implemented in Eigen.\n eps = np.finfo(x.dtype.as_numpy_dtype).eps\n zeros = array_ops.zeros_like(x)\n x_is_not_tiny = math_ops.abs(x) > eps\n safe_x = array_ops.where(x_is_not_tiny, x, eps + zeros)\n dy_dx = math_ops.bessel_i0e(safe_x) - y * (\n math_ops.sign(safe_x) + math_ops.reciprocal(safe_x))\n dy_dx = array_ops.where(x_is_not_tiny, dy_dx, 0.5 + zeros)\n if compat.forward_compatible(2019, 9, 14):\n return math_ops.mul_no_nan(dy_dx, grad)\n else:\n return grad * dy_dx\n\n\[email protected](\"Igamma\")\ndef _IgammaGrad(op, grad):\n \"\"\"Returns gradient of igamma(a, x) with respect to a and x.\"\"\"\n a = op.inputs[0]\n x = op.inputs[1]\n sa = array_ops.shape(a)\n sx = array_ops.shape(x)\n ra, rx = gen_array_ops.broadcast_gradient_args(sa, sx)\n\n with ops.control_dependencies([grad]):\n partial_a = gen_math_ops.igamma_grad_a(a, x)\n # Perform operations in 
log space before summing, because Gamma(a)\n # and Gamma'(a) can grow large.\n partial_x = math_ops.exp(-x + (a - 1) * math_ops.log(x) -\n math_ops.lgamma(a))\n if compat.forward_compatible(2019, 9, 14):\n return (array_ops.reshape(\n math_ops.reduce_sum(math_ops.mul_no_nan(partial_a, grad), ra), sa),\n array_ops.reshape(\n math_ops.reduce_sum(math_ops.mul_no_nan(partial_x, grad), rx),\n sx))\n else:\n return (array_ops.reshape(math_ops.reduce_sum(partial_a * grad, ra), sa),\n array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))\n\n\[email protected](\"Igammac\")\ndef _IgammacGrad(op, grad):\n \"\"\"Returns gradient of igammac(a, x) = 1 - igamma(a, x) w.r.t. a and x.\"\"\"\n igamma_grad_a, igamma_grad_x = _IgammaGrad(op, grad)\n return (-igamma_grad_a, -igamma_grad_x)\n\n\[email protected](\"Betainc\")\ndef _BetaincGrad(op, grad):\n \"\"\"Returns gradient of betainc(a, b, x) with respect to x.\"\"\"\n # TODO(ebrevdo): Perhaps add the derivative w.r.t. a, b\n a, b, x = op.inputs\n\n # two cases: x is a scalar and a/b are same-shaped tensors, or vice\n # versa; so its sufficient to check against shape(a).\n sa = array_ops.shape(a)\n sx = array_ops.shape(x)\n _, rx = gen_array_ops.broadcast_gradient_args(sa, sx)\n\n # Perform operations in log space before summing, because terms\n # can grow large.\n log_beta = (\n gen_math_ops.lgamma(a) + gen_math_ops.lgamma(b) -\n gen_math_ops.lgamma(a + b))\n partial_x = math_ops.exp((b - 1) * math_ops.log(1 - x) +\n (a - 1) * math_ops.log(x) - log_beta)\n\n # TODO(b/36815900): Mark None return values as NotImplemented\n if compat.forward_compatible(2019, 9, 14):\n return (\n None, # da\n None, # db\n array_ops.reshape(\n math_ops.reduce_sum(math_ops.mul_no_nan(partial_x, grad), rx), sx))\n else:\n return (\n None, # da\n None, # db\n array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))\n\n\[email protected](\"Zeta\")\ndef _ZetaGrad(op, grad):\n \"\"\"Returns gradient of zeta(x, q) with respect to x and q.\"\"\"\n # TODO(tillahoffmann): Add derivative with respect to x\n x = op.inputs[0]\n q = op.inputs[1]\n # Broadcast gradients\n sx = array_ops.shape(x)\n sq = array_ops.shape(q)\n unused_rx, rq = gen_array_ops.broadcast_gradient_args(sx, sq)\n # Evaluate gradient\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n q = math_ops.conj(q)\n partial_q = -x * math_ops.zeta(x + 1, q)\n # TODO(b/36815900): Mark None return values as NotImplemented\n if compat.forward_compatible(2019, 9, 14):\n return (None,\n array_ops.reshape(\n math_ops.reduce_sum(math_ops.mul_no_nan(partial_q, grad), rq),\n sq))\n else:\n return (None,\n array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq))\n\n\[email protected](\"Polygamma\")\ndef _PolygammaGrad(op, grad):\n \"\"\"Returns gradient of psi(n, x) with respect to n and x.\"\"\"\n # TODO(tillahoffmann): Add derivative with respect to n\n n = op.inputs[0]\n x = op.inputs[1]\n # Broadcast gradients\n sn = array_ops.shape(n)\n sx = array_ops.shape(x)\n unused_rn, rx = gen_array_ops.broadcast_gradient_args(sn, sx)\n # Evaluate gradient\n with ops.control_dependencies([grad]):\n n = math_ops.conj(n)\n x = math_ops.conj(x)\n partial_x = math_ops.polygamma(n + 1, x)\n # TODO(b/36815900): Mark None return values as NotImplemented\n if compat.forward_compatible(2019, 9, 14):\n return (None,\n array_ops.reshape(\n math_ops.reduce_sum(math_ops.mul_no_nan(partial_x, grad), rx),\n sx))\n else:\n return (None,\n array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))\n\n\[email 
protected](\"Sigmoid\")\ndef _SigmoidGrad(op, grad):\n \"\"\"Returns grad * sigmoid(x) * (1 - sigmoid(x)).\"\"\"\n y = op.outputs[0] # y = sigmoid(x)\n with ops.control_dependencies([grad]):\n y = math_ops.conj(y)\n return gen_math_ops.sigmoid_grad(y, grad)\n\n\[email protected](\"SigmoidGrad\")\ndef _SigmoidGradGrad(op, grad):\n with ops.control_dependencies([grad]):\n a = math_ops.conj(op.inputs[0])\n b = math_ops.conj(op.inputs[1])\n gb = grad * b\n return gb - 2.0 * gb * a, gen_math_ops.sigmoid_grad(a, grad)\n\n\[email protected](\"Sign\")\ndef _SignGrad(op, _):\n \"\"\"Returns 0.\"\"\"\n x = op.inputs[0]\n return array_ops.zeros(array_ops.shape(x), dtype=x.dtype)\n\n\[email protected](\"Sin\")\ndef _SinGrad(op, grad):\n \"\"\"Returns grad * cos(x).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n return grad * math_ops.cos(x)\n\n\[email protected](\"Cos\")\ndef _CosGrad(op, grad):\n \"\"\"Returns grad * -sin(x).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n return -grad * math_ops.sin(x)\n\n\[email protected](\"Tan\")\ndef _TanGrad(op, grad):\n \"\"\"Returns grad * 1/sec^2(x).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n secx = math_ops.reciprocal(math_ops.cos(x))\n secx2 = math_ops.square(secx)\n if compat.forward_compatible(2019, 9, 14):\n return math_ops.mul_no_nan(secx2, grad)\n else:\n return secx2 * grad\n\n\[email protected](\"Asin\")\ndef _AsinGrad(op, grad):\n \"\"\"Returns grad * 1/sqrt(1-x^2).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n x2 = math_ops.square(x)\n one = constant_op.constant(1, dtype=grad.dtype)\n den = math_ops.sqrt(math_ops.subtract(one, x2))\n if compat.forward_compatible(2019, 9, 14):\n return math_ops.xdivy(grad, den)\n else:\n inv = math_ops.reciprocal(den)\n return grad * inv\n\n\[email protected](\"Acos\")\ndef _AcosGrad(op, grad):\n \"\"\"Returns grad * -1/sqrt(1-x^2).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n x2 = math_ops.square(x)\n one = constant_op.constant(1, dtype=grad.dtype)\n den = math_ops.sqrt(math_ops.subtract(one, x2))\n if compat.forward_compatible(2019, 9, 14):\n return -math_ops.xdivy(grad, den)\n else:\n inv = math_ops.reciprocal(den)\n return -grad * inv\n\n\[email protected](\"Atan\")\ndef _AtanGrad(op, grad):\n \"\"\"Returns grad * 1/ (1 + x^2).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n x = math_ops.conj(x)\n x2 = math_ops.square(x)\n one = constant_op.constant(1, dtype=grad.dtype)\n inv = math_ops.reciprocal(math_ops.add(one, x2))\n return grad * inv\n\n\[email protected](\"Atan2\")\ndef _Atan2Grad(op, grad):\n \"\"\"Returns grad * x / (x^2 + y^2), grad * -y / (x^2 + y^2).\"\"\"\n y = op.inputs[0]\n x = op.inputs[1]\n with ops.control_dependencies([grad]):\n if compat.forward_compatible(2019, 9, 14):\n grad_inv = math_ops.xdivy(grad, (math_ops.square(x) + math_ops.square(y)))\n else:\n grad_inv = grad / (math_ops.square(x) + math_ops.square(y))\n return x * grad_inv, -y * grad_inv\n\n\[email protected](\"AddN\")\ndef _AddNGrad(op, grad):\n \"\"\"Copies the gradient to all inputs.\"\"\"\n # Not broadcasting.\n return [grad] * len(op.inputs)\n\n\ndef _ShapesFullySpecifiedAndEqual(x, y, grad):\n # pylint: disable=protected-access\n x_shape = x._shape_tuple()\n y_shape = y._shape_tuple()\n grad_shape = grad._shape_tuple()\n # pylint: enable=protected-access\n return (x_shape == 
y_shape and x_shape == grad_shape and\n x_shape is not None and None not in x_shape)\n\n\[email protected](\"Add\")\[email protected](\"AddV2\")\ndef _AddGrad(op, grad):\n \"\"\"Gradient for Add.\"\"\"\n y = op.inputs[1]\n skip_input_indices = None\n try:\n skip_input_indices = op.skip_input_indices\n if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar(\n y):\n return grad, None\n except AttributeError:\n # No gradient skipping, so do the full gradient computation\n pass\n x = op.inputs[0]\n if (isinstance(grad, ops.Tensor) and\n _ShapesFullySpecifiedAndEqual(x, y, grad)):\n return grad, grad\n (sx, rx, must_reduce_x), (sy, ry, must_reduce_y) = (\n SmartBroadcastGradientArgs(x, y, grad))\n if skip_input_indices is not None and 0 in skip_input_indices:\n gx = None\n elif not must_reduce_x:\n gx = grad\n else:\n gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx)\n if skip_input_indices is not None and 1 in skip_input_indices:\n gy = None\n elif not must_reduce_y:\n gy = grad\n else:\n gy = array_ops.reshape(math_ops.reduce_sum(grad, ry), sy)\n return (gx, gy)\n\n\[email protected](\"Sub\")\ndef _SubGrad(op, grad):\n \"\"\"Gradient for Sub.\"\"\"\n y = op.inputs[1]\n skip_input_indices = None\n try:\n skip_input_indices = op.skip_input_indices\n if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar(\n y):\n return grad, None\n except AttributeError:\n # No gradient skipping, so do the full gradient computation\n pass\n x = op.inputs[0]\n if (isinstance(grad, ops.Tensor) and\n _ShapesFullySpecifiedAndEqual(x, y, grad)):\n return grad, -grad\n (sx, rx, must_reduce_x), (sy, ry, must_reduce_y) = (\n SmartBroadcastGradientArgs(x, y, grad))\n if skip_input_indices is not None and 0 in skip_input_indices:\n gx = None\n elif not must_reduce_x:\n gx = grad\n else:\n gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx)\n if skip_input_indices is not None and 1 in skip_input_indices:\n gy = None\n elif not must_reduce_y:\n gy = -grad\n else:\n gy = array_ops.reshape(math_ops.reduce_sum(-grad, ry), sy)\n return (gx, gy)\n\n\[email protected](\"Mul\")\ndef _MulGrad(op, grad):\n \"\"\"The gradient of scalar multiplication.\"\"\"\n y = op.inputs[1]\n skip_input_indices = None\n try:\n skip_input_indices = op.skip_input_indices\n if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar(\n y):\n return gen_math_ops.mul(grad, math_ops.conj(y)), None\n except AttributeError:\n # No gradient skipping, so do the full gradient computation\n pass\n x = op.inputs[0]\n if (isinstance(grad, ops.Tensor) and\n _ShapesFullySpecifiedAndEqual(x, y, grad) and\n grad.dtype in (dtypes.int32, dtypes.float32)):\n return gen_math_ops.mul(grad, y), gen_math_ops.mul(grad, x)\n assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, \" vs. 
\", y.dtype)\n\n (sx, rx, must_reduce_x), (sy, ry, must_reduce_y) = (\n SmartBroadcastGradientArgs(x, y, grad))\n x = math_ops.conj(x)\n y = math_ops.conj(y)\n if skip_input_indices is not None and 0 in skip_input_indices:\n gx = None\n elif not must_reduce_x:\n gx = gen_math_ops.mul(grad, y)\n else:\n gx = array_ops.reshape(\n math_ops.reduce_sum(gen_math_ops.mul(grad, y), rx), sx)\n if skip_input_indices is not None and 1 in skip_input_indices:\n gy = None\n elif not must_reduce_y:\n gy = gen_math_ops.mul(x, grad)\n else:\n gy = array_ops.reshape(\n math_ops.reduce_sum(gen_math_ops.mul(x, grad), ry), sy)\n return (gx, gy)\n\n\[email protected](\"MulNoNan\")\ndef _MulNoNanGrad(op, grad):\n \"\"\"The gradient of scalar multiplication with NaN-suppression.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n if (isinstance(grad, ops.Tensor) and\n _ShapesFullySpecifiedAndEqual(x, y, grad)):\n return gen_math_ops.mul_no_nan(grad, y), gen_math_ops.mul_no_nan(x, grad)\n assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, \" vs. \", y.dtype)\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n return (array_ops.reshape(\n math_ops.reduce_sum(gen_math_ops.mul_no_nan(grad, y), rx), sx),\n array_ops.reshape(\n math_ops.reduce_sum(gen_math_ops.mul_no_nan(x, grad), ry), sy))\n\n\[email protected](\"Div\")\ndef _DivGrad(op, grad):\n \"\"\"The gradient for the Div operator.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n x = math_ops.conj(x)\n y = math_ops.conj(y)\n if compat.forward_compatible(2019, 9, 14):\n return (array_ops.reshape(\n math_ops.reduce_sum(math_ops.xdivy(grad, y), rx), sx),\n array_ops.reshape(\n math_ops.reduce_sum(\n math_ops.mul_no_nan(\n math_ops.divide(math_ops.divide(-x, y), y), grad), ry),\n sy))\n else:\n return (array_ops.reshape(\n math_ops.reduce_sum(math_ops.divide(grad, y), rx), sx),\n array_ops.reshape(\n math_ops.reduce_sum(\n grad * math_ops.divide(math_ops.divide(-x, y), y), ry), sy))\n\n\[email protected](\"FloorDiv\")\ndef _FloorDivGrad(_, unused_grad):\n \"\"\"The gradient for the FloorDiv operator.\"\"\"\n return None, None\n\n\[email protected](\"FloorMod\")\ndef _FloorModGrad(op, grad):\n \"\"\"Returns grad * (1, -floor(x/y)).\"\"\"\n x = math_ops.conj(op.inputs[0])\n y = math_ops.conj(op.inputs[1])\n\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n floor_xy = math_ops.floor_div(x, y)\n gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx)\n gy = array_ops.reshape(\n math_ops.reduce_sum(grad * math_ops.negative(floor_xy), ry), sy)\n return gx, gy\n\n\[email protected](\"TruncateDiv\")\ndef _TruncateDivGrad(_, unused_grad):\n return None, None\n\n\[email protected](\"RealDiv\")\ndef _RealDivGrad(op, grad):\n \"\"\"RealDiv op gradient.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n x = math_ops.conj(x)\n y = math_ops.conj(y)\n if compat.forward_compatible(2019, 9, 14):\n return (array_ops.reshape(\n math_ops.reduce_sum(math_ops.xdivy(grad, y), rx), sx),\n array_ops.reshape(\n math_ops.reduce_sum(\n math_ops.mul_no_nan(\n math_ops.realdiv(math_ops.realdiv(-x, y), y), grad),\n ry), sy))\n else:\n return (array_ops.reshape(\n math_ops.reduce_sum(math_ops.realdiv(grad, y), rx), sx),\n array_ops.reshape(\n math_ops.reduce_sum(\n grad * 
\n\n@ops.RegisterGradient(\"RealDiv\")\ndef _RealDivGrad(op, grad):\n \"\"\"RealDiv op gradient.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n x = math_ops.conj(x)\n y = math_ops.conj(y)\n if compat.forward_compatible(2019, 9, 14):\n return (array_ops.reshape(\n math_ops.reduce_sum(math_ops.xdivy(grad, y), rx), sx),\n array_ops.reshape(\n math_ops.reduce_sum(\n math_ops.mul_no_nan(\n math_ops.realdiv(math_ops.realdiv(-x, y), y), grad),\n ry), sy))\n else:\n return (array_ops.reshape(\n math_ops.reduce_sum(math_ops.realdiv(grad, y), rx), sx),\n array_ops.reshape(\n math_ops.reduce_sum(\n grad * math_ops.realdiv(math_ops.realdiv(-x, y), y), ry),\n sy))\n\n\n@ops.RegisterGradient(\"DivNoNan\")\ndef _DivNoNanGrad(op, grad):\n \"\"\"DivNoNan op gradient.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n x = math_ops.conj(x)\n y = math_ops.conj(y)\n if compat.forward_compatible(2019, 9, 14):\n return (array_ops.reshape(\n math_ops.reduce_sum(math_ops.div_no_nan(grad, y), rx), sx),\n array_ops.reshape(\n math_ops.reduce_sum(\n math_ops.mul_no_nan(\n math_ops.div_no_nan(math_ops.div_no_nan(-x, y), y),\n grad), ry), sy))\n else:\n return (array_ops.reshape(\n math_ops.reduce_sum(math_ops.div_no_nan(grad, y), rx), sx),\n array_ops.reshape(\n math_ops.reduce_sum(\n grad * math_ops.div_no_nan(math_ops.div_no_nan(-x, y), y),\n ry), sy))\n\n\n@ops.RegisterGradient(\"Pow\")\ndef _PowGrad(op, grad):\n \"\"\"Returns grad * (y*x^(y-1), z*log(x)).\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n use_mul_no_nan = compat.forward_compatible(2019, 9, 14)\n skip_input_indices = None\n try:\n skip_input_indices = op.skip_input_indices\n # TODO(mrry): If `y` is a constant, we can combine `tf.sub()` and the\n # constant `1` into a single constant op.\n if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar(\n y):\n x = math_ops.conj(x)\n y = math_ops.conj(y)\n if use_mul_no_nan:\n return gen_math_ops.mul_no_nan(y * math_ops.pow(x, y - 1), grad), None\n else:\n return grad * y * math_ops.pow(x, y - 1), None\n\n except AttributeError:\n # No gradient skipping, so do the full gradient computation\n pass\n\n (sx, rx, must_reduce_x), (sy, ry, must_reduce_y) = (\n SmartBroadcastGradientArgs(x, y, grad))\n x = math_ops.conj(x)\n y = math_ops.conj(y)\n\n if skip_input_indices is None or 0 not in skip_input_indices:\n if use_mul_no_nan:\n gx = gen_math_ops.mul_no_nan(y * math_ops.pow(x, y - 1), grad)\n else:\n gx = grad * y * math_ops.pow(x, y - 1)\n if must_reduce_x:\n gx = array_ops.reshape(math_ops.reduce_sum(gx, rx), sx)\n else:\n gx = None\n\n if skip_input_indices is None or 1 not in skip_input_indices:\n z = math_ops.conj(op.outputs[0])\n\n # Avoid false singularity at x = 0\n if x.dtype.is_complex:\n # real(x) < 0 is fine for the complex case\n mask = math_ops.not_equal(x, 0)\n else:\n # There's no sensible real value to return if x < 0, so return 0\n mask = x > 0\n safe_x = array_ops.where(mask, x, array_ops.ones_like(x))\n log_x = array_ops.where(mask, math_ops.log(safe_x), array_ops.zeros_like(x))\n if use_mul_no_nan:\n gy = gen_math_ops.mul_no_nan(z * log_x, grad)\n else:\n gy = grad * z * log_x\n if must_reduce_y:\n gy = array_ops.reshape(math_ops.reduce_sum(gy, ry), sy)\n else:\n gy = None\n\n return gx, gy\n\n\ndef _MaximumMinimumGradInputOnly(op, grad, selector_op):\n x = op.inputs[0]\n y = op.inputs[1]\n zeros = array_ops.zeros_like(grad)\n xmask = selector_op(x, y)\n xgrad = array_ops.where(xmask, grad, zeros)\n ygrad = None # Return None for ygrad since the config allows that.\n return (xgrad, ygrad)\n
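\n\n# --- Editor's addition (illustrative, not part of the original source): a\n# finite-difference check of the Pow gradients above, d(x^y)/dx = y*x^(y-1)\n# and d(x^y)/dy = x^y * log(x) for x > 0; for real x <= 0 the code masks\n# log(x) to zero rather than emit NaN. Helper name and values are arbitrary.\ndef _editor_check_pow_grad(x=2.0, y=3.0, eps=1e-6):\n  import math\n  num_dx = ((x + eps) ** y - (x - eps) ** y) / (2.0 * eps)\n  num_dy = (x ** (y + eps) - x ** (y - eps)) / (2.0 * eps)\n  assert abs(num_dx - y * x ** (y - 1)) < 1e-4\n  assert abs(num_dy - x ** y * math.log(x)) < 1e-4\n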
\n\ndef _MaximumMinimumGrad(op, grad, selector_op):\n \"\"\"Factor out the code for the gradient of Maximum or Minimum.\"\"\"\n y = op.inputs[1]\n skip_input_indices = None\n try:\n skip_input_indices = op.skip_input_indices\n if skip_input_indices is not None and 1 in skip_input_indices and _IsScalar(\n y):\n # When we want to get gradients for the first input only, and the second\n # input tensor is a scalar, we can do a much simpler calculation\n return _MaximumMinimumGradInputOnly(op, grad, selector_op)\n except AttributeError:\n # No gradient skipping, so do the full gradient computation\n pass\n x = op.inputs[0]\n gdtype = grad.dtype\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n gradshape = array_ops.shape(grad)\n zeros = array_ops.zeros(gradshape, gdtype)\n xmask = selector_op(x, y)\n rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n if skip_input_indices is not None and 0 in skip_input_indices:\n gx = None\n else:\n xgrad = array_ops.where(xmask, grad, zeros)\n gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx)\n\n if skip_input_indices is not None and 1 in skip_input_indices:\n gy = None\n else:\n ygrad = array_ops.where(xmask, zeros, grad)\n gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy)\n\n return (gx, gy)\n\n\n@ops.RegisterGradient(\"Maximum\")\ndef _MaximumGrad(op, grad):\n \"\"\"Returns grad*(x >= y, x < y) with type of grad.\"\"\"\n return _MaximumMinimumGrad(op, grad, math_ops.greater_equal)\n\n\n@ops.RegisterGradient(\"Minimum\")\ndef _MinimumGrad(op, grad):\n \"\"\"Returns grad*(x <= y, x > y) with type of grad.\"\"\"\n return _MaximumMinimumGrad(op, grad, math_ops.less_equal)\n\n\n@ops.RegisterGradient(\"SquaredDifference\")\ndef _SquaredDifferenceGrad(op, grad):\n \"\"\"Returns the gradient for (x-y)^2.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n skip_input_indices = None\n try:\n skip_input_indices = op.skip_input_indices\n except AttributeError:\n # No gradient skipping, so do the full gradient computation\n pass\n\n with ops.control_dependencies([grad]):\n # The parens ensure that if grad is IndexedSlices, it'll get multiplied by\n # Tensor (not a number like 2.0) which causes it to convert to Tensor.\n x_grad = math_ops.scalar_mul(2.0, grad) * (x - y)\n\n if (isinstance(grad, ops.Tensor) and\n _ShapesFullySpecifiedAndEqual(x, y, grad)):\n return x_grad, -x_grad\n\n (sx, rx, must_reduce_x), (sy, ry, must_reduce_y) = (\n SmartBroadcastGradientArgs(x, y, grad))\n\n if skip_input_indices is not None and 0 in skip_input_indices:\n gx = None\n elif must_reduce_x:\n gx = array_ops.reshape(math_ops.reduce_sum(x_grad, rx), sx)\n else:\n gx = x_grad\n\n if skip_input_indices is not None and 1 in skip_input_indices:\n gy = None\n elif must_reduce_y:\n gy = -array_ops.reshape(math_ops.reduce_sum(x_grad, ry), sy)\n else:\n gy = -x_grad\n return (gx, gy)\n\n\n# Logical operations have no gradients.\nops.NotDifferentiable(\"Less\")\nops.NotDifferentiable(\"LessEqual\")\nops.NotDifferentiable(\"Greater\")\nops.NotDifferentiable(\"GreaterEqual\")\nops.NotDifferentiable(\"Equal\")\nops.NotDifferentiable(\"ApproximateEqual\")\nops.NotDifferentiable(\"NotEqual\")\nops.NotDifferentiable(\"LogicalAnd\")\nops.NotDifferentiable(\"LogicalOr\")\nops.NotDifferentiable(\"LogicalNot\")\n\n\n@ops.RegisterGradient(\"Select\")\ndef _SelectGrad(op, grad):\n c = op.inputs[0]\n x = op.inputs[1]\n zeros = array_ops.zeros_like(x)\n return (None, array_ops.where(c, grad, zeros), array_ops.where(\n c, zeros, grad))\n
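\n\n# --- Editor's addition (illustrative, not part of the original source): the\n# Select/where gradient simply routes the incoming grad to whichever branch\n# produced each output element, with zeros for the other branch; the condition\n# itself gets no gradient (returned as None above). Helper name is hypothetical.\ndef _editor_demo_select_grad():\n  import numpy as np\n  c = np.array([True, False, True])\n  grad = np.array([1.0, 2.0, 3.0])\n  gx = np.where(c, grad, 0.0)  # -> [1., 0., 3.]\n  gy = np.where(c, 0.0, grad)  # -> [0., 2., 0.]\n  return gx, gy\n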
\n\n@ops.RegisterGradient(\"SelectV2\")\ndef _SelectGradV2(op, grad):\n c = op.inputs[0]\n x = op.inputs[1]\n y = op.inputs[2]\n zeros = array_ops.zeros([], dtype=grad.dtype.base_dtype)\n gx = array_ops.where_v2(c, grad, zeros)\n x_shape = array_ops.shape(x)\n output_shape = array_ops.shape(op.outputs[0])\n # Reduce away broadcasted leading dims.\n reduce_x, _ = gen_array_ops.broadcast_gradient_args(x_shape, output_shape)\n gx = math_ops.reduce_sum(gx, keepdims=True, axis=reduce_x)\n gx = array_ops.reshape(gx, x_shape)\n\n gy = array_ops.where_v2(c, zeros, grad)\n y_shape = array_ops.shape(y)\n # Reduce away broadcasted leading dims.\n reduce_y, _ = gen_array_ops.broadcast_gradient_args(y_shape, output_shape)\n gy = math_ops.reduce_sum(gy, keepdims=True, axis=reduce_y)\n gy = array_ops.reshape(gy, y_shape)\n\n return (None, gx, gy)\n\n\ndef _MatMulGradAgainstFirstOnly(op, grad):\n \"\"\"Gradient for MatMul, only for the first input.\"\"\"\n t_a = op.get_attr(\"transpose_a\")\n t_b = op.get_attr(\"transpose_b\")\n b = math_ops.conj(op.inputs[1])\n if not t_a and not t_b:\n grad_a = gen_math_ops.mat_mul(grad, b, transpose_b=True)\n elif not t_a and t_b:\n grad_a = gen_math_ops.mat_mul(grad, b)\n elif t_a and not t_b:\n grad_a = gen_math_ops.mat_mul(b, grad, transpose_b=True)\n elif t_a and t_b:\n grad_a = gen_math_ops.mat_mul(b, grad, transpose_a=True, transpose_b=True)\n return grad_a, None\n\n\ndef _MatMulGradAgainstSecondOnly(op, grad):\n \"\"\"Gradient for MatMul, only for the second input.\"\"\"\n t_a = op.get_attr(\"transpose_a\")\n t_b = op.get_attr(\"transpose_b\")\n a = math_ops.conj(op.inputs[0])\n if not t_a and not t_b:\n grad_b = gen_math_ops.mat_mul(a, grad, transpose_a=True)\n elif not t_a and t_b:\n grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True)\n elif t_a and not t_b:\n grad_b = gen_math_ops.mat_mul(a, grad)\n elif t_a and t_b:\n grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True, transpose_b=True)\n return None, grad_b\n\n\n@ops.RegisterGradient(\"MatMul\")\ndef _MatMulGrad(op, grad):\n \"\"\"Gradient for MatMul.\"\"\"\n try:\n skip_input_indices = op.skip_input_indices\n if skip_input_indices is not None:\n if 1 in skip_input_indices:\n return _MatMulGradAgainstFirstOnly(op, grad)\n elif 0 in skip_input_indices:\n return _MatMulGradAgainstSecondOnly(op, grad)\n except AttributeError:\n # No gradient skipping, so do the full gradient computation\n pass\n\n t_a = op.get_attr(\"transpose_a\")\n t_b = op.get_attr(\"transpose_b\")\n a = math_ops.conj(op.inputs[0])\n b = math_ops.conj(op.inputs[1])\n if not t_a and not t_b:\n grad_a = gen_math_ops.mat_mul(grad, b, transpose_b=True)\n grad_b = gen_math_ops.mat_mul(a, grad, transpose_a=True)\n elif not t_a and t_b:\n grad_a = gen_math_ops.mat_mul(grad, b)\n grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True)\n elif t_a and not t_b:\n grad_a = gen_math_ops.mat_mul(b, grad, transpose_b=True)\n grad_b = gen_math_ops.mat_mul(a, grad)\n elif t_a and t_b:\n grad_a = gen_math_ops.mat_mul(b, grad, transpose_a=True, transpose_b=True)\n grad_b = gen_math_ops.mat_mul(grad, a, transpose_a=True, transpose_b=True)\n return grad_a, grad_b\n
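\n\n# --- Editor's addition (illustrative, not part of the original source): the\n# non-transposed MatMul case above computes grad_a = grad @ b^T and\n# grad_b = a^T @ grad, which is easy to sanity-check shape-wise in NumPy.\n# Helper name and shapes are arbitrary.\ndef _editor_check_matmul_grad_shapes():\n  import numpy as np\n  a = np.random.randn(2, 3)\n  b = np.random.randn(3, 4)\n  grad = np.ones((2, 4))  # upstream gradient of c = a @ b\n  grad_a = grad @ b.T     # mirrors mat_mul(grad, b, transpose_b=True)\n  grad_b = a.T @ grad     # mirrors mat_mul(a, grad, transpose_a=True)\n  assert grad_a.shape == a.shape and grad_b.shape == b.shape\n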
\n\n@ops.RegisterGradient(\"SparseMatMul\")\ndef _SparseMatMulGrad(op, grad):\n \"\"\"Gradient for SparseMatMul.\"\"\"\n\n t_a = op.get_attr(\"transpose_a\")\n t_b = op.get_attr(\"transpose_b\")\n is_sparse = object_identity.ObjectIdentityDictionary()\n is_sparse[op.inputs[0]] = op.get_attr(\"a_is_sparse\")\n is_sparse[op.inputs[1]] = op.get_attr(\"b_is_sparse\")\n # Use heuristic to figure out if grad might be sparse\n is_sparse[grad] = not context.executing_eagerly() and (\n grad.op.type == \"ReluGrad\")\n\n def _SparseMatMul(t1, t2, out_dtype, transpose_a=False, transpose_b=False):\n \"\"\"Helper function to create SparseMatMul op.\"\"\"\n\n assert t1 in is_sparse and t2 in is_sparse\n t1_sparse = is_sparse[t1]\n t2_sparse = is_sparse[t2]\n if transpose_b:\n t2 = array_ops.transpose(t2)\n transpose_b = False\n prod = math_ops.matmul(\n t1,\n t2,\n transpose_a=transpose_a,\n transpose_b=transpose_b,\n a_is_sparse=t1_sparse,\n b_is_sparse=t2_sparse)\n if prod.dtype != out_dtype:\n prod = math_ops.cast(prod, out_dtype)\n return prod\n\n dtype_a = op.inputs[0].dtype\n dtype_b = op.inputs[1].dtype\n if not t_a and not t_b:\n return (_SparseMatMul(grad, op.inputs[1], dtype_a, transpose_b=True),\n _SparseMatMul(op.inputs[0], grad, dtype_b, transpose_a=True))\n elif not t_a and t_b:\n return (_SparseMatMul(grad, op.inputs[1], dtype_a),\n _SparseMatMul(grad, op.inputs[0], dtype_b, transpose_a=True))\n elif t_a and not t_b:\n return (_SparseMatMul(op.inputs[1], grad, dtype_a, transpose_b=True),\n _SparseMatMul(op.inputs[0], grad, dtype_b))\n elif t_a and t_b:\n return (_SparseMatMul(\n op.inputs[1], grad, dtype_a, transpose_a=True, transpose_b=True),\n _SparseMatMul(\n grad, op.inputs[0], dtype_b, transpose_a=True,\n transpose_b=True))\n\n\n@ops.RegisterGradient(\"Floor\")\ndef _FloorGrad(_, unused_grad):\n return [None]\n\n\n@ops.RegisterGradient(\"Ceil\")\ndef _CeilGrad(_, unused_grad):\n return [None]\n\n\n@ops.RegisterGradient(\"Round\")\ndef _RoundGrad(_, unused_grad):\n return [None]\n\n\n@ops.RegisterGradient(\"Rint\")\ndef _RintGrad(_, unused_grad):\n # the gradient of Rint is zero\n return [None]\n\n\n@ops.RegisterGradient(\"BatchMatMul\")\ndef _BatchMatMul(op, grad):\n \"\"\"Returns the gradient of x and y given the gradient of x * y.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n adj_x = op.get_attr(\"adj_x\")\n adj_y = op.get_attr(\"adj_y\")\n\n if not adj_x:\n if not adj_y:\n grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=True)\n grad_y = math_ops.matmul(x, grad, adjoint_a=True, adjoint_b=False)\n else:\n grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=False)\n grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=False)\n else:\n if not adj_y:\n grad_x = math_ops.matmul(y, grad, adjoint_a=False, adjoint_b=True)\n grad_y = math_ops.matmul(x, grad, adjoint_a=False, adjoint_b=False)\n else:\n grad_x = math_ops.matmul(y, grad, adjoint_a=True, adjoint_b=True)\n grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=True)\n\n return grad_x, grad_y\n\n\n@ops.RegisterGradient(\"BatchMatMulV2\")\ndef _BatchMatMulV2(op, grad):\n \"\"\"Returns the gradient of x and y given the gradient of x * y.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n adj_x = op.get_attr(\"adj_x\")\n adj_y = op.get_attr(\"adj_y\")\n\n if not adj_x:\n if not adj_y:\n grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=True)\n grad_y = math_ops.matmul(x, grad, adjoint_a=True, adjoint_b=False)\n else:\n grad_x = math_ops.matmul(grad, y, adjoint_a=False, adjoint_b=False)\n grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=False)\n else:\n if not adj_y:\n grad_x = math_ops.matmul(y, grad, adjoint_a=False, adjoint_b=True)\n grad_y = math_ops.matmul(x, grad, adjoint_a=False, adjoint_b=False)\n else:\n grad_x = math_ops.matmul(y, grad, adjoint_a=True, adjoint_b=True)\n grad_y = math_ops.matmul(grad, x, adjoint_a=True, adjoint_b=True)\n\n # Reduce along the broadcasted batch dimensions, if broadcasting is required.\n shape_x_static = x.get_shape()\n shape_y_static = y.get_shape()\n if not (shape_x_static.is_fully_defined() and\n shape_y_static.is_fully_defined() and\n shape_x_static == shape_y_static):\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops.broadcast_gradient_args(sx[:-2], sy[:-2])\n grad_x = array_ops.reshape(math_ops.reduce_sum(grad_x, rx), sx)\n grad_y = array_ops.reshape(math_ops.reduce_sum(grad_y, ry), sy)\n\n return grad_x, grad_y\n
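\n\n# --- Editor's addition (illustrative, not part of the original source):\n# BatchMatMulV2 broadcasts leading batch dimensions, so its gradient must sum\n# over any batch axis that was broadcast, just like the elementwise ops above.\n# A NumPy shape sketch (helper name and shapes are arbitrary):\ndef _editor_demo_batch_matmul_v2_grad():\n  import numpy as np\n  x = np.random.randn(1, 2, 3)                  # batch dim 1 broadcasts to 5\n  y = np.random.randn(5, 3, 4)\n  grad = np.ones((5, 2, 4))                     # upstream grad of x @ y\n  grad_x = grad @ np.transpose(y, (0, 2, 1))    # (5, 2, 3)\n  grad_x = grad_x.sum(axis=0, keepdims=True)    # reduce broadcast batch dim\n  assert grad_x.shape == x.shape                # back to (1, 2, 3)\n  return grad_x\n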
\n\nops.NotDifferentiable(\"Range\")\nops.NotDifferentiable(\"LinSpace\")\n\n\n@ops.RegisterGradient(\"Complex\")\ndef _ComplexGrad(op, grad):\n \"\"\"Returns the real and imaginary components of 'grad', respectively.\"\"\"\n x = op.inputs[0]\n y = op.inputs[1]\n sx = array_ops.shape(x)\n sy = array_ops.shape(y)\n rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)\n return (array_ops.reshape(math_ops.reduce_sum(math_ops.real(grad), rx), sx),\n array_ops.reshape(math_ops.reduce_sum(math_ops.imag(grad), ry), sy))\n\n\n@ops.RegisterGradient(\"Real\")\ndef _RealGrad(_, grad):\n \"\"\"Returns 'grad' as the real part and sets the imaginary part to 0.\"\"\"\n zero = constant_op.constant(0, dtype=grad.dtype)\n return math_ops.complex(grad, zero)\n\n\n@ops.RegisterGradient(\"Imag\")\ndef _ImagGrad(_, grad):\n \"\"\"Returns 'grad' as the imaginary part and sets the real part to 0.\"\"\"\n zero = constant_op.constant(0, dtype=grad.dtype)\n return math_ops.complex(zero, grad)\n\n\n@ops.RegisterGradient(\"Angle\")\ndef _AngleGrad(op, grad):\n \"\"\"Returns -grad / (Im(x) + iRe(x)).\"\"\"\n x = op.inputs[0]\n with ops.control_dependencies([grad]):\n re = math_ops.real(x)\n im = math_ops.imag(x)\n z = math_ops.reciprocal(math_ops.complex(im, re))\n zero = constant_op.constant(0, dtype=grad.dtype)\n complex_grad = math_ops.complex(grad, zero)\n return -complex_grad * z\n\n\n@ops.RegisterGradient(\"Conj\")\ndef _ConjGrad(_, grad):\n \"\"\"Returns the complex conjugate of grad.\"\"\"\n return math_ops.conj(grad)\n\n\n@ops.RegisterGradient(\"ComplexAbs\")\ndef _ComplexAbsGrad(op, grad):\n \"\"\"Returns the gradient of ComplexAbs.\"\"\"\n return math_ops.div_no_nan(\n math_ops.complex(\n grad, array_ops.zeros_like(grad)) * op.inputs[0],\n math_ops.complex(\n op.outputs[0], array_ops.zeros_like(op.outputs[0])))\n\n\n@ops.RegisterGradient(\"Cast\")\ndef _CastGrad(op, grad):\n t = [\n dtypes.float16, dtypes.float32, dtypes.float64, dtypes.bfloat16,\n dtypes.complex64, dtypes.complex128\n ]\n src_type = op.inputs[0].dtype.base_dtype\n dst_type = grad.dtype.base_dtype\n if src_type in t and dst_type in t:\n return math_ops.cast(grad, src_type)\n else:\n return None\n\n\n@ops.RegisterGradient(\"Cross\")\ndef _CrossGrad(op, grad):\n u = op.inputs[0]\n v = op.inputs[1]\n return (math_ops.cross(v, grad), math_ops.cross(grad, u))\n\n\n@ops.RegisterGradient(\"Cumsum\")\ndef _CumsumGrad(op, grad):\n axis = op.inputs[1]\n exclusive = op.get_attr(\"exclusive\")\n reverse = op.get_attr(\"reverse\")\n return [\n math_ops.cumsum(grad, axis, exclusive=exclusive, reverse=not reverse),\n None\n ]\n\n\n@ops.RegisterGradient(\"Cumprod\")\ndef _CumprodGrad(op, grad):\n x = op.inputs[0]\n axis = op.inputs[1]\n exclusive = op.get_attr(\"exclusive\")\n reverse = op.get_attr(\"reverse\")\n\n # TODO This fails when x contains 0 and should be fixed\n prod = math_ops.cumprod(x, axis, exclusive=exclusive, reverse=reverse)\n out = math_ops.cumsum(\n prod * grad, axis, exclusive=exclusive, reverse=not reverse)\n return [out / x, None]\n
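\n\n# --- Editor's addition (illustrative, not part of the original source): the\n# Cumsum gradient above is just a cumsum of the incoming grad in the opposite\n# direction, since y_i = sum_(j<=i) x_j implies dL/dx_j = sum_(i>=j) grad_i.\n# Helper name and values are arbitrary.\ndef _editor_check_cumsum_grad():\n  import numpy as np\n  grad = np.array([1.0, 2.0, 3.0])\n  gx = np.cumsum(grad[::-1])[::-1]  # reversed cumsum of grad\n  assert np.allclose(gx, [6.0, 5.0, 3.0])\n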
\n\n@ops.RegisterGradient(\"CumulativeLogsumexp\")\ndef _CumulativeLogsumexpGrad(op, grad):\n x = op.inputs[0]\n axis = op.inputs[1]\n cumulative_logsumexp = op.outputs[0]\n\n exclusive = op.get_attr(\"exclusive\")\n reverse = op.get_attr(\"reverse\")\n\n # Split the incoming gradient into positive and negative part\n # in order to take logs. This is required for stable results.\n log_grad_positive = array_ops.where_v2(\n math_ops.greater(grad, 0),\n math_ops.log(grad),\n grad.dtype.min)\n\n log_grad_negative = array_ops.where_v2(\n math_ops.less(grad, 0),\n math_ops.log(-grad),\n grad.dtype.min)\n\n output_pos = math_ops.exp(\n math_ops.cumulative_logsumexp(\n log_grad_positive - cumulative_logsumexp,\n axis=axis, reverse=not reverse, exclusive=exclusive) + x)\n\n output_neg = math_ops.exp(\n math_ops.cumulative_logsumexp(\n log_grad_negative - cumulative_logsumexp,\n axis=axis, reverse=not reverse, exclusive=exclusive) + x)\n\n return [output_pos - output_neg, None]\n\n\n@ops.RegisterGradient(\"NextAfter\")\ndef _NextAfterGrad(op, grad):\n \"\"\"Returns gradient of nextafter(x1, x2) with respect to x1 and x2.\"\"\"\n x1 = op.inputs[0]\n x2 = op.inputs[1]\n s_x1 = array_ops.shape(x1)\n s_x2 = array_ops.shape(x2)\n r_x1, r_x2 = gen_array_ops.broadcast_gradient_args(s_x1, s_x2)\n with ops.control_dependencies([grad]):\n partial_x1 = array_ops.ones(s_x1, dtype=x1.dtype)\n partial_x2 = array_ops.zeros(s_x2, dtype=x2.dtype)\n return (array_ops.reshape(\n math_ops.reduce_sum(partial_x1 * grad, r_x1), s_x1),\n array_ops.reshape(\n math_ops.reduce_sum(partial_x2 * grad, r_x2), s_x2))\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests of the Analyzer CLI Backend.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport shutil\nimport tempfile\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.debug.cli import analyzer_cli\nfrom tensorflow.python.debug.cli import cli_config\nfrom tensorflow.python.debug.cli import cli_shared\nfrom tensorflow.python.debug.cli import cli_test_utils\nfrom tensorflow.python.debug.cli import command_parser\nfrom tensorflow.python.debug.cli import debugger_cli_common\nfrom tensorflow.python.debug.lib import debug_data\nfrom tensorflow.python.debug.lib import debug_utils\nfrom tensorflow.python.debug.lib import source_utils\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.util import tf_inspect\n\n\n# Helper function to accommodate MKL-enabled TensorFlow:\n# MatMul op is supported by MKL and its name is prefixed with \"_Mkl\" during the\n# MKL graph rewrite pass.\ndef _matmul_op_name():\n return \"_MklMatMul\" if test_util.IsMklEnabled() else \"MatMul\"\n\n\ndef _cli_config_from_temp_file():\n return cli_config.CLIConfig(\n config_file_path=os.path.join(tempfile.mkdtemp(), \".tfdbg_config\"))\n\n\ndef no_rewrite_session_config():\n rewriter_config = rewriter_config_pb2.RewriterConfig(\n disable_model_pruning=True,\n constant_folding=rewriter_config_pb2.RewriterConfig.OFF,\n arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,\n dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF,\n pin_to_host_optimization=rewriter_config_pb2.RewriterConfig.OFF)\n\n graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)\n return config_pb2.ConfigProto(graph_options=graph_options)\n
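\n\n# --- Editor's note (illustrative, not part of the original test file): the\n# config above disables Grappler rewrites so the dumped graph keeps its\n# original node names, which the assertions below depend on. It is used like:\n#\n#   sess = session.Session(config=no_rewrite_session_config())\n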
\n\ndef line_number_above():\n return tf_inspect.stack()[1][2] - 1\n\n\ndef parse_op_and_node(line):\n \"\"\"Parse a line containing an op node followed by a node name.\n\n For example, if the line is\n \" [Variable] hidden/weights\",\n this function will return (\"Variable\", \"hidden/weights\")\n\n Args:\n line: The line to be parsed, as a str.\n\n Returns:\n Name of the parsed op type.\n Name of the parsed node.\n \"\"\"\n\n op_type = line.strip().split(\" \")[0].replace(\"[\", \"\").replace(\"]\", \"\")\n\n # Not using [-1], to tolerate any other items that might be present after\n # the node name.\n node_name = line.strip().split(\" \")[1]\n\n return op_type, node_name\n\n\ndef assert_column_header_command_shortcut(tst,\n command,\n reverse,\n node_name_regex,\n op_type_regex,\n tensor_filter_name):\n tst.assertFalse(reverse and \"-r\" in command)\n tst.assertFalse(not(op_type_regex) and (\"-t %s\" % op_type_regex) in command)\n tst.assertFalse(\n not(node_name_regex) and (\"-n %s\" % node_name_regex) in command)\n tst.assertFalse(\n not(tensor_filter_name) and (\"-f %s\" % tensor_filter_name) in command)\n\n\ndef assert_listed_tensors(tst,\n out,\n expected_tensor_names,\n expected_op_types,\n node_name_regex=None,\n op_type_regex=None,\n tensor_filter_name=None,\n sort_by=\"timestamp\",\n reverse=False):\n \"\"\"Check RichTextLines output for list_tensors commands.\n\n Args:\n tst: A test_util.TensorFlowTestCase instance.\n out: The RichTextLines object to be checked.\n expected_tensor_names: (list of str) Expected tensor names in the list.\n expected_op_types: (list of str) Expected op types of the tensors, in the\n same order as the expected_tensor_names.\n node_name_regex: Optional: node name regex filter.\n op_type_regex: Optional: op type regex filter.\n tensor_filter_name: Optional: name of the tensor filter.\n sort_by: (str) (timestamp | dump_size | op_type | tensor_name) the field\n by which the tensors in the list are sorted.\n reverse: (bool) whether the sorting is in reverse (i.e., descending) order.\n \"\"\"\n\n line_iter = iter(out.lines)\n attr_segs = out.font_attr_segs\n line_counter = 0\n\n num_dumped_tensors = int(next(line_iter).split(\" \")[0])\n line_counter += 1\n tst.assertGreaterEqual(num_dumped_tensors, len(expected_tensor_names))\n\n if op_type_regex is not None:\n tst.assertEqual(\"Op type regex filter: \\\"%s\\\"\" % op_type_regex,\n next(line_iter))\n line_counter += 1\n\n if node_name_regex is not None:\n tst.assertEqual(\"Node name regex filter: \\\"%s\\\"\" % node_name_regex,\n next(line_iter))\n line_counter += 1\n\n tst.assertEqual(\"\", next(line_iter))\n line_counter += 1\n\n # Verify the column heads \"t (ms)\", \"Op type\" and \"Tensor name\" are present.\n line = next(line_iter)\n tst.assertIn(\"t (ms)\", line)\n tst.assertIn(\"Op type\", line)\n 
tst.assertIn(\"Tensor name\", line)\n\n # Verify the command shortcuts in the top row.\n attr_segs = out.font_attr_segs[line_counter]\n attr_seg = attr_segs[0]\n tst.assertEqual(0, attr_seg[0])\n tst.assertEqual(len(\"t (ms)\"), attr_seg[1])\n command = attr_seg[2][0].content\n tst.assertIn(\"-s timestamp\", command)\n assert_column_header_command_shortcut(\n tst, command, reverse, node_name_regex, op_type_regex,\n tensor_filter_name)\n tst.assertEqual(\"bold\", attr_seg[2][1])\n\n idx0 = line.index(\"Size\")\n attr_seg = attr_segs[1]\n tst.assertEqual(idx0, attr_seg[0])\n tst.assertEqual(idx0 + len(\"Size (B)\"), attr_seg[1])\n command = attr_seg[2][0].content\n tst.assertIn(\"-s dump_size\", command)\n assert_column_header_command_shortcut(tst, command, reverse, node_name_regex,\n op_type_regex, tensor_filter_name)\n tst.assertEqual(\"bold\", attr_seg[2][1])\n\n idx0 = line.index(\"Op type\")\n attr_seg = attr_segs[2]\n tst.assertEqual(idx0, attr_seg[0])\n tst.assertEqual(idx0 + len(\"Op type\"), attr_seg[1])\n command = attr_seg[2][0].content\n tst.assertIn(\"-s op_type\", command)\n assert_column_header_command_shortcut(\n tst, command, reverse, node_name_regex, op_type_regex,\n tensor_filter_name)\n tst.assertEqual(\"bold\", attr_seg[2][1])\n\n idx0 = line.index(\"Tensor name\")\n attr_seg = attr_segs[3]\n tst.assertEqual(idx0, attr_seg[0])\n tst.assertEqual(idx0 + len(\"Tensor name\"), attr_seg[1])\n command = attr_seg[2][0].content\n tst.assertIn(\"-s tensor_name\", command)\n assert_column_header_command_shortcut(\n tst, command, reverse, node_name_regex, op_type_regex,\n tensor_filter_name)\n tst.assertEqual(\"bold\", attr_seg[2][1])\n\n # Verify the listed tensors and their timestamps.\n tensor_timestamps = []\n dump_sizes_bytes = []\n op_types = []\n tensor_names = []\n for line in line_iter:\n items = line.split(\" \")\n items = [item for item in items if item]\n\n rel_time = float(items[0][1:-1])\n tst.assertGreaterEqual(rel_time, 0.0)\n\n tensor_timestamps.append(rel_time)\n dump_sizes_bytes.append(command_parser.parse_readable_size_str(items[1]))\n op_types.append(items[2])\n tensor_names.append(items[3])\n\n # Verify that the tensors should be listed in ascending order of their\n # timestamps.\n if sort_by == \"timestamp\":\n sorted_timestamps = sorted(tensor_timestamps)\n if reverse:\n sorted_timestamps.reverse()\n tst.assertEqual(sorted_timestamps, tensor_timestamps)\n elif sort_by == \"dump_size\":\n sorted_dump_sizes_bytes = sorted(dump_sizes_bytes)\n if reverse:\n sorted_dump_sizes_bytes.reverse()\n tst.assertEqual(sorted_dump_sizes_bytes, dump_sizes_bytes)\n elif sort_by == \"op_type\":\n sorted_op_types = sorted(op_types)\n if reverse:\n sorted_op_types.reverse()\n tst.assertEqual(sorted_op_types, op_types)\n elif sort_by == \"tensor_name\":\n sorted_tensor_names = sorted(tensor_names)\n if reverse:\n sorted_tensor_names.reverse()\n tst.assertEqual(sorted_tensor_names, tensor_names)\n else:\n tst.fail(\"Invalid value in sort_by: %s\" % sort_by)\n\n # Verify that the tensors are all listed.\n for tensor_name, op_type in zip(expected_tensor_names, expected_op_types):\n tst.assertIn(tensor_name, tensor_names)\n index = tensor_names.index(tensor_name)\n tst.assertEqual(op_type, op_types[index])\n\n\ndef assert_node_attribute_lines(tst,\n out,\n node_name,\n op_type,\n device,\n input_op_type_node_name_pairs,\n ctrl_input_op_type_node_name_pairs,\n recipient_op_type_node_name_pairs,\n ctrl_recipient_op_type_node_name_pairs,\n attr_key_val_pairs=None,\n 
num_dumped_tensors=None,\n show_stack_trace=False,\n stack_trace_available=False):\n \"\"\"Check RichTextLines output for node_info commands.\n\n Args:\n tst: A test_util.TensorFlowTestCase instance.\n out: The RichTextLines object to be checked.\n node_name: Name of the node.\n op_type: Op type of the node, as a str.\n device: Name of the device on which the node resides.\n input_op_type_node_name_pairs: A list of 2-tuples of op type and node name,\n for the (non-control) inputs to the node.\n ctrl_input_op_type_node_name_pairs: A list of 2-tuples of op type and node\n name, for the control inputs to the node.\n recipient_op_type_node_name_pairs: A list of 2-tuples of op type and node\n name, for the (non-control) output recipients to the node.\n ctrl_recipient_op_type_node_name_pairs: A list of 2-tuples of op type and\n node name, for the control output recipients to the node.\n attr_key_val_pairs: Optional: attribute key-value pairs of the node, as a\n list of 2-tuples.\n num_dumped_tensors: Optional: number of tensor dumps from the node.\n show_stack_trace: (bool) whether the stack trace of the node's\n construction is asserted to be present.\n stack_trace_available: (bool) whether Python stack trace is available.\n \"\"\"\n\n line_iter = iter(out.lines)\n\n tst.assertEqual(\"Node %s\" % node_name, next(line_iter))\n tst.assertEqual(\"\", next(line_iter))\n tst.assertEqual(\" Op: %s\" % op_type, next(line_iter))\n tst.assertEqual(\" Device: %s\" % device, next(line_iter))\n tst.assertEqual(\"\", next(line_iter))\n tst.assertEqual(\" %d input(s) + %d control input(s):\" %\n (len(input_op_type_node_name_pairs),\n len(ctrl_input_op_type_node_name_pairs)), next(line_iter))\n\n # Check inputs.\n tst.assertEqual(\" %d input(s):\" % len(input_op_type_node_name_pairs),\n next(line_iter))\n for op_type, node_name in input_op_type_node_name_pairs:\n tst.assertEqual(\" [%s] %s\" % (op_type, node_name), next(line_iter))\n\n tst.assertEqual(\"\", next(line_iter))\n\n # Check control inputs.\n if ctrl_input_op_type_node_name_pairs:\n tst.assertEqual(\" %d control input(s):\" %\n len(ctrl_input_op_type_node_name_pairs), next(line_iter))\n for op_type, node_name in ctrl_input_op_type_node_name_pairs:\n tst.assertEqual(\" [%s] %s\" % (op_type, node_name), next(line_iter))\n\n tst.assertEqual(\"\", next(line_iter))\n\n tst.assertEqual(\" %d recipient(s) + %d control recipient(s):\" %\n (len(recipient_op_type_node_name_pairs),\n len(ctrl_recipient_op_type_node_name_pairs)),\n next(line_iter))\n\n # Check recipients, the order of which is not deterministic.\n tst.assertEqual(\" %d recipient(s):\" %\n len(recipient_op_type_node_name_pairs), next(line_iter))\n\n t_recs = []\n for _ in recipient_op_type_node_name_pairs:\n line = next(line_iter)\n\n op_type, node_name = parse_op_and_node(line)\n t_recs.append((op_type, node_name))\n\n tst.assertItemsEqual(recipient_op_type_node_name_pairs, t_recs)\n\n # Check control recipients, the order of which is not deterministic.\n if ctrl_recipient_op_type_node_name_pairs:\n tst.assertEqual(\"\", next(line_iter))\n\n tst.assertEqual(\" %d control recipient(s):\" %\n len(ctrl_recipient_op_type_node_name_pairs),\n next(line_iter))\n\n t_ctrl_recs = []\n for _ in ctrl_recipient_op_type_node_name_pairs:\n line = next(line_iter)\n\n op_type, node_name = parse_op_and_node(line)\n t_ctrl_recs.append((op_type, node_name))\n\n tst.assertItemsEqual(ctrl_recipient_op_type_node_name_pairs, t_ctrl_recs)\n\n # The order of multiple attributes can be non-deterministic.\n if 
attr_key_val_pairs:\n tst.assertEqual(\"\", next(line_iter))\n\n tst.assertEqual(\"Node attributes:\", next(line_iter))\n\n kv_pairs = []\n for key, val in attr_key_val_pairs:\n key = next(line_iter).strip().replace(\":\", \"\")\n\n val = next(line_iter).strip()\n\n kv_pairs.append((key, val))\n\n tst.assertEqual(\"\", next(line_iter))\n\n tst.assertItemsEqual(attr_key_val_pairs, kv_pairs)\n\n if num_dumped_tensors is not None:\n tst.assertEqual(\"%d dumped tensor(s):\" % num_dumped_tensors,\n next(line_iter))\n tst.assertEqual(\"\", next(line_iter))\n\n dump_timestamps_ms = []\n for _ in xrange(num_dumped_tensors):\n line = next(line_iter)\n\n tst.assertStartsWith(line.strip(), \"Slot 0 @ DebugIdentity @\")\n tst.assertTrue(line.strip().endswith(\" ms\"))\n\n dump_timestamp_ms = float(line.strip().split(\" @ \")[-1].replace(\"ms\", \"\"))\n tst.assertGreaterEqual(dump_timestamp_ms, 0.0)\n\n dump_timestamps_ms.append(dump_timestamp_ms)\n\n tst.assertEqual(sorted(dump_timestamps_ms), dump_timestamps_ms)\n\n if show_stack_trace:\n tst.assertEqual(\"\", next(line_iter))\n tst.assertEqual(\"\", next(line_iter))\n tst.assertEqual(\"Traceback of node construction:\", next(line_iter))\n if stack_trace_available:\n try:\n depth_counter = 0\n while True:\n for i in range(5):\n line = next(line_iter)\n if i == 0:\n tst.assertEqual(depth_counter, int(line.split(\":\")[0]))\n elif i == 1:\n tst.assertStartsWith(line, \" Line:\")\n elif i == 2:\n tst.assertStartsWith(line, \" Function:\")\n elif i == 3:\n tst.assertStartsWith(line, \" Text:\")\n elif i == 4:\n tst.assertEqual(\"\", line)\n\n depth_counter += 1\n except StopIteration:\n tst.assertEqual(0, i)\n else:\n tst.assertEqual(\"(Unavailable because no Python graph has been loaded)\",\n next(line_iter))\n\n\ndef check_syntax_error_output(tst, out, command_prefix):\n \"\"\"Check RichTextLines output for valid command prefix but invalid syntax.\"\"\"\n\n tst.assertEqual([\n \"Syntax error for command: %s\" % command_prefix,\n \"For help, do \\\"help %s\\\"\" % command_prefix\n ], out.lines)\n\n\ndef check_error_output(tst, out, command_prefix, args):\n \"\"\"Check RichTextLines output from invalid/erroneous commands.\n\n Args:\n tst: A test_util.TensorFlowTestCase instance.\n out: The RichTextLines object to be checked.\n command_prefix: The command prefix of the command that caused the error.\n args: The arguments (excluding prefix) of the command that caused the error.\n \"\"\"\n\n tst.assertGreater(len(out.lines), 2)\n tst.assertStartsWith(out.lines[0],\n \"Error occurred during handling of command: %s %s\" %\n (command_prefix, \" \".join(args)))\n\n\ndef check_main_menu(tst,\n out,\n list_tensors_enabled=False,\n node_info_node_name=None,\n print_tensor_node_name=None,\n list_inputs_node_name=None,\n list_outputs_node_name=None):\n \"\"\"Check the main menu annotation of an output.\"\"\"\n\n tst.assertIn(debugger_cli_common.MAIN_MENU_KEY, out.annotations)\n\n menu = out.annotations[debugger_cli_common.MAIN_MENU_KEY]\n tst.assertEqual(list_tensors_enabled,\n menu.caption_to_item(\"list_tensors\").is_enabled())\n\n menu_item = menu.caption_to_item(\"node_info\")\n if node_info_node_name:\n tst.assertTrue(menu_item.is_enabled())\n tst.assertTrue(menu_item.content.endswith(node_info_node_name))\n else:\n tst.assertFalse(menu_item.is_enabled())\n\n menu_item = menu.caption_to_item(\"print_tensor\")\n if print_tensor_node_name:\n tst.assertTrue(menu_item.is_enabled())\n tst.assertTrue(menu_item.content.endswith(print_tensor_node_name))\n else:\n 
tst.assertFalse(menu_item.is_enabled())\n\n menu_item = menu.caption_to_item(\"list_inputs\")\n if list_inputs_node_name:\n tst.assertTrue(menu_item.is_enabled())\n tst.assertTrue(menu_item.content.endswith(list_inputs_node_name))\n else:\n tst.assertFalse(menu_item.is_enabled())\n\n menu_item = menu.caption_to_item(\"list_outputs\")\n if list_outputs_node_name:\n tst.assertTrue(menu_item.is_enabled())\n tst.assertTrue(menu_item.content.endswith(list_outputs_node_name))\n else:\n tst.assertFalse(menu_item.is_enabled())\n\n tst.assertTrue(menu.caption_to_item(\"run_info\").is_enabled())\n tst.assertTrue(menu.caption_to_item(\"help\").is_enabled())\n\n\ndef check_menu_item(tst, out, line_index, expected_begin, expected_end,\n expected_command):\n attr_segs = out.font_attr_segs[line_index]\n found_menu_item = False\n for begin, end, attribute in attr_segs:\n attributes = [attribute] if not isinstance(attribute, list) else attribute\n menu_item = [attribute for attribute in attributes if\n isinstance(attribute, debugger_cli_common.MenuItem)]\n if menu_item:\n tst.assertEqual(expected_begin, begin)\n tst.assertEqual(expected_end, end)\n tst.assertEqual(expected_command, menu_item[0].content)\n found_menu_item = True\n break\n tst.assertTrue(found_menu_item)\n\n\ndef create_analyzer_cli(dump):\n \"\"\"Create an analyzer CLI.\n\n Args:\n dump: A `DebugDumpDir` object to base the analyzer CLI on.\n\n Returns:\n 1) A `DebugAnalyzer` object created based on `dump`.\n 2) A `CommandHandlerRegistry` that is based on the `DebugAnalyzer` object\n and has the common tfdbg commands, e.g., lt, ni, li, lo, registered.\n \"\"\"\n # Construct the analyzer.\n analyzer = analyzer_cli.DebugAnalyzer(dump, _cli_config_from_temp_file())\n\n # Construct the handler registry.\n registry = debugger_cli_common.CommandHandlerRegistry()\n\n # Register command handlers.\n registry.register_command_handler(\n \"list_tensors\",\n analyzer.list_tensors,\n analyzer.get_help(\"list_tensors\"),\n prefix_aliases=[\"lt\"])\n registry.register_command_handler(\n \"node_info\",\n analyzer.node_info,\n analyzer.get_help(\"node_info\"),\n prefix_aliases=[\"ni\"])\n registry.register_command_handler(\n \"list_inputs\",\n analyzer.list_inputs,\n analyzer.get_help(\"list_inputs\"),\n prefix_aliases=[\"li\"])\n registry.register_command_handler(\n \"list_outputs\",\n analyzer.list_outputs,\n analyzer.get_help(\"list_outputs\"),\n prefix_aliases=[\"lo\"])\n registry.register_command_handler(\n \"print_tensor\",\n analyzer.print_tensor,\n analyzer.get_help(\"print_tensor\"),\n prefix_aliases=[\"pt\"])\n registry.register_command_handler(\n \"print_source\",\n analyzer.print_source,\n analyzer.get_help(\"print_source\"),\n prefix_aliases=[\"ps\"])\n registry.register_command_handler(\n \"list_source\",\n analyzer.list_source,\n analyzer.get_help(\"list_source\"),\n prefix_aliases=[\"ls\"])\n registry.register_command_handler(\n \"eval\",\n analyzer.evaluate_expression,\n analyzer.get_help(\"eval\"),\n prefix_aliases=[\"ev\"])\n\n return analyzer, registry\n\n\n@test_util.run_v1_only(\"b/120545219\")\nclass AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):\n\n @classmethod\n def setUpClass(cls):\n cls._dump_root = tempfile.mkdtemp()\n cls._dump_root_for_unique = tempfile.mkdtemp()\n\n cls._is_gpu_available = test.is_gpu_available()\n if cls._is_gpu_available:\n gpu_name = test_util.gpu_device_name()\n cls._main_device = \"/job:localhost/replica:0/task:0\" + gpu_name\n else:\n cls._main_device = 
\"/job:localhost/replica:0/task:0/device:CPU:0\"\n\n cls._curr_file_path = os.path.abspath(\n tf_inspect.getfile(tf_inspect.currentframe()))\n\n cls._sess = session.Session(config=no_rewrite_session_config())\n with cls._sess as sess:\n u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])\n v_init_val = np.array([[2.0], [-1.0]])\n\n u_name = \"simple_mul_add/u\"\n v_name = \"simple_mul_add/v\"\n\n u_init = constant_op.constant(u_init_val, shape=[2, 2], name=\"u_init\")\n u = variables.VariableV1(u_init, name=u_name)\n cls._u_line_number = line_number_above()\n\n v_init = constant_op.constant(v_init_val, shape=[2, 1], name=\"v_init\")\n v = variables.VariableV1(v_init, name=v_name)\n cls._v_line_number = line_number_above()\n\n w = math_ops.matmul(u, v, name=\"simple_mul_add/matmul\")\n cls._w_line_number = line_number_above()\n\n x = math_ops.add(w, w, name=\"simple_mul_add/add\")\n cls._x_line_number = line_number_above()\n\n a = variables.VariableV1([1, 3, 3, 7], name=\"a\")\n\n u.initializer.run()\n v.initializer.run()\n a.initializer.run()\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugIdentity\"],\n debug_urls=\"file://%s\" % cls._dump_root)\n\n # Invoke Session.run().\n run_metadata = config_pb2.RunMetadata()\n sess.run([x], options=run_options, run_metadata=run_metadata)\n cls._debug_dump = debug_data.DebugDumpDir(\n cls._dump_root, partition_graphs=run_metadata.partition_graphs)\n cls._analyzer, cls._registry = create_analyzer_cli(cls._debug_dump)\n\n @classmethod\n def tearDownClass(cls):\n # Tear down temporary dump directory.\n shutil.rmtree(cls._dump_root)\n shutil.rmtree(cls._dump_root_for_unique)\n\n def testMeasureTensorListColumnWidthsGivesRightAnswerForEmptyData(self):\n timestamp_col_width, dump_size_col_width, op_type_col_width = (\n self._analyzer._measure_tensor_list_column_widths([]))\n self.assertEqual(len(\"t (ms)\") + 1, timestamp_col_width)\n self.assertEqual(len(\"Size (B)\") + 1, dump_size_col_width)\n self.assertEqual(len(\"Op type\") + 1, op_type_col_width)\n\n def testMeasureTensorListColumnWidthsGivesRightAnswerForData(self):\n dump = self._debug_dump.dumped_tensor_data[0]\n self.assertLess(dump.dump_size_bytes, 1000)\n self.assertEqual(\n \"VariableV2\", self._debug_dump.node_op_type(dump.node_name))\n _, dump_size_col_width, op_type_col_width = (\n self._analyzer._measure_tensor_list_column_widths([dump]))\n # The length of str(dump.dump_size_bytes) is less than the length of\n # \"Size (B)\" (8). So the column width should be determined by the length of\n # \"Size (B)\".\n self.assertEqual(len(\"Size (B)\") + 1, dump_size_col_width)\n # The length of \"VariableV2\" is greater than the length of \"Op type\". 
So the\n # column should be determined by the length of \"VariableV2\".\n self.assertEqual(len(\"VariableV2\") + 1, op_type_col_width)\n\n def testListTensors(self):\n # Use shorthand alias for the command prefix.\n out = self._registry.dispatch_command(\"lt\", [])\n\n assert_listed_tensors(self, out, [\n \"simple_mul_add/u:0\", \"simple_mul_add/v:0\", \"simple_mul_add/u/read:0\",\n \"simple_mul_add/v/read:0\", \"simple_mul_add/matmul:0\",\n \"simple_mul_add/add:0\"\n ], [\n \"VariableV2\", \"VariableV2\", \"Identity\", \"Identity\",\n _matmul_op_name(), \"Add\"\n ])\n\n # Check the main menu.\n check_main_menu(self, out, list_tensors_enabled=False)\n\n def testListTensorsInReverseTimeOrderWorks(self):\n # Use shorthand alias for the command prefix.\n out = self._registry.dispatch_command(\"lt\", [\"-s\", \"timestamp\", \"-r\"])\n assert_listed_tensors(\n self,\n out, [\n \"simple_mul_add/u:0\", \"simple_mul_add/v:0\",\n \"simple_mul_add/u/read:0\", \"simple_mul_add/v/read:0\",\n \"simple_mul_add/matmul:0\", \"simple_mul_add/add:0\"\n ], [\n \"VariableV2\", \"VariableV2\", \"Identity\", \"Identity\",\n _matmul_op_name(), \"Add\"\n ],\n sort_by=\"timestamp\",\n reverse=True)\n check_main_menu(self, out, list_tensors_enabled=False)\n\n def testListTensorsInDumpSizeOrderWorks(self):\n out = self._registry.dispatch_command(\"lt\", [\"-s\", \"dump_size\"])\n assert_listed_tensors(\n self,\n out, [\n \"simple_mul_add/u:0\", \"simple_mul_add/v:0\",\n \"simple_mul_add/u/read:0\", \"simple_mul_add/v/read:0\",\n \"simple_mul_add/matmul:0\", \"simple_mul_add/add:0\"\n ], [\n \"VariableV2\", \"VariableV2\", \"Identity\", \"Identity\",\n _matmul_op_name(), \"Add\"\n ],\n sort_by=\"dump_size\")\n check_main_menu(self, out, list_tensors_enabled=False)\n\n def testListTensorsInReverseDumpSizeOrderWorks(self):\n out = self._registry.dispatch_command(\"lt\", [\"-s\", \"dump_size\", \"-r\"])\n assert_listed_tensors(\n self,\n out, [\n \"simple_mul_add/u:0\", \"simple_mul_add/v:0\",\n \"simple_mul_add/u/read:0\", \"simple_mul_add/v/read:0\",\n \"simple_mul_add/matmul:0\", \"simple_mul_add/add:0\"\n ], [\n \"VariableV2\", \"VariableV2\", \"Identity\", \"Identity\",\n _matmul_op_name(), \"Add\"\n ],\n sort_by=\"dump_size\",\n reverse=True)\n check_main_menu(self, out, list_tensors_enabled=False)\n\n def testListTensorsWithInvalidSortByFieldGivesError(self):\n out = self._registry.dispatch_command(\"lt\", [\"-s\", \"foobar\"])\n self.assertIn(\"ValueError: Unsupported key to sort tensors by: foobar\",\n out.lines)\n\n def testListTensorsInOpTypeOrderWorks(self):\n # Use shorthand alias for the command prefix.\n out = self._registry.dispatch_command(\"lt\", [\"-s\", \"op_type\"])\n assert_listed_tensors(\n self,\n out, [\n \"simple_mul_add/u:0\", \"simple_mul_add/v:0\",\n \"simple_mul_add/u/read:0\", \"simple_mul_add/v/read:0\",\n \"simple_mul_add/matmul:0\", \"simple_mul_add/add:0\"\n ], [\n \"VariableV2\", \"VariableV2\", \"Identity\", \"Identity\",\n _matmul_op_name(), \"Add\"\n ],\n sort_by=\"op_type\",\n reverse=False)\n check_main_menu(self, out, list_tensors_enabled=False)\n\n def testListTensorsInReverseOpTypeOrderWorks(self):\n # Use shorthand alias for the command prefix.\n out = self._registry.dispatch_command(\"lt\", [\"-s\", \"op_type\", \"-r\"])\n assert_listed_tensors(\n self,\n out, [\n \"simple_mul_add/u:0\", \"simple_mul_add/v:0\",\n \"simple_mul_add/u/read:0\", \"simple_mul_add/v/read:0\",\n \"simple_mul_add/matmul:0\", \"simple_mul_add/add:0\"\n ], [\n \"VariableV2\", \"VariableV2\", 
\"Identity\", \"Identity\",\n _matmul_op_name(), \"Add\"\n ],\n sort_by=\"op_type\",\n reverse=True)\n check_main_menu(self, out, list_tensors_enabled=False)\n\n def testListTensorsInTensorNameOrderWorks(self):\n # Use shorthand alias for the command prefix.\n out = self._registry.dispatch_command(\"lt\", [\"-s\", \"tensor_name\"])\n assert_listed_tensors(\n self,\n out, [\n \"simple_mul_add/u:0\", \"simple_mul_add/v:0\",\n \"simple_mul_add/u/read:0\", \"simple_mul_add/v/read:0\",\n \"simple_mul_add/matmul:0\", \"simple_mul_add/add:0\"\n ], [\n \"VariableV2\", \"VariableV2\", \"Identity\", \"Identity\",\n _matmul_op_name(), \"Add\"\n ],\n sort_by=\"tensor_name\",\n reverse=False)\n check_main_menu(self, out, list_tensors_enabled=False)\n\n def testListTensorsInReverseTensorNameOrderWorks(self):\n # Use shorthand alias for the command prefix.\n out = self._registry.dispatch_command(\"lt\", [\"-s\", \"tensor_name\", \"-r\"])\n assert_listed_tensors(\n self,\n out, [\n \"simple_mul_add/u:0\", \"simple_mul_add/v:0\",\n \"simple_mul_add/u/read:0\", \"simple_mul_add/v/read:0\",\n \"simple_mul_add/matmul:0\", \"simple_mul_add/add:0\"\n ], [\n \"VariableV2\", \"VariableV2\", \"Identity\", \"Identity\",\n _matmul_op_name(), \"Add\"\n ],\n sort_by=\"tensor_name\",\n reverse=True)\n check_main_menu(self, out, list_tensors_enabled=False)\n\n def testListTensorsFilterByNodeNameRegex(self):\n out = self._registry.dispatch_command(\"list_tensors\",\n [\"--node_name_filter\", \".*read.*\"])\n assert_listed_tensors(\n self,\n out, [\"simple_mul_add/u/read:0\", \"simple_mul_add/v/read:0\"],\n [\"Identity\", \"Identity\"],\n node_name_regex=\".*read.*\")\n\n out = self._registry.dispatch_command(\"list_tensors\", [\"-n\", \"^read\"])\n assert_listed_tensors(self, out, [], [], node_name_regex=\"^read\")\n check_main_menu(self, out, list_tensors_enabled=False)\n\n def testListTensorFilterByOpTypeRegex(self):\n out = self._registry.dispatch_command(\"list_tensors\",\n [\"--op_type_filter\", \"Identity\"])\n assert_listed_tensors(\n self,\n out, [\"simple_mul_add/u/read:0\", \"simple_mul_add/v/read:0\"],\n [\"Identity\", \"Identity\"],\n op_type_regex=\"Identity\")\n\n out = self._registry.dispatch_command(\n \"list_tensors\", [\"-t\", \"(Add|\" + _matmul_op_name() + \")\"])\n assert_listed_tensors(\n self,\n out, [\"simple_mul_add/add:0\", \"simple_mul_add/matmul:0\"],\n [\"Add\", _matmul_op_name()],\n op_type_regex=(\"(Add|\" + _matmul_op_name() + \")\"))\n check_main_menu(self, out, list_tensors_enabled=False)\n\n def testListTensorFilterByNodeNameRegexAndOpTypeRegex(self):\n out = self._registry.dispatch_command(\n \"list_tensors\", [\"-t\", \"(Add|MatMul)\", \"-n\", \".*add$\"])\n assert_listed_tensors(\n self,\n out, [\"simple_mul_add/add:0\"], [\"Add\"],\n node_name_regex=\".*add$\",\n op_type_regex=\"(Add|MatMul)\")\n check_main_menu(self, out, list_tensors_enabled=False)\n\n def testListTensorWithFilterAndNodeNameExclusionWorks(self):\n # First, create and register the filter.\n def is_2x1_vector(datum, tensor):\n del datum # Unused.\n return list(tensor.shape) == [2, 1]\n self._analyzer.add_tensor_filter(\"is_2x1_vector\", is_2x1_vector)\n\n # Use shorthand alias for the command prefix.\n out = self._registry.dispatch_command(\n \"lt\", [\"-f\", \"is_2x1_vector\", \"--filter_exclude_node_names\", \".*v.*\"])\n\n # If the --filter_exclude_node_names were not used, then the matching\n # tensors would be:\n # - simple_mul_add/v:0\n # - simple_mul_add/v/read:0\n # - simple_mul_add/matmul:0\n # - 
simple_mul_add/add:0\n #\n # With the --filter_exclude_node_names option, only the last two should\n # show up in the result.\n assert_listed_tensors(\n self,\n out, [\"simple_mul_add/matmul:0\", \"simple_mul_add/add:0\"],\n [_matmul_op_name(), \"Add\"],\n tensor_filter_name=\"is_2x1_vector\")\n\n check_main_menu(self, out, list_tensors_enabled=False)\n\n def testListTensorsFilterNanOrInf(self):\n \"\"\"Test register and invoke a tensor filter.\"\"\"\n\n # First, register the filter.\n self._analyzer.add_tensor_filter(\"has_inf_or_nan\",\n debug_data.has_inf_or_nan)\n\n # Use shorthand alias for the command prefix.\n out = self._registry.dispatch_command(\"lt\", [\"-f\", \"has_inf_or_nan\"])\n\n # This TF graph run did not generate any bad numerical values.\n assert_listed_tensors(\n self, out, [], [], tensor_filter_name=\"has_inf_or_nan\")\n # TODO(cais): A test with some actual bad numerical values.\n\n check_main_menu(self, out, list_tensors_enabled=False)\n\n def testListTensorNonexistentFilter(self):\n \"\"\"Test attempt to use a nonexistent tensor filter.\"\"\"\n\n out = self._registry.dispatch_command(\"lt\", [\"-f\", \"foo_filter\"])\n\n self.assertEqual([\"ERROR: There is no tensor filter named \\\"foo_filter\\\".\"],\n out.lines)\n check_main_menu(self, out, list_tensors_enabled=False)\n\n def testListTensorsInvalidOptions(self):\n out = self._registry.dispatch_command(\"list_tensors\", [\"--bar\"])\n check_syntax_error_output(self, out, \"list_tensors\")\n\n def testNodeInfoByNodeName(self):\n node_name = \"simple_mul_add/matmul\"\n out = self._registry.dispatch_command(\"node_info\", [node_name])\n\n recipients = [(\"Add\", \"simple_mul_add/add\"), (\"Add\", \"simple_mul_add/add\")]\n\n assert_node_attribute_lines(self, out, node_name, _matmul_op_name(),\n self._main_device,\n [(\"Identity\", \"simple_mul_add/u/read\"),\n (\"Identity\", \"simple_mul_add/v/read\")], [],\n recipients, [])\n check_main_menu(\n self,\n out,\n list_tensors_enabled=True,\n list_inputs_node_name=node_name,\n print_tensor_node_name=node_name,\n list_outputs_node_name=node_name)\n\n # Verify that the node name is bold in the first line.\n self.assertEqual(\n [(len(out.lines[0]) - len(node_name), len(out.lines[0]), \"bold\")],\n out.font_attr_segs[0])\n\n def testNodeInfoShowAttributes(self):\n node_name = \"simple_mul_add/matmul\"\n out = self._registry.dispatch_command(\"node_info\", [\"-a\", node_name])\n\n test_attr_key_val_pairs = [(\"transpose_a\", \"b: false\"),\n (\"transpose_b\", \"b: false\"),\n (\"T\", \"type: DT_DOUBLE\")]\n if test_util.IsMklEnabled():\n test_attr_key_val_pairs.append((\"_kernel\", 's: \"MklNameChangeOp\"'))\n\n assert_node_attribute_lines(\n self,\n out,\n node_name,\n _matmul_op_name(),\n self._main_device, [(\"Identity\", \"simple_mul_add/u/read\"),\n (\"Identity\", \"simple_mul_add/v/read\")], [],\n [(\"Add\", \"simple_mul_add/add\"), (\"Add\", \"simple_mul_add/add\")], [],\n attr_key_val_pairs=test_attr_key_val_pairs)\n check_main_menu(\n self,\n out,\n list_tensors_enabled=True,\n list_inputs_node_name=node_name,\n print_tensor_node_name=node_name,\n list_outputs_node_name=node_name)\n\n def testNodeInfoShowDumps(self):\n node_name = \"simple_mul_add/matmul\"\n out = self._registry.dispatch_command(\"node_info\", [\"-d\", node_name])\n\n assert_node_attribute_lines(\n self,\n out,\n node_name,\n _matmul_op_name(),\n self._main_device, [(\"Identity\", \"simple_mul_add/u/read\"),\n (\"Identity\", \"simple_mul_add/v/read\")], [],\n [(\"Add\", \"simple_mul_add/add\"), 
(\"Add\", \"simple_mul_add/add\")], [],\n num_dumped_tensors=1)\n check_main_menu(\n self,\n out,\n list_tensors_enabled=True,\n list_inputs_node_name=node_name,\n print_tensor_node_name=node_name,\n list_outputs_node_name=node_name)\n check_menu_item(self, out, 16,\n len(out.lines[16]) - len(out.lines[16].strip()),\n len(out.lines[16]), \"pt %s:0 -n 0\" % node_name)\n\n def testNodeInfoShowStackTraceUnavailableIsIndicated(self):\n self._debug_dump.set_python_graph(None)\n\n node_name = \"simple_mul_add/matmul\"\n out = self._registry.dispatch_command(\"node_info\", [\"-t\", node_name])\n\n assert_node_attribute_lines(\n self,\n out,\n node_name,\n _matmul_op_name(),\n self._main_device, [(\"Identity\", \"simple_mul_add/u/read\"),\n (\"Identity\", \"simple_mul_add/v/read\")], [],\n [(\"Add\", \"simple_mul_add/add\"), (\"Add\", \"simple_mul_add/add\")], [],\n show_stack_trace=True,\n stack_trace_available=False)\n check_main_menu(\n self,\n out,\n list_tensors_enabled=True,\n list_inputs_node_name=node_name,\n print_tensor_node_name=node_name,\n list_outputs_node_name=node_name)\n\n def testNodeInfoShowStackTraceAvailableWorks(self):\n self._debug_dump.set_python_graph(self._sess.graph)\n\n node_name = \"simple_mul_add/matmul\"\n out = self._registry.dispatch_command(\"node_info\", [\"-t\", node_name])\n\n assert_node_attribute_lines(\n self,\n out,\n node_name,\n _matmul_op_name(),\n self._main_device, [(\"Identity\", \"simple_mul_add/u/read\"),\n (\"Identity\", \"simple_mul_add/v/read\")], [],\n [(\"Add\", \"simple_mul_add/add\"), (\"Add\", \"simple_mul_add/add\")], [],\n show_stack_trace=True,\n stack_trace_available=True)\n check_main_menu(\n self,\n out,\n list_tensors_enabled=True,\n list_inputs_node_name=node_name,\n print_tensor_node_name=node_name,\n list_outputs_node_name=node_name)\n\n def testNodeInfoByTensorName(self):\n node_name = \"simple_mul_add/u/read\"\n tensor_name = node_name + \":0\"\n out = self._registry.dispatch_command(\"node_info\", [tensor_name])\n\n assert_node_attribute_lines(self, out, node_name, \"Identity\",\n self._main_device,\n [(\"VariableV2\", \"simple_mul_add/u\")], [],\n [(_matmul_op_name(), \"simple_mul_add/matmul\")],\n [])\n check_main_menu(\n self,\n out,\n list_tensors_enabled=True,\n list_inputs_node_name=node_name,\n print_tensor_node_name=node_name,\n list_outputs_node_name=node_name)\n\n def testNodeInfoNonexistentNodeName(self):\n out = self._registry.dispatch_command(\"node_info\", [\"bar\"])\n self.assertEqual(\n [\"ERROR: There is no node named \\\"bar\\\" in the partition graphs\"],\n out.lines)\n # Check color indicating error.\n self.assertEqual({0: [(0, 59, cli_shared.COLOR_RED)]}, out.font_attr_segs)\n check_main_menu(self, out, list_tensors_enabled=True)\n\n def testPrintTensor(self):\n node_name = \"simple_mul_add/matmul\"\n tensor_name = node_name + \":0\"\n out = self._registry.dispatch_command(\n \"print_tensor\", [tensor_name], screen_info={\"cols\": 80})\n\n self.assertEqual([\n \"Tensor \\\"%s:DebugIdentity\\\":\" % tensor_name,\n \" dtype: float64\",\n \" shape: (2, 1)\",\n \"\",\n \"array([[ 7.],\",\n \" [-2.]])\",\n ], out.lines)\n\n self.assertIn(\"tensor_metadata\", out.annotations)\n self.assertIn(4, out.annotations)\n self.assertIn(5, out.annotations)\n check_main_menu(\n self,\n out,\n list_tensors_enabled=True,\n node_info_node_name=node_name,\n list_inputs_node_name=node_name,\n list_outputs_node_name=node_name)\n\n def testPrintTensorAndWriteToNpyFile(self):\n node_name = \"simple_mul_add/matmul\"\n tensor_name = 
node_name + \":0\"\n npy_path = os.path.join(self._dump_root, \"matmul.npy\")\n out = self._registry.dispatch_command(\n \"print_tensor\", [tensor_name, \"-w\", npy_path],\n screen_info={\"cols\": 80})\n\n self.assertEqual([\n \"Tensor \\\"%s:DebugIdentity\\\":\" % tensor_name,\n \" dtype: float64\",\n \" shape: (2, 1)\",\n \"\",\n ], out.lines[:4])\n self.assertTrue(out.lines[4].startswith(\"Saved value to: %s (\" % npy_path))\n # Load the numpy file and verify its contents.\n self.assertAllClose([[7.0], [-2.0]], np.load(npy_path))\n\n def testPrintTensorHighlightingRanges(self):\n node_name = \"simple_mul_add/matmul\"\n tensor_name = node_name + \":0\"\n out = self._registry.dispatch_command(\n \"print_tensor\", [tensor_name, \"--ranges\", \"[-inf, 0.0]\"],\n screen_info={\"cols\": 80})\n\n self.assertEqual([\n \"Tensor \\\"%s:DebugIdentity\\\": \" % tensor_name +\n \"Highlighted([-inf, 0.0]): 1 of 2 element(s) (50.00%)\",\n \" dtype: float64\",\n \" shape: (2, 1)\",\n \"\",\n \"array([[ 7.],\",\n \" [-2.]])\",\n ], out.lines)\n\n self.assertIn(\"tensor_metadata\", out.annotations)\n self.assertIn(4, out.annotations)\n self.assertIn(5, out.annotations)\n self.assertEqual([(8, 11, \"bold\")], out.font_attr_segs[5])\n\n out = self._registry.dispatch_command(\n \"print_tensor\", [tensor_name, \"--ranges\", \"[[-inf, -5.5], [5.5, inf]]\"],\n screen_info={\"cols\": 80})\n\n self.assertEqual([\n \"Tensor \\\"%s:DebugIdentity\\\": \" % tensor_name +\n \"Highlighted([[-inf, -5.5], [5.5, inf]]): \"\n \"1 of 2 element(s) (50.00%)\",\n \" dtype: float64\",\n \" shape: (2, 1)\",\n \"\",\n \"array([[ 7.],\",\n \" [-2.]])\",\n ], out.lines)\n\n self.assertIn(\"tensor_metadata\", out.annotations)\n self.assertIn(4, out.annotations)\n self.assertIn(5, out.annotations)\n self.assertEqual([(9, 11, \"bold\")], out.font_attr_segs[4])\n self.assertNotIn(5, out.font_attr_segs)\n check_main_menu(\n self,\n out,\n list_tensors_enabled=True,\n node_info_node_name=node_name,\n list_inputs_node_name=node_name,\n list_outputs_node_name=node_name)\n\n def testPrintTensorHighlightingRangesAndIncludingNumericSummary(self):\n node_name = \"simple_mul_add/matmul\"\n tensor_name = node_name + \":0\"\n out = self._registry.dispatch_command(\n \"print_tensor\", [tensor_name, \"--ranges\", \"[-inf, 0.0]\", \"-s\"],\n screen_info={\"cols\": 80})\n\n self.assertEqual([\n \"Tensor \\\"%s:DebugIdentity\\\": \" % tensor_name +\n \"Highlighted([-inf, 0.0]): 1 of 2 element(s) (50.00%)\",\n \" dtype: float64\",\n \" shape: (2, 1)\",\n \"\",\n \"Numeric summary:\",\n \"| - + | total |\",\n \"| 1 1 | 2 |\",\n \"| min max mean std |\",\n \"| -2.0 7.0 2.5 4.5 |\",\n \"\",\n \"array([[ 7.],\",\n \" [-2.]])\",\n ], out.lines)\n\n self.assertIn(\"tensor_metadata\", out.annotations)\n self.assertIn(10, out.annotations)\n self.assertIn(11, out.annotations)\n self.assertEqual([(8, 11, \"bold\")], out.font_attr_segs[11])\n\n def testPrintTensorWithSlicing(self):\n node_name = \"simple_mul_add/matmul\"\n tensor_name = node_name + \":0\"\n out = self._registry.dispatch_command(\n \"print_tensor\", [tensor_name + \"[1, :]\"], screen_info={\"cols\": 80})\n\n self.assertEqual([\n \"Tensor \\\"%s:DebugIdentity[1, :]\\\":\" % tensor_name, \" dtype: float64\",\n \" shape: (1,)\", \"\", \"array([-2.])\"\n ], out.lines)\n\n self.assertIn(\"tensor_metadata\", out.annotations)\n self.assertIn(4, out.annotations)\n check_main_menu(\n self,\n out,\n list_tensors_enabled=True,\n node_info_node_name=node_name,\n list_inputs_node_name=node_name,\n 
list_outputs_node_name=node_name)\n\n def testPrintTensorInvalidSlicingString(self):\n node_name = \"simple_mul_add/matmul\"\n tensor_name = node_name + \":0\"\n out = self._registry.dispatch_command(\n \"print_tensor\", [tensor_name + \"[1, foo()]\"], screen_info={\"cols\": 80})\n\n self.assertEqual(\"Error occurred during handling of command: print_tensor \"\n + tensor_name + \"[1, foo()]:\", out.lines[0])\n self.assertEqual(\"ValueError: Invalid tensor-slicing string.\",\n out.lines[-2])\n\n def testPrintTensorValidExplicitNumber(self):\n node_name = \"simple_mul_add/matmul\"\n tensor_name = node_name + \":0\"\n out = self._registry.dispatch_command(\n \"print_tensor\", [tensor_name, \"-n\", \"0\"], screen_info={\"cols\": 80})\n\n self.assertEqual([\n \"Tensor \\\"%s:DebugIdentity\\\":\" % tensor_name,\n \" dtype: float64\",\n \" shape: (2, 1)\",\n \"\",\n \"array([[ 7.],\",\n \" [-2.]])\",\n ], out.lines)\n\n self.assertIn(\"tensor_metadata\", out.annotations)\n self.assertIn(4, out.annotations)\n self.assertIn(5, out.annotations)\n check_main_menu(\n self,\n out,\n list_tensors_enabled=True,\n node_info_node_name=node_name,\n list_inputs_node_name=node_name,\n list_outputs_node_name=node_name)\n\n def testPrintTensorInvalidExplicitNumber(self):\n node_name = \"simple_mul_add/matmul\"\n tensor_name = node_name + \":0\"\n out = self._registry.dispatch_command(\n \"print_tensor\", [tensor_name, \"-n\", \"1\"], screen_info={\"cols\": 80})\n\n self.assertEqual([\n \"ERROR: Invalid number (1) for tensor simple_mul_add/matmul:0, \"\n \"which generated one dump.\"\n ], out.lines)\n\n self.assertNotIn(\"tensor_metadata\", out.annotations)\n\n check_main_menu(\n self,\n out,\n list_tensors_enabled=True,\n node_info_node_name=node_name,\n list_inputs_node_name=node_name,\n list_outputs_node_name=node_name)\n\n def testPrintTensorMissingOutputSlotLeadsToOnlyDumpedTensorPrinted(self):\n node_name = \"simple_mul_add/matmul\"\n out = self._registry.dispatch_command(\"print_tensor\", [node_name])\n\n self.assertEqual([\n \"Tensor \\\"%s:0:DebugIdentity\\\":\" % node_name, \" dtype: float64\",\n \" shape: (2, 1)\", \"\", \"array([[ 7.],\", \" [-2.]])\"\n ], out.lines)\n check_main_menu(\n self,\n out,\n list_tensors_enabled=True,\n node_info_node_name=node_name,\n list_inputs_node_name=node_name,\n list_outputs_node_name=node_name)\n\n def testPrintTensorNonexistentNodeName(self):\n out = self._registry.dispatch_command(\n \"print_tensor\", [\"simple_mul_add/matmul/foo:0\"])\n\n self.assertEqual([\n \"ERROR: Node \\\"simple_mul_add/matmul/foo\\\" does not exist in partition \"\n \"graphs\"\n ], out.lines)\n check_main_menu(self, out, list_tensors_enabled=True)\n\n def testEvalExpression(self):\n node_name = \"simple_mul_add/matmul\"\n tensor_name = node_name + \":0\"\n out = self._registry.dispatch_command(\n \"eval\", [\"np.matmul(`%s`, `%s`.T)\" % (tensor_name, tensor_name)],\n screen_info={\"cols\": 80})\n\n cli_test_utils.assert_lines_equal_ignoring_whitespace(\n self,\n [\"Tensor \\\"from eval of expression \"\n \"'np.matmul(`simple_mul_add/matmul:0`, \"\n \"`simple_mul_add/matmul:0`.T)'\\\":\",\n \" dtype: float64\",\n \" shape: (2, 2)\",\n \"\",\n \"Numeric summary:\",\n \"| - + | total |\",\n \"| 2 2 | 4 |\",\n \"| min max mean std |\"],\n out.lines[:8])\n cli_test_utils.assert_array_lines_close(\n self, [-14.0, 49.0, 6.25, 25.7524270701], out.lines[8:9])\n cli_test_utils.assert_array_lines_close(\n self, [[49.0, -14.0], [-14.0, 4.0]], out.lines[10:])\n\n def 
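In the eval command exercised above, backtick-quoted names stand in for dumped tensor values. A quick check of the asserted numbers, as a plain NumPy sketch:

import numpy as np

t = np.array([[7.0], [-2.0]])   # dumped value of simple_mul_add/matmul:0
result = np.matmul(t, t.T)
print(result)                   # [[ 49. -14.]
                                #  [-14.   4.]]
print(result.min(), result.max(), result.mean(), result.std())
# -14.0 49.0 6.25 25.7524270701 -- the numeric summary asserted above.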
testEvalExpressionAndWriteToNpyFile(self):\n node_name = \"simple_mul_add/matmul\"\n tensor_name = node_name + \":0\"\n npy_path = os.path.join(self._dump_root, \"matmul_eval.npy\")\n out = self._registry.dispatch_command(\n \"eval\",\n [\"np.matmul(`%s`, `%s`.T)\" % (tensor_name, tensor_name), \"-w\",\n npy_path], screen_info={\"cols\": 80})\n\n self.assertEqual([\n \"Tensor \\\"from eval of expression \"\n \"'np.matmul(`simple_mul_add/matmul:0`, \"\n \"`simple_mul_add/matmul:0`.T)'\\\":\",\n \" dtype: float64\",\n \" shape: (2, 2)\",\n \"\"], out.lines[:4])\n\n self.assertTrue(out.lines[4].startswith(\"Saved value to: %s (\" % npy_path))\n # Load the numpy file and verify its contents.\n self.assertAllClose([[49.0, -14.0], [-14.0, 4.0]], np.load(npy_path))\n\n def testAddGetTensorFilterLambda(self):\n analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,\n _cli_config_from_temp_file())\n analyzer.add_tensor_filter(\"foo_filter\", lambda x, y: True)\n self.assertTrue(analyzer.get_tensor_filter(\"foo_filter\")(None, None))\n\n def testAddGetTensorFilterNestedFunction(self):\n analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,\n _cli_config_from_temp_file())\n\n def foo_filter(unused_arg_0, unused_arg_1):\n return True\n\n analyzer.add_tensor_filter(\"foo_filter\", foo_filter)\n self.assertTrue(analyzer.get_tensor_filter(\"foo_filter\")(None, None))\n\n def testAddTensorFilterEmptyName(self):\n analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,\n _cli_config_from_temp_file())\n\n with self.assertRaisesRegexp(ValueError,\n \"Input argument filter_name cannot be empty.\"):\n analyzer.add_tensor_filter(\"\", lambda datum, tensor: True)\n\n def testAddTensorFilterNonStrName(self):\n analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,\n _cli_config_from_temp_file())\n\n with self.assertRaisesRegexp(\n TypeError,\n \"Input argument filter_name is expected to be str, \"\"but is not\"):\n analyzer.add_tensor_filter(1, lambda datum, tensor: True)\n\n def testAddGetTensorFilterNonCallable(self):\n analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,\n _cli_config_from_temp_file())\n\n with self.assertRaisesRegexp(\n TypeError, \"Input argument filter_callable is expected to be callable, \"\n \"but is not.\"):\n analyzer.add_tensor_filter(\"foo_filter\", \"bar\")\n\n def testGetNonexistentTensorFilter(self):\n analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,\n _cli_config_from_temp_file())\n\n analyzer.add_tensor_filter(\"foo_filter\", lambda datum, tensor: True)\n with self.assertRaisesRegexp(ValueError,\n \"There is no tensor filter named \\\"bar\\\"\"):\n analyzer.get_tensor_filter(\"bar\")\n\n def _findSourceLine(self, annotated_source, line_number):\n \"\"\"Find line of given line number in annotated source.\n\n Args:\n annotated_source: (debugger_cli_common.RichTextLines) the annotated source\n line_number: (int) 1-based line number\n\n Returns:\n (int) If line_number is found, 0-based line index in\n annotated_source.lines. 
Otherwise, None.\n \"\"\"\n\n index = None\n for i, line in enumerate(annotated_source.lines):\n if line.startswith(\"L%d \" % line_number):\n index = i\n break\n return index\n\n def testPrintSourceForOpNamesWholeFileWorks(self):\n self._debug_dump.set_python_graph(self._sess.graph)\n out = self._registry.dispatch_command(\n \"print_source\", [self._curr_file_path], screen_info={\"cols\": 80})\n\n # Verify the annotation of the line that creates u.\n index = self._findSourceLine(out, self._u_line_number)\n self.assertEqual(\n [\"L%d u = variables.VariableV1(u_init, name=u_name)\" %\n self._u_line_number,\n \" simple_mul_add/u\",\n \" simple_mul_add/u/Assign\",\n \" simple_mul_add/u/read\"],\n out.lines[index : index + 4])\n self.assertEqual(\"pt simple_mul_add/u\",\n out.font_attr_segs[index + 1][0][2].content)\n # simple_mul_add/u/Assign is not used in this run because the Variable has\n # already been initialized.\n self.assertEqual(cli_shared.COLOR_BLUE, out.font_attr_segs[index + 2][0][2])\n self.assertEqual(\"pt simple_mul_add/u/read\",\n out.font_attr_segs[index + 3][0][2].content)\n\n # Verify the annotation of the line that creates v.\n index = self._findSourceLine(out, self._v_line_number)\n self.assertEqual(\n [\"L%d v = variables.VariableV1(v_init, name=v_name)\" %\n self._v_line_number,\n \" simple_mul_add/v\"],\n out.lines[index : index + 2])\n self.assertEqual(\"pt simple_mul_add/v\",\n out.font_attr_segs[index + 1][0][2].content)\n\n # Verify the annotation of the line that creates w.\n index = self._findSourceLine(out, self._w_line_number)\n self.assertEqual(\n [\"L%d \" % self._w_line_number +\n \"w = math_ops.matmul(u, v, name=\\\"simple_mul_add/matmul\\\")\",\n \" simple_mul_add/matmul\"],\n out.lines[index : index + 2])\n self.assertEqual(\"pt simple_mul_add/matmul\",\n out.font_attr_segs[index + 1][0][2].content)\n\n # Verify the annotation of the line that creates x.\n index = self._findSourceLine(out, self._x_line_number)\n self.assertEqual(\n [\"L%d \" % self._x_line_number +\n \"x = math_ops.add(w, w, name=\\\"simple_mul_add/add\\\")\",\n \" simple_mul_add/add\"],\n out.lines[index : index + 2])\n self.assertEqual(\"pt simple_mul_add/add\",\n out.font_attr_segs[index + 1][0][2].content)\n\n def testPrintSourceForTensorNamesWholeFileWorks(self):\n self._debug_dump.set_python_graph(self._sess.graph)\n out = self._registry.dispatch_command(\n \"print_source\",\n [self._curr_file_path, \"--tensors\"],\n screen_info={\"cols\": 80})\n\n # Verify the annotation of the line that creates u.\n index = self._findSourceLine(out, self._u_line_number)\n self.assertEqual(\n [\"L%d u = variables.VariableV1(u_init, name=u_name)\" %\n self._u_line_number,\n \" simple_mul_add/u/read:0\",\n \" simple_mul_add/u:0\"],\n out.lines[index : index + 3])\n self.assertEqual(\"pt simple_mul_add/u/read:0\",\n out.font_attr_segs[index + 1][0][2].content)\n self.assertEqual(\"pt simple_mul_add/u:0\",\n out.font_attr_segs[index + 2][0][2].content)\n\n def testPrintSourceForOpNamesStartingAtSpecifiedLineWorks(self):\n self._debug_dump.set_python_graph(self._sess.graph)\n out = self._registry.dispatch_command(\n \"print_source\",\n [self._curr_file_path, \"-b\", \"3\"],\n screen_info={\"cols\": 80})\n\n self.assertEqual(\n 2, out.annotations[debugger_cli_common.INIT_SCROLL_POS_KEY])\n\n index = self._findSourceLine(out, self._u_line_number)\n self.assertEqual(\n [\"L%d u = variables.VariableV1(u_init, name=u_name)\" %\n self._u_line_number,\n \" simple_mul_add/u\",\n \" simple_mul_add/u/Assign\",\n 
\" simple_mul_add/u/read\"],\n out.lines[index : index + 4])\n self.assertEqual(\"pt simple_mul_add/u\",\n out.font_attr_segs[index + 1][0][2].content)\n # simple_mul_add/u/Assign is not used in this run because the Variable has\n # already been initialized.\n self.assertEqual(cli_shared.COLOR_BLUE, out.font_attr_segs[index + 2][0][2])\n self.assertEqual(\"pt simple_mul_add/u/read\",\n out.font_attr_segs[index + 3][0][2].content)\n\n def testPrintSourceForOpNameSettingMaximumElementCountWorks(self):\n self._debug_dump.set_python_graph(self._sess.graph)\n out = self._registry.dispatch_command(\n \"print_source\",\n [self._curr_file_path, \"-m\", \"1\"],\n screen_info={\"cols\": 80})\n\n index = self._findSourceLine(out, self._u_line_number)\n self.assertEqual(\n [\"L%d u = variables.VariableV1(u_init, name=u_name)\" %\n self._u_line_number,\n \" simple_mul_add/u\",\n \" (... Omitted 2 of 3 op(s) ...) +5\"],\n out.lines[index : index + 3])\n self.assertEqual(\"pt simple_mul_add/u\",\n out.font_attr_segs[index + 1][0][2].content)\n more_elements_command = out.font_attr_segs[index + 2][-1][2].content\n self.assertStartsWith(more_elements_command,\n \"ps %s \" % self._curr_file_path)\n self.assertIn(\" -m 6\", more_elements_command)\n\n def testListSourceWorks(self):\n self._debug_dump.set_python_graph(self._sess.graph)\n out = self._registry.dispatch_command(\"list_source\", [])\n\n non_tf_lib_files_start = [\n i for i in xrange(len(out.lines))\n if out.lines[i].startswith(\"Source file path\")][0] + 1\n non_tf_lib_files_end = [\n i for i in xrange(len(out.lines))\n if out.lines[i].startswith(\"TensorFlow Python library file(s):\")][0] - 1\n non_tf_lib_files = [\n line.split(\" \")[0] for line\n in out.lines[non_tf_lib_files_start : non_tf_lib_files_end]]\n self.assertIn(self._curr_file_path, non_tf_lib_files)\n\n # Check that the TF library files are marked with special color attribute.\n for i in xrange(non_tf_lib_files_end + 1, len(out.lines)):\n if not out.lines[i]:\n continue\n for attr_seg in out.font_attr_segs[i]:\n self.assertTrue(cli_shared.COLOR_GRAY in attr_seg[2] or\n attr_seg[2] == cli_shared.COLOR_GRAY)\n\n def testListSourceWithNodeNameFilterWithMatchesWorks(self):\n self._debug_dump.set_python_graph(self._sess.graph)\n out = self._registry.dispatch_command(\"list_source\", [\"-n\", \".*/read\"])\n\n self.assertStartsWith(out.lines[1], \"Node name regex filter: \\\".*/read\\\"\")\n\n non_tf_lib_files_start = [\n i for i in xrange(len(out.lines))\n if out.lines[i].startswith(\"Source file path\")][0] + 1\n non_tf_lib_files_end = [\n i for i in xrange(len(out.lines))\n if out.lines[i].startswith(\"TensorFlow Python library file(s):\")][0] - 1\n non_tf_lib_files = [\n line.split(\" \")[0] for line\n in out.lines[non_tf_lib_files_start : non_tf_lib_files_end]]\n self.assertIn(self._curr_file_path, non_tf_lib_files)\n\n # Check that the TF library files are marked with special color attribute.\n for i in xrange(non_tf_lib_files_end + 1, len(out.lines)):\n if not out.lines[i]:\n continue\n for attr_seg in out.font_attr_segs[i]:\n self.assertTrue(cli_shared.COLOR_GRAY in attr_seg[2] or\n attr_seg[2] == cli_shared.COLOR_GRAY)\n\n def testListSourceWithNodeNameFilterWithNoMatchesWorks(self):\n self._debug_dump.set_python_graph(self._sess.graph)\n out = self._registry.dispatch_command(\"list_source\", [\"-n\", \"^$\"])\n\n self.assertEqual([\n \"List of source files that created nodes in this run\",\n \"Node name regex filter: \\\"^$\\\"\", \"\",\n \"[No source file information.]\"], 
out.lines)\n\n def testListSourceWithPathAndNodeNameFiltersWorks(self):\n self._debug_dump.set_python_graph(self._sess.graph)\n out = self._registry.dispatch_command(\n \"list_source\", [\"-p\", self._curr_file_path, \"-n\", \".*read\"])\n\n self.assertEqual([\n \"List of source files that created nodes in this run\",\n \"File path regex filter: \\\"%s\\\"\" % self._curr_file_path,\n \"Node name regex filter: \\\".*read\\\"\", \"\"], out.lines[:4])\n\n def testListSourceWithCompiledPythonSourceWorks(self):\n def fake_list_source_files_against_dump(dump,\n path_regex_whitelist=None,\n node_name_regex_whitelist=None):\n del dump, path_regex_whitelist, node_name_regex_whitelist\n return [(\"compiled_1.pyc\", False, 10, 20, 30, 4),\n (\"compiled_2.pyo\", False, 10, 20, 30, 5),\n (\"uncompiled.py\", False, 10, 20, 30, 6)]\n\n with test.mock.patch.object(\n source_utils, \"list_source_files_against_dump\",\n side_effect=fake_list_source_files_against_dump):\n out = self._registry.dispatch_command(\"list_source\", [])\n\n self.assertStartsWith(out.lines[4], \"compiled_1.pyc\")\n self.assertEqual((0, 14, [cli_shared.COLOR_WHITE]),\n out.font_attr_segs[4][0])\n self.assertStartsWith(out.lines[5], \"compiled_2.pyo\")\n self.assertEqual((0, 14, [cli_shared.COLOR_WHITE]),\n out.font_attr_segs[5][0])\n self.assertStartsWith(out.lines[6], \"uncompiled.py\")\n self.assertEqual(0, out.font_attr_segs[6][0][0])\n self.assertEqual(13, out.font_attr_segs[6][0][1])\n self.assertEqual(cli_shared.COLOR_WHITE, out.font_attr_segs[6][0][2][0])\n self.assertEqual(\"ps uncompiled.py -b 6\",\n out.font_attr_segs[6][0][2][1].content)\n\n def testListInputInvolvingNodesWithMultipleOutputs(self):\n \"\"\"List an input tree containing tensors from non-:0 output slot.\"\"\"\n\n with session.Session(config=no_rewrite_session_config()) as sess:\n x = variables.VariableV1([1, 3, 3, 7], name=\"x\")\n _, idx = array_ops.unique(x, name=\"x_unique\")\n idx_times_two = math_ops.multiply(idx, 2, name=\"idx_times_two\")\n self.evaluate(x.initializer)\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugIdentity\"],\n debug_urls=\"file://%s\" % self._dump_root_for_unique)\n run_metadata = config_pb2.RunMetadata()\n self.assertAllEqual(\n [0, 2, 2, 4],\n sess.run(idx_times_two,\n options=run_options,\n run_metadata=run_metadata))\n debug_dump = debug_data.DebugDumpDir(\n self._dump_root_for_unique,\n partition_graphs=run_metadata.partition_graphs)\n _, registry = create_analyzer_cli(debug_dump)\n\n out = registry.dispatch_command(\"li\", [\"idx_times_two\"])\n self.assertEqual(\n [\"Inputs to node \\\"idx_times_two\\\" (Depth limit = 1):\",\n \"|- (1) x_unique:1\"], out.lines[:2])\n\n\nclass AnalyzerCLIPrintLargeTensorTest(test_util.TensorFlowTestCase):\n\n @classmethod\n def setUpClass(cls):\n cls._dump_root = tempfile.mkdtemp()\n\n with session.Session(config=no_rewrite_session_config()) as sess:\n # 2400 elements should exceed the default threshold (2000).\n x = constant_op.constant(np.zeros([300, 8]), name=\"large_tensors/x\")\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugIdentity\"],\n debug_urls=\"file://%s\" % cls._dump_root)\n\n # Invoke Session.run().\n run_metadata = config_pb2.RunMetadata()\n sess.run(x, options=run_options, run_metadata=run_metadata)\n\n cls._debug_dump = debug_data.DebugDumpDir(\n cls._dump_root, 
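The ":1" in the expected output above comes from array_ops.unique returning two tensors, so the index tensor occupies output slot 1 of node "x_unique". The run result [0, 2, 2, 4] is simply the inverse indices doubled, as a NumPy sketch shows:

import numpy as np

values, idx = np.unique([1, 3, 3, 7], return_inverse=True)
print(values)   # [1 3 7]
print(idx)      # [0 1 1 2]
print(idx * 2)  # [0 2 2 4] -- the idx_times_two result asserted above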
partition_graphs=run_metadata.partition_graphs)\n\n # Construct the analyzer and command registry.\n cls._analyzer, cls._registry = create_analyzer_cli(cls._debug_dump)\n\n @classmethod\n def tearDownClass(cls):\n # Tear down temporary dump directory.\n shutil.rmtree(cls._dump_root)\n\n def testPrintLargeTensorWithoutAllOption(self):\n out = self._registry.dispatch_command(\n \"print_tensor\", [\"large_tensors/x:0\"], screen_info={\"cols\": 80})\n\n # Assert that ellipses are present in the tensor value printout.\n self.assertIn(\"...,\", out.lines[4])\n\n # 2100 still exceeds 2000.\n out = self._registry.dispatch_command(\n \"print_tensor\", [\"large_tensors/x:0[:, 0:7]\"],\n screen_info={\"cols\": 80})\n\n self.assertIn(\"...,\", out.lines[4])\n\n def testPrintLargeTensorWithAllOption(self):\n out = self._registry.dispatch_command(\n \"print_tensor\", [\"large_tensors/x:0\", \"-a\"],\n screen_info={\"cols\": 80})\n\n # Assert that ellipses are not present in the tensor value printout.\n self.assertNotIn(\"...,\", out.lines[4])\n\n out = self._registry.dispatch_command(\n \"print_tensor\", [\"large_tensors/x:0[:, 0:7]\", \"--all\"],\n screen_info={\"cols\": 80})\n self.assertNotIn(\"...,\", out.lines[4])\n\n\n@test_util.run_v1_only(\"b/120545219\")\nclass AnalyzerCLIControlDepTest(test_util.TensorFlowTestCase):\n\n @classmethod\n def setUpClass(cls):\n cls._dump_root = tempfile.mkdtemp()\n\n cls._is_gpu_available = test.is_gpu_available()\n if cls._is_gpu_available:\n gpu_name = test_util.gpu_device_name()\n cls._main_device = \"/job:localhost/replica:0/task:0\" + gpu_name\n else:\n cls._main_device = \"/job:localhost/replica:0/task:0/device:CPU:0\"\n\n with session.Session(config=no_rewrite_session_config()) as sess:\n x_init_val = np.array([5.0, 3.0])\n x_init = constant_op.constant(x_init_val, shape=[2])\n x = variables.VariableV1(x_init, name=\"control_deps/x\")\n\n y = math_ops.add(x, x, name=\"control_deps/y\")\n y = control_flow_ops.with_dependencies(\n [x], y, name=\"control_deps/ctrl_dep_y\")\n\n z = math_ops.multiply(x, y, name=\"control_deps/z\")\n\n z = control_flow_ops.with_dependencies(\n [x, y], z, name=\"control_deps/ctrl_dep_z\")\n\n x.initializer.run()\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_utils.watch_graph(\n run_options,\n sess.graph,\n debug_ops=[\"DebugIdentity\"],\n debug_urls=\"file://%s\" % cls._dump_root)\n\n # Invoke Session.run().\n run_metadata = config_pb2.RunMetadata()\n sess.run(z, options=run_options, run_metadata=run_metadata)\n\n debug_dump = debug_data.DebugDumpDir(\n cls._dump_root, partition_graphs=run_metadata.partition_graphs)\n\n # Construct the analyzer and command handler registry.\n _, cls._registry = create_analyzer_cli(debug_dump)\n\n @classmethod\n def tearDownClass(cls):\n # Tear down temporary dump directory.\n shutil.rmtree(cls._dump_root)\n\n def testNodeInfoWithControlDependencies(self):\n # Call node_info on a node with control inputs.\n out = self._registry.dispatch_command(\"node_info\",\n [\"control_deps/ctrl_dep_y\"])\n\n assert_node_attribute_lines(\n self, out, \"control_deps/ctrl_dep_y\", \"Identity\",\n self._main_device, [(\"Add\", \"control_deps/y\")],\n [(\"VariableV2\", \"control_deps/x\")],\n [(\"Mul\", \"control_deps/z\")],\n [(\"Identity\", \"control_deps/ctrl_dep_z\")])\n\n # Call node info on a node with control recipients.\n out = self._registry.dispatch_command(\"ni\", [\"control_deps/x\"])\n\n assert_node_attribute_lines(self, out, \"control_deps/x\", \"VariableV2\",\n 
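A hedged sketch of what with_dependencies builds in the graph above: an Identity of the wrapped tensor carrying control edges from the listed dependencies. This is why node_info reports "control_deps/ctrl_dep_y" as an Identity with data input "control_deps/y" and control input "control_deps/x". The helper name below is hypothetical; the real op construction lives in control_flow_ops:

from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops

def with_dependencies_sketch(dependencies, tensor, name):
  # Identity under a control-dependency scope, mirroring with_dependencies.
  with ops.control_dependencies(dependencies):
    return array_ops.identity(tensor, name=name)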
self._main_device, [], [],\n [(\"Identity\", \"control_deps/x/read\")],\n [(\"Identity\", \"control_deps/ctrl_dep_y\"),\n (\"Identity\", \"control_deps/ctrl_dep_z\")])\n\n # Verify the menu items (command shortcuts) in the output.\n check_menu_item(self, out, 10,\n len(out.lines[10]) - len(\"control_deps/x/read\"),\n len(out.lines[10]), \"ni -a -d -t control_deps/x/read\")\n if out.lines[13].endswith(\"control_deps/ctrl_dep_y\"):\n y_line = 13\n z_line = 14\n else:\n y_line = 14\n z_line = 13\n check_menu_item(self, out, y_line,\n len(out.lines[y_line]) - len(\"control_deps/ctrl_dep_y\"),\n len(out.lines[y_line]),\n \"ni -a -d -t control_deps/ctrl_dep_y\")\n check_menu_item(self, out, z_line,\n len(out.lines[z_line]) - len(\"control_deps/ctrl_dep_z\"),\n len(out.lines[z_line]),\n \"ni -a -d -t control_deps/ctrl_dep_z\")\n\n def testListInputsNonRecursiveNoControl(self):\n \"\"\"List inputs non-recursively, without any control inputs.\"\"\"\n\n # Do not include node op types.\n node_name = \"control_deps/z\"\n out = self._registry.dispatch_command(\"list_inputs\", [node_name])\n\n self.assertEqual([\n \"Inputs to node \\\"%s\\\" (Depth limit = 1):\" % node_name,\n \"|- (1) control_deps/x/read\", \"| |- ...\",\n \"|- (1) control_deps/ctrl_dep_y\", \" |- ...\", \"\", \"Legend:\",\n \" (d): recursion depth = d.\"\n ], out.lines)\n\n # Include node op types.\n out = self._registry.dispatch_command(\"li\", [\"-t\", node_name])\n\n self.assertEqual([\n \"Inputs to node \\\"%s\\\" (Depth limit = 1):\" % node_name,\n \"|- (1) [Identity] control_deps/x/read\", \"| |- ...\",\n \"|- (1) [Identity] control_deps/ctrl_dep_y\", \" |- ...\", \"\", \"Legend:\",\n \" (d): recursion depth = d.\", \" [Op]: Input node has op type Op.\"\n ], out.lines)\n check_main_menu(\n self,\n out,\n list_tensors_enabled=True,\n node_info_node_name=node_name,\n print_tensor_node_name=node_name,\n list_outputs_node_name=node_name)\n\n # Verify that the node name has bold attribute.\n self.assertEqual([(16, 16 + len(node_name), \"bold\")], out.font_attr_segs[0])\n\n # Verify the menu items (command shortcuts) in the output.\n check_menu_item(self, out, 1,\n len(out.lines[1]) - len(\"control_deps/x/read\"),\n len(out.lines[1]), \"li -c -r control_deps/x/read\")\n check_menu_item(self, out, 3,\n len(out.lines[3]) - len(\"control_deps/ctrl_dep_y\"),\n len(out.lines[3]), \"li -c -r control_deps/ctrl_dep_y\")\n\n def testListInputsNonRecursiveNoControlUsingTensorName(self):\n \"\"\"List inputs using the name of an output tensor of the node.\"\"\"\n\n # Do not include node op types.\n node_name = \"control_deps/z\"\n tensor_name = node_name + \":0\"\n out = self._registry.dispatch_command(\"list_inputs\", [tensor_name])\n\n self.assertEqual([\n \"Inputs to node \\\"%s\\\" (Depth limit = 1):\" % node_name,\n \"|- (1) control_deps/x/read\", \"| |- ...\",\n \"|- (1) control_deps/ctrl_dep_y\", \" |- ...\", \"\", \"Legend:\",\n \" (d): recursion depth = d.\"\n ], out.lines)\n check_main_menu(\n self,\n out,\n list_tensors_enabled=True,\n node_info_node_name=node_name,\n print_tensor_node_name=node_name,\n list_outputs_node_name=node_name)\n check_menu_item(self, out, 1,\n len(out.lines[1]) - len(\"control_deps/x/read\"),\n len(out.lines[1]), \"li -c -r control_deps/x/read\")\n check_menu_item(self, out, 3,\n len(out.lines[3]) - len(\"control_deps/ctrl_dep_y\"),\n len(out.lines[3]), \"li -c -r control_deps/ctrl_dep_y\")\n\n def testListInputsNonRecursiveWithControls(self):\n \"\"\"List inputs non-recursively, with control inputs.\"\"\"\n 
node_name = \"control_deps/ctrl_dep_z\"\n out = self._registry.dispatch_command(\"li\", [\"-t\", node_name, \"-c\"])\n\n self.assertEqual([\n \"Inputs to node \\\"%s\\\" (Depth limit = 1, \" % node_name +\n \"control inputs included):\", \"|- (1) [Mul] control_deps/z\", \"| |- ...\",\n \"|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y\", \"| |- ...\",\n \"|- (1) (Ctrl) [VariableV2] control_deps/x\", \"\", \"Legend:\",\n \" (d): recursion depth = d.\", \" (Ctrl): Control input.\",\n \" [Op]: Input node has op type Op.\"\n ], out.lines)\n check_main_menu(\n self,\n out,\n list_tensors_enabled=True,\n node_info_node_name=node_name,\n print_tensor_node_name=node_name,\n list_outputs_node_name=node_name)\n check_menu_item(self, out, 1,\n len(out.lines[1]) - len(\"control_deps/z\"),\n len(out.lines[1]), \"li -c -r control_deps/z\")\n check_menu_item(self, out, 3,\n len(out.lines[3]) - len(\"control_deps/ctrl_dep_y\"),\n len(out.lines[3]), \"li -c -r control_deps/ctrl_dep_y\")\n check_menu_item(self, out, 5,\n len(out.lines[5]) - len(\"control_deps/x\"),\n len(out.lines[5]), \"li -c -r control_deps/x\")\n\n def testListInputsRecursiveWithControls(self):\n \"\"\"List inputs recursively, with control inputs.\"\"\"\n node_name = \"control_deps/ctrl_dep_z\"\n out = self._registry.dispatch_command(\"li\", [\"-c\", \"-r\", \"-t\", node_name])\n\n self.assertEqual([\n \"Inputs to node \\\"%s\\\" (Depth limit = 20, \" % node_name +\n \"control inputs included):\", \"|- (1) [Mul] control_deps/z\",\n \"| |- (2) [Identity] control_deps/x/read\",\n \"| | |- (3) [VariableV2] control_deps/x\",\n \"| |- (2) [Identity] control_deps/ctrl_dep_y\",\n \"| |- (3) [Add] control_deps/y\",\n \"| | |- (4) [Identity] control_deps/x/read\",\n \"| | | |- (5) [VariableV2] control_deps/x\",\n \"| | |- (4) [Identity] control_deps/x/read\",\n \"| | |- (5) [VariableV2] control_deps/x\",\n \"| |- (3) (Ctrl) [VariableV2] control_deps/x\",\n \"|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y\",\n \"| |- (2) [Add] control_deps/y\",\n \"| | |- (3) [Identity] control_deps/x/read\",\n \"| | | |- (4) [VariableV2] control_deps/x\",\n \"| | |- (3) [Identity] control_deps/x/read\",\n \"| | |- (4) [VariableV2] control_deps/x\",\n \"| |- (2) (Ctrl) [VariableV2] control_deps/x\",\n \"|- (1) (Ctrl) [VariableV2] control_deps/x\", \"\", \"Legend:\",\n \" (d): recursion depth = d.\", \" (Ctrl): Control input.\",\n \" [Op]: Input node has op type Op.\"\n ], out.lines)\n check_main_menu(\n self,\n out,\n list_tensors_enabled=True,\n node_info_node_name=node_name,\n print_tensor_node_name=node_name,\n list_outputs_node_name=node_name)\n check_menu_item(self, out, 1,\n len(out.lines[1]) - len(\"control_deps/z\"),\n len(out.lines[1]), \"li -c -r control_deps/z\")\n check_menu_item(self, out, 11,\n len(out.lines[11]) - len(\"control_deps/ctrl_dep_y\"),\n len(out.lines[11]), \"li -c -r control_deps/ctrl_dep_y\")\n check_menu_item(self, out, 18,\n len(out.lines[18]) - len(\"control_deps/x\"),\n len(out.lines[18]), \"li -c -r control_deps/x\")\n\n def testListInputsRecursiveWithControlsWithDepthLimit(self):\n \"\"\"List inputs recursively, with control inputs and a depth limit.\"\"\"\n node_name = \"control_deps/ctrl_dep_z\"\n out = self._registry.dispatch_command(\n \"li\", [\"-c\", \"-r\", \"-t\", \"-d\", \"2\", node_name])\n\n self.assertEqual([\n \"Inputs to node \\\"%s\\\" (Depth limit = 2, \" % node_name +\n \"control inputs included):\", \"|- (1) [Mul] control_deps/z\",\n \"| |- (2) [Identity] control_deps/x/read\", \"| | |- ...\",\n \"| |- (2) 
[Identity] control_deps/ctrl_dep_y\", \"| |- ...\",\n \"|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y\",\n \"| |- (2) [Add] control_deps/y\", \"| | |- ...\",\n \"| |- (2) (Ctrl) [VariableV2] control_deps/x\",\n \"|- (1) (Ctrl) [VariableV2] control_deps/x\", \"\", \"Legend:\",\n \" (d): recursion depth = d.\", \" (Ctrl): Control input.\",\n \" [Op]: Input node has op type Op.\"\n ], out.lines)\n check_main_menu(\n self,\n out,\n list_tensors_enabled=True,\n node_info_node_name=node_name,\n print_tensor_node_name=node_name,\n list_outputs_node_name=node_name)\n check_menu_item(self, out, 1,\n len(out.lines[1]) - len(\"control_deps/z\"),\n len(out.lines[1]), \"li -c -r control_deps/z\")\n check_menu_item(self, out, 10,\n len(out.lines[10]) - len(\"control_deps/x\"),\n len(out.lines[10]), \"li -c -r control_deps/x\")\n\n def testListInputsNodeWithoutInputs(self):\n \"\"\"List the inputs to a node without any input.\"\"\"\n node_name = \"control_deps/x\"\n out = self._registry.dispatch_command(\"li\", [\"-c\", \"-r\", \"-t\", node_name])\n\n self.assertEqual([\n \"Inputs to node \\\"%s\\\" (Depth limit = 20, control \" % node_name +\n \"inputs included):\", \" [None]\", \"\", \"Legend:\",\n \" (d): recursion depth = d.\", \" (Ctrl): Control input.\",\n \" [Op]: Input node has op type Op.\"\n ], out.lines)\n check_main_menu(\n self,\n out,\n list_tensors_enabled=True,\n node_info_node_name=node_name,\n print_tensor_node_name=node_name,\n list_outputs_node_name=node_name)\n\n def testListInputsNonexistentNode(self):\n out = self._registry.dispatch_command(\n \"list_inputs\", [\"control_deps/z/foo\"])\n\n self.assertEqual([\n \"ERROR: There is no node named \\\"control_deps/z/foo\\\" in the \"\n \"partition graphs\"], out.lines)\n\n def testListRecipientsRecursiveWithControlsWithDepthLimit(self):\n \"\"\"List recipients recursively, with control inputs and a depth limit.\"\"\"\n\n out = self._registry.dispatch_command(\n \"lo\", [\"-c\", \"-r\", \"-t\", \"-d\", \"1\", \"control_deps/x\"])\n\n self.assertEqual([\n \"Recipients of node \\\"control_deps/x\\\" (Depth limit = 1, control \"\n \"recipients included):\",\n \"|- (1) [Identity] control_deps/x/read\",\n \"| |- ...\",\n \"|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y\",\n \"| |- ...\",\n \"|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_z\",\n \"\", \"Legend:\", \" (d): recursion depth = d.\",\n \" (Ctrl): Control input.\",\n \" [Op]: Input node has op type Op.\"], out.lines)\n check_menu_item(self, out, 1,\n len(out.lines[1]) - len(\"control_deps/x/read\"),\n len(out.lines[1]), \"lo -c -r control_deps/x/read\")\n check_menu_item(self, out, 3,\n len(out.lines[3]) - len(\"control_deps/ctrl_dep_y\"),\n len(out.lines[3]), \"lo -c -r control_deps/ctrl_dep_y\")\n check_menu_item(self, out, 5,\n len(out.lines[5]) - len(\"control_deps/ctrl_dep_z\"),\n len(out.lines[5]), \"lo -c -r control_deps/ctrl_dep_z\")\n\n # Verify the bold attribute of the node name.\n self.assertEqual([(20, 20 + len(\"control_deps/x\"), \"bold\")],\n out.font_attr_segs[0])\n\n\n@test_util.run_v1_only(\"b/120545219\")\nclass AnalyzerCLIWhileLoopTest(test_util.TensorFlowTestCase):\n\n @classmethod\n def setUpClass(cls):\n cls._dump_root = tempfile.mkdtemp()\n\n with session.Session(config=no_rewrite_session_config()) as sess:\n loop_var = constant_op.constant(0, name=\"while_loop_test/loop_var\")\n cond = lambda loop_var: math_ops.less(loop_var, 10)\n body = lambda loop_var: math_ops.add(loop_var, 1)\n while_loop = control_flow_ops.while_loop(\n cond, body, [loop_var], 
parallel_iterations=1)\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n debug_url = \"file://%s\" % cls._dump_root\n\n watch_opts = run_options.debug_options.debug_tensor_watch_opts\n\n # Add debug tensor watch for \"while/Identity\".\n watch = watch_opts.add()\n watch.node_name = \"while/Identity\"\n watch.output_slot = 0\n watch.debug_ops.append(\"DebugIdentity\")\n watch.debug_urls.append(debug_url)\n\n # Invoke Session.run().\n run_metadata = config_pb2.RunMetadata()\n sess.run(while_loop, options=run_options, run_metadata=run_metadata)\n\n cls._debug_dump = debug_data.DebugDumpDir(\n cls._dump_root, partition_graphs=run_metadata.partition_graphs)\n\n cls._analyzer, cls._registry = create_analyzer_cli(cls._debug_dump)\n\n @classmethod\n def tearDownClass(cls):\n # Tear down temporary dump directory.\n shutil.rmtree(cls._dump_root)\n\n def testMultipleDumpsPrintTensorNoNumber(self):\n output = self._registry.dispatch_command(\"pt\", [\"while/Identity:0\"])\n\n self.assertEqual(\"Tensor \\\"while/Identity:0\\\" generated 10 dumps:\",\n output.lines[0])\n\n for i in xrange(10):\n self.assertTrue(output.lines[i + 1].startswith(\"#%d\" % i))\n self.assertTrue(output.lines[i + 1].endswith(\n \" ms] while/Identity:0:DebugIdentity\"))\n\n self.assertEqual(\n \"You can use the -n (--number) flag to specify which dump to print.\",\n output.lines[-3])\n self.assertEqual(\"For example:\", output.lines[-2])\n self.assertEqual(\" print_tensor while/Identity:0 -n 0\", output.lines[-1])\n\n def testMultipleDumpsPrintTensorWithNumber(self):\n for i in xrange(5):\n output = self._registry.dispatch_command(\n \"pt\", [\"while/Identity:0\", \"-n\", \"%d\" % i])\n\n self.assertEqual(\"Tensor \\\"while/Identity:0:DebugIdentity (dump #%d)\\\":\" %\n i, output.lines[0])\n self.assertEqual(\" dtype: int32\", output.lines[1])\n self.assertEqual(\" shape: ()\", output.lines[2])\n self.assertEqual(\"\", output.lines[3])\n self.assertTrue(output.lines[4].startswith(\"array(%d\" % i))\n self.assertTrue(output.lines[4].endswith(\")\"))\n\n def testMultipleDumpsPrintTensorInvalidNumber(self):\n output = self._registry.dispatch_command(\"pt\",\n [\"while/Identity:0\", \"-n\", \"10\"])\n\n self.assertEqual([\n \"ERROR: Specified number (10) exceeds the number of available dumps \"\n \"(10) for tensor while/Identity:0\"\n ], output.lines)\n\n\nif __name__ == \"__main__\":\n googletest.main()\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for data input for speech commands.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio\nfrom tensorflow.examples.speech_commands import input_data\nfrom tensorflow.examples.speech_commands import models\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.platform import test\n\n\nclass InputDataTest(test.TestCase):\n\n def _getWavData(self):\n with self.cached_session() as sess:\n sample_data = tf.zeros([32000, 2])\n wav_encoder = contrib_audio.encode_wav(sample_data, 16000)\n wav_data = self.evaluate(wav_encoder)\n return wav_data\n\n def _saveTestWavFile(self, filename, wav_data):\n with open(filename, \"wb\") as f:\n f.write(wav_data)\n\n def _saveWavFolders(self, root_dir, labels, how_many):\n wav_data = self._getWavData()\n for label in labels:\n dir_name = os.path.join(root_dir, label)\n os.mkdir(dir_name)\n for i in range(how_many):\n file_path = os.path.join(dir_name, \"some_audio_%d.wav\" % i)\n self._saveTestWavFile(file_path, wav_data)\n\n def _model_settings(self):\n return {\n \"desired_samples\": 160,\n \"fingerprint_size\": 40,\n \"label_count\": 4,\n \"window_size_samples\": 100,\n \"window_stride_samples\": 100,\n \"fingerprint_width\": 40,\n \"preprocess\": \"mfcc\",\n }\n\n def _runGetDataTest(self, preprocess, window_length_ms):\n tmp_dir = self.get_temp_dir()\n wav_dir = os.path.join(tmp_dir, \"wavs\")\n os.mkdir(wav_dir)\n self._saveWavFolders(wav_dir, [\"a\", \"b\", \"c\"], 100)\n background_dir = os.path.join(wav_dir, \"_background_noise_\")\n os.mkdir(background_dir)\n wav_data = self._getWavData()\n for i in range(10):\n file_path = os.path.join(background_dir, \"background_audio_%d.wav\" % i)\n self._saveTestWavFile(file_path, wav_data)\n model_settings = models.prepare_model_settings(\n 4, 16000, 1000, window_length_ms, 20, 40, preprocess)\n with self.cached_session() as sess:\n audio_processor = input_data.AudioProcessor(\n \"\", wav_dir, 10, 10, [\"a\", \"b\"], 10, 10, model_settings, tmp_dir)\n result_data, result_labels = audio_processor.get_data(\n 10, 0, model_settings, 0.3, 0.1, 100, \"training\", sess)\n self.assertEqual(10, len(result_data))\n self.assertEqual(10, len(result_labels))\n\n def testPrepareWordsList(self):\n words_list = [\"a\", \"b\"]\n self.assertGreater(\n len(input_data.prepare_words_list(words_list)), len(words_list))\n\n def testWhichSet(self):\n self.assertEqual(\n input_data.which_set(\"foo.wav\", 10, 10),\n input_data.which_set(\"foo.wav\", 10, 10))\n self.assertEqual(\n input_data.which_set(\"foo_nohash_0.wav\", 10, 10),\n input_data.which_set(\"foo_nohash_1.wav\", 10, 10))\n\n @test_util.run_deprecated_v1\n def 
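testWhichSet above pins down only two properties of the split assignment: it is deterministic, and "_nohash_" variants of one recording land in the same split. A hedged sketch of an assignment with those properties (the helper name, the SHA1 choice, and the percentile math are assumptions for illustration, not input_data's exact code):

import hashlib
import os
import re

def which_set_sketch(filename, validation_pct, testing_pct):
  # Strip any "_nohash_<n>" suffix so variants of one recording always
  # hash identically, then map the stable hash into a percentile bucket.
  stem = re.sub(r"_nohash_.*$", "", os.path.basename(filename))
  pct = int(hashlib.sha1(stem.encode("utf-8")).hexdigest(), 16) % 100
  if pct < validation_pct:
    return "validation"
  if pct < validation_pct + testing_pct:
    return "testing"
  return "training"

assert which_set_sketch("foo_nohash_0.wav", 10, 10) == \
       which_set_sketch("foo_nohash_1.wav", 10, 10)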
testPrepareDataIndex(self):\n tmp_dir = self.get_temp_dir()\n self._saveWavFolders(tmp_dir, [\"a\", \"b\", \"c\"], 100)\n audio_processor = input_data.AudioProcessor(\"\", tmp_dir, 10, 10,\n [\"a\", \"b\"], 10, 10,\n self._model_settings(), tmp_dir)\n self.assertLess(0, audio_processor.set_size(\"training\"))\n self.assertTrue(\"training\" in audio_processor.data_index)\n self.assertTrue(\"validation\" in audio_processor.data_index)\n self.assertTrue(\"testing\" in audio_processor.data_index)\n self.assertEquals(input_data.UNKNOWN_WORD_INDEX,\n audio_processor.word_to_index[\"c\"])\n\n def testPrepareDataIndexEmpty(self):\n tmp_dir = self.get_temp_dir()\n self._saveWavFolders(tmp_dir, [\"a\", \"b\", \"c\"], 0)\n with self.assertRaises(Exception) as e:\n _ = input_data.AudioProcessor(\"\", tmp_dir, 10, 10, [\"a\", \"b\"], 10, 10,\n self._model_settings(), tmp_dir)\n self.assertTrue(\"No .wavs found\" in str(e.exception))\n\n def testPrepareDataIndexMissing(self):\n tmp_dir = self.get_temp_dir()\n self._saveWavFolders(tmp_dir, [\"a\", \"b\", \"c\"], 100)\n with self.assertRaises(Exception) as e:\n _ = input_data.AudioProcessor(\"\", tmp_dir, 10, 10, [\"a\", \"b\", \"d\"], 10,\n 10, self._model_settings(), tmp_dir)\n self.assertTrue(\"Expected to find\" in str(e.exception))\n\n @test_util.run_deprecated_v1\n def testPrepareBackgroundData(self):\n tmp_dir = self.get_temp_dir()\n background_dir = os.path.join(tmp_dir, \"_background_noise_\")\n os.mkdir(background_dir)\n wav_data = self._getWavData()\n for i in range(10):\n file_path = os.path.join(background_dir, \"background_audio_%d.wav\" % i)\n self._saveTestWavFile(file_path, wav_data)\n self._saveWavFolders(tmp_dir, [\"a\", \"b\", \"c\"], 100)\n audio_processor = input_data.AudioProcessor(\"\", tmp_dir, 10, 10,\n [\"a\", \"b\"], 10, 10,\n self._model_settings(), tmp_dir)\n self.assertEqual(10, len(audio_processor.background_data))\n\n def testLoadWavFile(self):\n tmp_dir = self.get_temp_dir()\n file_path = os.path.join(tmp_dir, \"load_test.wav\")\n wav_data = self._getWavData()\n self._saveTestWavFile(file_path, wav_data)\n sample_data = input_data.load_wav_file(file_path)\n self.assertIsNotNone(sample_data)\n\n def testSaveWavFile(self):\n tmp_dir = self.get_temp_dir()\n file_path = os.path.join(tmp_dir, \"load_test.wav\")\n save_data = np.zeros([16000, 1])\n input_data.save_wav_file(file_path, save_data, 16000)\n loaded_data = input_data.load_wav_file(file_path)\n self.assertIsNotNone(loaded_data)\n self.assertEqual(16000, len(loaded_data))\n\n @test_util.run_deprecated_v1\n def testPrepareProcessingGraph(self):\n tmp_dir = self.get_temp_dir()\n wav_dir = os.path.join(tmp_dir, \"wavs\")\n os.mkdir(wav_dir)\n self._saveWavFolders(wav_dir, [\"a\", \"b\", \"c\"], 100)\n background_dir = os.path.join(wav_dir, \"_background_noise_\")\n os.mkdir(background_dir)\n wav_data = self._getWavData()\n for i in range(10):\n file_path = os.path.join(background_dir, \"background_audio_%d.wav\" % i)\n self._saveTestWavFile(file_path, wav_data)\n model_settings = {\n \"desired_samples\": 160,\n \"fingerprint_size\": 40,\n \"label_count\": 4,\n \"window_size_samples\": 100,\n \"window_stride_samples\": 100,\n \"fingerprint_width\": 40,\n \"preprocess\": \"mfcc\",\n }\n audio_processor = input_data.AudioProcessor(\"\", wav_dir, 10, 10, [\"a\", \"b\"],\n 10, 10, model_settings, tmp_dir)\n self.assertIsNotNone(audio_processor.wav_filename_placeholder_)\n self.assertIsNotNone(audio_processor.foreground_volume_placeholder_)\n 
self.assertIsNotNone(audio_processor.time_shift_padding_placeholder_)\n self.assertIsNotNone(audio_processor.time_shift_offset_placeholder_)\n self.assertIsNotNone(audio_processor.background_data_placeholder_)\n self.assertIsNotNone(audio_processor.background_volume_placeholder_)\n self.assertIsNotNone(audio_processor.output_)\n\n @test_util.run_deprecated_v1\n def testGetDataAverage(self):\n self._runGetDataTest(\"average\", 10)\n\n @test_util.run_deprecated_v1\n def testGetDataAverageLongWindow(self):\n self._runGetDataTest(\"average\", 30)\n\n @test_util.run_deprecated_v1\n def testGetDataMfcc(self):\n self._runGetDataTest(\"mfcc\", 30)\n\n @test_util.run_deprecated_v1\n def testGetDataMicro(self):\n self._runGetDataTest(\"micro\", 20)\n\n @test_util.run_deprecated_v1\n def testGetUnprocessedData(self):\n tmp_dir = self.get_temp_dir()\n wav_dir = os.path.join(tmp_dir, \"wavs\")\n os.mkdir(wav_dir)\n self._saveWavFolders(wav_dir, [\"a\", \"b\", \"c\"], 100)\n model_settings = {\n \"desired_samples\": 160,\n \"fingerprint_size\": 40,\n \"label_count\": 4,\n \"window_size_samples\": 100,\n \"window_stride_samples\": 100,\n \"fingerprint_width\": 40,\n \"preprocess\": \"mfcc\",\n }\n audio_processor = input_data.AudioProcessor(\"\", wav_dir, 10, 10, [\"a\", \"b\"],\n 10, 10, model_settings, tmp_dir)\n result_data, result_labels = audio_processor.get_unprocessed_data(\n 10, model_settings, \"training\")\n self.assertEqual(10, len(result_data))\n self.assertEqual(10, len(result_labels))\n\n @test_util.run_deprecated_v1\n def testGetFeaturesForWav(self):\n tmp_dir = self.get_temp_dir()\n wav_dir = os.path.join(tmp_dir, \"wavs\")\n os.mkdir(wav_dir)\n self._saveWavFolders(wav_dir, [\"a\", \"b\", \"c\"], 1)\n desired_samples = 1600\n model_settings = {\n \"desired_samples\": desired_samples,\n \"fingerprint_size\": 40,\n \"label_count\": 4,\n \"window_size_samples\": 100,\n \"window_stride_samples\": 100,\n \"fingerprint_width\": 40,\n \"average_window_width\": 6,\n \"preprocess\": \"average\",\n }\n with self.cached_session() as sess:\n audio_processor = input_data.AudioProcessor(\n \"\", wav_dir, 10, 10, [\"a\", \"b\"], 10, 10, model_settings, tmp_dir)\n sample_data = np.zeros([desired_samples, 1])\n for i in range(desired_samples):\n phase = i % 4\n if phase == 0:\n sample_data[i, 0] = 0\n elif phase == 1:\n sample_data[i, 0] = -1\n elif phase == 2:\n sample_data[i, 0] = 0\n elif phase == 3:\n sample_data[i, 0] = 1\n test_wav_path = os.path.join(tmp_dir, \"test_wav.wav\")\n input_data.save_wav_file(test_wav_path, sample_data, 16000)\n\n results = audio_processor.get_features_for_wav(test_wav_path,\n model_settings, sess)\n spectrogram = results[0]\n self.assertEqual(1, spectrogram.shape[0])\n self.assertEqual(16, spectrogram.shape[1])\n self.assertEqual(11, spectrogram.shape[2])\n self.assertNear(0, spectrogram[0, 0, 0], 0.1)\n self.assertNear(200, spectrogram[0, 0, 5], 0.1)\n\n def testGetFeaturesRange(self):\n model_settings = {\n \"preprocess\": \"average\",\n }\n features_min, _ = input_data.get_features_range(model_settings)\n self.assertNear(0.0, features_min, 1e-5)\n\n def testGetMfccFeaturesRange(self):\n model_settings = {\n \"preprocess\": \"mfcc\",\n }\n features_min, features_max = input_data.get_features_range(model_settings)\n self.assertLess(features_min, features_max)\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2016 The TensorFlow Authors. 
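The shape and index assertions in testGetFeaturesForWav above follow from the window arithmetic; a hedged sketch of that arithmetic (the pad-to-128-point FFT is an assumption about the audio op's internals, not taken from this file):

frames = (1600 - 100) // 100 + 1   # 16 analysis frames from 1600 samples
fft_bins = 128 // 2 + 1            # 65 magnitude bins, assuming a 128-pt FFT
buckets = -(-fft_bins // 6)        # ceil(65 / 6) = 11 averaged buckets
print(frames, fft_bins, buckets)   # 16 65 11 -> spectrogram shape (1, 16, 11)

# The synthetic input repeats every 4 samples at 16 kHz, i.e. a 4 kHz tone,
# which lands in FFT bin 4000 / (16000 / 128) = 32, hence averaged bucket
# 32 // 6 = 5 -- the index probed by spectrogram[0, 0, 5].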
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"The Logistic distribution class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops.distributions import distribution\nfrom tensorflow.python.util import deprecation\n\n\nclass Logistic(distribution.Distribution):\n \"\"\"The Logistic distribution with location `loc` and `scale` parameters.\n\n #### Mathematical details\n\n The cumulative density function of this distribution is:\n\n ```none\n cdf(x; mu, sigma) = 1 / (1 + exp(-(x - mu) / sigma))\n ```\n\n where `loc = mu` and `scale = sigma`.\n\n The Logistic distribution is a member of the [location-scale family](\n https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be\n constructed as,\n\n ```none\n X ~ Logistic(loc=0, scale=1)\n Y = loc + scale * X\n ```\n\n #### Examples\n\n Examples of initialization of one or a batch of distributions.\n\n ```python\n import tensorflow_probability as tfp\n tfd = tfp.distributions\n\n # Define a single scalar Logistic distribution.\n dist = tfd.Logistic(loc=0., scale=3.)\n\n # Evaluate the cdf at 1, returning a scalar.\n dist.cdf(1.)\n\n # Define a batch of two scalar valued Logistics.\n # The first has mean 1 and scale 11, the second 2 and 22.\n dist = tfd.Logistic(loc=[1, 2.], scale=[11, 22.])\n\n # Evaluate the pdf of the first distribution on 0, and the second on 1.5,\n # returning a length two tensor.\n dist.prob([0, 1.5])\n\n # Get 3 samples, returning a 3 x 2 tensor.\n dist.sample([3])\n\n # Arguments are broadcast when possible.\n # Define a batch of two scalar valued Logistics.\n # Both have mean 1, but different scales.\n dist = tfd.Logistic(loc=1., scale=[11, 22.])\n\n # Evaluate the pdf of both distributions on the same point, 3.0,\n # returning a length 2 tensor.\n dist.prob(3.0)\n ```\n\n \"\"\"\n\n @deprecation.deprecated(\n \"2018-10-01\",\n \"The TensorFlow Distributions library has moved to \"\n \"TensorFlow Probability \"\n \"(https://github.com/tensorflow/probability). 
You \"\n \"should update all references to use `tfp.distributions` \"\n \"instead of `tf.contrib.distributions`.\",\n warn_once=True)\n def __init__(self,\n loc,\n scale,\n validate_args=False,\n allow_nan_stats=True,\n name=\"Logistic\"):\n \"\"\"Construct Logistic distributions with mean and scale `loc` and `scale`.\n\n The parameters `loc` and `scale` must be shaped in a way that supports\n broadcasting (e.g. `loc + scale` is a valid operation).\n\n Args:\n loc: Floating point tensor, the means of the distribution(s).\n scale: Floating point tensor, the scales of the distribution(s). Must\n contain only positive values.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n (e.g., mean, mode, variance) use the value \"`NaN`\" to indicate the\n result is undefined. When `False`, an exception is raised if one or\n more of the statistic's batch members are undefined.\n name: The name to give Ops created by the initializer.\n\n Raises:\n TypeError: if loc and scale are different dtypes.\n \"\"\"\n parameters = dict(locals())\n with ops.name_scope(name, values=[loc, scale]) as name:\n with ops.control_dependencies([check_ops.assert_positive(scale)] if\n validate_args else []):\n self._loc = array_ops.identity(loc, name=\"loc\")\n self._scale = array_ops.identity(scale, name=\"scale\")\n check_ops.assert_same_float_dtype([self._loc, self._scale])\n super(Logistic, self).__init__(\n dtype=self._scale.dtype,\n reparameterization_type=distribution.FULLY_REPARAMETERIZED,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n graph_parents=[self._loc, self._scale],\n name=name)\n\n @staticmethod\n def _param_shapes(sample_shape):\n return dict(\n zip((\"loc\", \"scale\"), ([ops.convert_to_tensor(\n sample_shape, dtype=dtypes.int32)] * 2)))\n\n @property\n def loc(self):\n \"\"\"Distribution parameter for the location.\"\"\"\n return self._loc\n\n @property\n def scale(self):\n \"\"\"Distribution parameter for scale.\"\"\"\n return self._scale\n\n def _batch_shape_tensor(self):\n return array_ops.broadcast_dynamic_shape(\n array_ops.shape(self.loc), array_ops.shape(self.scale))\n\n def _batch_shape(self):\n return array_ops.broadcast_static_shape(\n self.loc.get_shape(), self.scale.get_shape())\n\n def _event_shape_tensor(self):\n return constant_op.constant([], dtype=dtypes.int32)\n\n def _event_shape(self):\n return tensor_shape.TensorShape([])\n\n def _sample_n(self, n, seed=None):\n # Uniform variates must be sampled from the open-interval `(0, 1)` rather\n # than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`\n # because it is the smallest, positive, \"normal\" number. A \"normal\" number\n # is such that the mantissa has an implicit leading 1. Normal, positive\n # numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In\n # this case, a subnormal number (i.e., np.nextafter) can cause us to sample\n # 0.\n uniform = random_ops.random_uniform(\n shape=array_ops.concat([[n], self.batch_shape_tensor()], 0),\n minval=np.finfo(self.dtype.as_numpy_dtype).tiny,\n maxval=1.,\n dtype=self.dtype,\n seed=seed)\n sampled = math_ops.log(uniform) - math_ops.log1p(-1. 
* uniform)\n return sampled * self.scale + self.loc\n\n def _log_prob(self, x):\n return self._log_unnormalized_prob(x) - self._log_normalization()\n\n def _log_cdf(self, x):\n return -nn_ops.softplus(-self._z(x))\n\n def _cdf(self, x):\n return math_ops.sigmoid(self._z(x))\n\n def _log_survival_function(self, x):\n return -nn_ops.softplus(self._z(x))\n\n def _survival_function(self, x):\n return math_ops.sigmoid(-self._z(x))\n\n def _log_unnormalized_prob(self, x):\n z = self._z(x)\n return - z - 2. * nn_ops.softplus(-z)\n\n def _log_normalization(self):\n return math_ops.log(self.scale)\n\n def _entropy(self):\n # Use broadcasting rules to calculate the full broadcast sigma.\n scale = self.scale * array_ops.ones_like(self.loc)\n return 2 + math_ops.log(scale)\n\n def _mean(self):\n return self.loc * array_ops.ones_like(self.scale)\n\n def _stddev(self):\n return self.scale * array_ops.ones_like(self.loc) * math.pi / math.sqrt(3)\n\n def _mode(self):\n return self._mean()\n\n def _z(self, x):\n \"\"\"Standardize input `x` to a unit logistic.\"\"\"\n with ops.name_scope(\"standardize\", values=[x]):\n return (x - self.loc) / self.scale\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Integration test for sequence feature columns with SequenceExamples.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport string\nimport tempfile\n\nfrom google.protobuf import text_format\n\nfrom tensorflow.core.example import example_pb2\nfrom tensorflow.core.example import feature_pb2\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.feature_column import dense_features\nfrom tensorflow.python.feature_column import feature_column_v2 as fc\nfrom tensorflow.python.feature_column import sequence_feature_column as sfc\nfrom tensorflow.python.keras.layers import recurrent\nfrom tensorflow.python.ops import parsing_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.util import compat\n\n\nclass SequenceFeatureColumnIntegrationTest(test.TestCase):\n\n def _make_sequence_example(self):\n example = example_pb2.SequenceExample()\n example.context.feature['int_ctx'].int64_list.value.extend([5])\n example.context.feature['float_ctx'].float_list.value.extend([123.6])\n for val in range(0, 10, 2):\n feat = feature_pb2.Feature()\n feat.int64_list.value.extend([val] * val)\n example.feature_lists.feature_list['int_list'].feature.extend([feat])\n for val in range(1, 11, 2):\n feat = feature_pb2.Feature()\n feat.bytes_list.value.extend([compat.as_bytes(str(val))] * val)\n example.feature_lists.feature_list['str_list'].feature.extend([feat])\n\n return example\n\n def _build_feature_columns(self):\n col = fc.categorical_column_with_identity('int_ctx', num_buckets=100)\n ctx_cols = [\n 
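_sample_n above draws uniforms from the open interval (0, 1) and applies log(u) - log1p(-u), which is logit(u), the logistic quantile (inverse CDF). A minimal NumPy sketch of the same transform, checking it against _mean and _stddev:

import numpy as np

rng = np.random.RandomState(0)
loc, scale = 1.0, 3.0
u = rng.uniform(np.finfo(np.float64).tiny, 1.0, size=200000)
samples = loc + scale * (np.log(u) - np.log1p(-u))  # loc + scale * logit(u)

print(samples.mean())  # ~= loc = 1.0 (the _mean above)
print(samples.std())   # ~= scale * pi / sqrt(3) ~= 5.44 (the _stddev above)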
fc.embedding_column(col, dimension=10),\n fc.numeric_column('float_ctx')\n ]\n\n identity_col = sfc.sequence_categorical_column_with_identity(\n 'int_list', num_buckets=10)\n bucket_col = sfc.sequence_categorical_column_with_hash_bucket(\n 'bytes_list', hash_bucket_size=100)\n seq_cols = [\n fc.embedding_column(identity_col, dimension=10),\n fc.embedding_column(bucket_col, dimension=20)\n ]\n\n return ctx_cols, seq_cols\n\n def test_sequence_example_into_input_layer(self):\n examples = [_make_sequence_example().SerializeToString()] * 100\n ctx_cols, seq_cols = self._build_feature_columns()\n\n def _parse_example(example):\n ctx, seq = parsing_ops.parse_single_sequence_example(\n example,\n context_features=fc.make_parse_example_spec_v2(ctx_cols),\n sequence_features=fc.make_parse_example_spec_v2(seq_cols))\n ctx.update(seq)\n return ctx\n\n ds = dataset_ops.Dataset.from_tensor_slices(examples)\n ds = ds.map(_parse_example)\n ds = ds.batch(20)\n\n # Test on a single batch\n features = ds.make_one_shot_iterator().get_next()\n\n # Tile the context features across the sequence features\n sequence_input_layer = sfc.SequenceFeatures(seq_cols)\n seq_layer, _ = sequence_input_layer(features)\n input_layer = dense_features.DenseFeatures(ctx_cols)\n ctx_layer = input_layer(features)\n input_layer = sfc.concatenate_context_input(ctx_layer, seq_layer)\n\n rnn_layer = recurrent.RNN(recurrent.SimpleRNNCell(10))\n output = rnn_layer(input_layer)\n\n with self.cached_session() as sess:\n sess.run(variables.global_variables_initializer())\n features_r = sess.run(features)\n self.assertAllEqual(features_r['int_list'].dense_shape, [20, 3, 6])\n\n output_r = sess.run(output)\n self.assertAllEqual(output_r.shape, [20, 10])\n\n\nclass SequenceExampleParsingTest(test.TestCase):\n\n def test_seq_ex_in_sequence_categorical_column_with_identity(self):\n self._test_parsed_sequence_example(\n 'int_list', sfc.sequence_categorical_column_with_identity,\n 10, [3, 6], [2, 4, 6])\n\n def test_seq_ex_in_sequence_categorical_column_with_hash_bucket(self):\n self._test_parsed_sequence_example(\n 'bytes_list', sfc.sequence_categorical_column_with_hash_bucket,\n 10, [3, 4], [compat.as_bytes(x) for x in 'acg'])\n\n def test_seq_ex_in_sequence_categorical_column_with_vocabulary_list(self):\n self._test_parsed_sequence_example(\n 'bytes_list', sfc.sequence_categorical_column_with_vocabulary_list,\n list(string.ascii_lowercase), [3, 4],\n [compat.as_bytes(x) for x in 'acg'])\n\n def test_seq_ex_in_sequence_categorical_column_with_vocabulary_file(self):\n _, fname = tempfile.mkstemp()\n with open(fname, 'w') as f:\n f.write(string.ascii_lowercase)\n self._test_parsed_sequence_example(\n 'bytes_list', sfc.sequence_categorical_column_with_vocabulary_file,\n fname, [3, 4], [compat.as_bytes(x) for x in 'acg'])\n\n def _test_parsed_sequence_example(\n self, col_name, col_fn, col_arg, shape, values):\n \"\"\"Helper function to check that each FeatureColumn parses correctly.\n\n Args:\n col_name: string, name to give to the feature column. Should match\n the name that the column will parse out of the features dict.\n col_fn: function used to create the feature column. 
For example,\n sequence_numeric_column.\n col_arg: second arg that the target feature column is expecting.\n shape: the expected dense_shape of the feature after parsing into\n a SparseTensor.\n values: the expected values at index [0, 2, 6] of the feature\n after parsing into a SparseTensor.\n \"\"\"\n example = _make_sequence_example()\n columns = [\n fc.categorical_column_with_identity('int_ctx', num_buckets=100),\n fc.numeric_column('float_ctx'),\n col_fn(col_name, col_arg)\n ]\n context, seq_features = parsing_ops.parse_single_sequence_example(\n example.SerializeToString(),\n context_features=fc.make_parse_example_spec_v2(columns[:2]),\n sequence_features=fc.make_parse_example_spec_v2(columns[2:]))\n\n with self.cached_session() as sess:\n ctx_result, seq_result = sess.run([context, seq_features])\n self.assertEqual(list(seq_result[col_name].dense_shape), shape)\n self.assertEqual(\n list(seq_result[col_name].values[[0, 2, 6]]), values)\n self.assertEqual(list(ctx_result['int_ctx'].dense_shape), [1])\n self.assertEqual(ctx_result['int_ctx'].values[0], 5)\n self.assertEqual(list(ctx_result['float_ctx'].shape), [1])\n self.assertAlmostEqual(ctx_result['float_ctx'][0], 123.6, places=1)\n\n\n_SEQ_EX_PROTO = \"\"\"\ncontext {\n feature {\n key: \"float_ctx\"\n value {\n float_list {\n value: 123.6\n }\n }\n }\n feature {\n key: \"int_ctx\"\n value {\n int64_list {\n value: 5\n }\n }\n }\n}\nfeature_lists {\n feature_list {\n key: \"bytes_list\"\n value {\n feature {\n bytes_list {\n value: \"a\"\n }\n }\n feature {\n bytes_list {\n value: \"b\"\n value: \"c\"\n }\n }\n feature {\n bytes_list {\n value: \"d\"\n value: \"e\"\n value: \"f\"\n value: \"g\"\n }\n }\n }\n }\n feature_list {\n key: \"float_list\"\n value {\n feature {\n float_list {\n value: 1.0\n }\n }\n feature {\n float_list {\n value: 3.0\n value: 3.0\n value: 3.0\n }\n }\n feature {\n float_list {\n value: 5.0\n value: 5.0\n value: 5.0\n value: 5.0\n value: 5.0\n }\n }\n }\n }\n feature_list {\n key: \"int_list\"\n value {\n feature {\n int64_list {\n value: 2\n value: 2\n }\n }\n feature {\n int64_list {\n value: 4\n value: 4\n value: 4\n value: 4\n }\n }\n feature {\n int64_list {\n value: 6\n value: 6\n value: 6\n value: 6\n value: 6\n value: 6\n }\n }\n }\n }\n}\n\"\"\"\n\n\ndef _make_sequence_example():\n example = example_pb2.SequenceExample()\n return text_format.Parse(_SEQ_EX_PROTO, example)\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Experimental shuffle ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.util import random_seed\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import gen_dataset_ops\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util.tf_export import tf_export\n\n\nclass _ShuffleAndRepeatDataset(dataset_ops.UnaryUnchangedStructureDataset):\n \"\"\"A `Dataset` that fuses `shuffle` and `repeat`.\"\"\"\n\n def __init__(self, input_dataset, buffer_size, count=None, seed=None):\n self._input_dataset = input_dataset\n self._buffer_size = ops.convert_to_tensor(\n buffer_size, dtype=dtypes.int64, name=\"buffer_size\")\n if count is None:\n self._count = constant_op.constant(-1, dtype=dtypes.int64, name=\"count\")\n else:\n self._count = ops.convert_to_tensor(\n count, dtype=dtypes.int64, name=\"count\")\n self._seed, self._seed2 = random_seed.get_seed(seed)\n variant_tensor = gen_dataset_ops.shuffle_and_repeat_dataset(\n self._input_dataset._variant_tensor, # pylint: disable=protected-access\n buffer_size=self._buffer_size,\n count=self._count,\n seed=self._seed,\n seed2=self._seed2,\n **self._flat_structure)\n super(_ShuffleAndRepeatDataset, self).__init__(input_dataset,\n variant_tensor)\n\n\[email protected](\n None,\n \"Use `tf.data.Dataset.shuffle(buffer_size, seed)` followed by \"\n \"`tf.data.Dataset.repeat(count)`. Static tf.data optimizations will take \"\n \"care of using the fused implementation.\")\n@tf_export(\"data.experimental.shuffle_and_repeat\")\ndef shuffle_and_repeat(buffer_size, count=None, seed=None):\n \"\"\"Shuffles and repeats a Dataset returning a new permutation for each epoch.\n\n `dataset.apply(tf.data.experimental.shuffle_and_repeat(buffer_size, count))`\n\n is equivalent to\n\n `dataset.shuffle(buffer_size, reshuffle_each_iteration=True).repeat(count)`\n\n The difference is that the latter dataset is not serializable. So,\n if you need to checkpoint an input pipeline with reshuffling you must use\n this implementation.\n\n Args:\n buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the\n maximum number elements that will be buffered when prefetching.\n count: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the\n number of times the dataset should be repeated. The default behavior\n (if `count` is `None` or `-1`) is for the dataset be repeated\n indefinitely.\n seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the\n random seed that will be used to create the distribution. 
See\n `tf.compat.v1.set_random_seed` for behavior.\n\n Returns:\n A `Dataset` transformation function, which can be passed to\n `tf.data.Dataset.apply`.\n \"\"\"\n\n def _apply_fn(dataset): # pylint: disable=missing-docstring\n return _ShuffleAndRepeatDataset(dataset, buffer_size, count, seed)\n\n return _apply_fn\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"`LinearOperator` acting like the identity matrix.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.linalg import linalg_impl as linalg\nfrom tensorflow.python.ops.linalg import linear_operator\nfrom tensorflow.python.ops.linalg import linear_operator_util\nfrom tensorflow.python.util.tf_export import tf_export\n\n__all__ = [\n \"LinearOperatorIdentity\",\n \"LinearOperatorScaledIdentity\",\n]\n\n\nclass BaseLinearOperatorIdentity(linear_operator.LinearOperator):\n \"\"\"Base class for Identity operators.\"\"\"\n\n def _check_num_rows_possibly_add_asserts(self):\n \"\"\"Static check of init arg `num_rows`, possibly add asserts.\"\"\"\n # Possibly add asserts.\n if self._assert_proper_shapes:\n self._num_rows = control_flow_ops.with_dependencies([\n check_ops.assert_rank(\n self._num_rows,\n 0,\n message=\"Argument num_rows must be a 0-D Tensor.\"),\n check_ops.assert_non_negative(\n self._num_rows,\n message=\"Argument num_rows must be non-negative.\"),\n ], self._num_rows)\n\n # Static checks.\n if not self._num_rows.dtype.is_integer:\n raise TypeError(\"Argument num_rows must be integer type. Found:\"\n \" %s\" % self._num_rows)\n\n num_rows_static = self._num_rows_static\n\n if num_rows_static is None:\n return # Cannot do any other static checks.\n\n if num_rows_static.ndim != 0:\n raise ValueError(\"Argument num_rows must be a 0-D Tensor. Found:\"\n \" %s\" % num_rows_static)\n\n if num_rows_static < 0:\n raise ValueError(\"Argument num_rows must be non-negative. 
Found:\"\n \" %s\" % num_rows_static)\n\n def _min_matrix_dim(self):\n \"\"\"Minimum of domain/range dimension, if statically available, else None.\"\"\"\n domain_dim = tensor_shape.dimension_value(self.domain_dimension)\n range_dim = tensor_shape.dimension_value(self.range_dimension)\n if domain_dim is None or range_dim is None:\n return None\n return min(domain_dim, range_dim)\n\n def _min_matrix_dim_tensor(self):\n \"\"\"Minimum of domain/range dimension, as a tensor.\"\"\"\n return math_ops.reduce_min(self.shape_tensor()[-2:])\n\n def _ones_diag(self):\n \"\"\"Returns the diagonal of this operator as all ones.\"\"\"\n if self.shape.is_fully_defined():\n d_shape = self.batch_shape.concatenate([self._min_matrix_dim()])\n else:\n d_shape = array_ops.concat(\n [self.batch_shape_tensor(),\n [self._min_matrix_dim_tensor()]], axis=0)\n\n return array_ops.ones(shape=d_shape, dtype=self.dtype)\n\n\n@tf_export(\"linalg.LinearOperatorIdentity\")\nclass LinearOperatorIdentity(BaseLinearOperatorIdentity):\n \"\"\"`LinearOperator` acting like a [batch] square identity matrix.\n\n This operator acts like a [batch] identity matrix `A` with shape\n `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a\n batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is\n an `N x N` matrix. This matrix `A` is not materialized, but for\n purposes of broadcasting this shape will be relevant.\n\n `LinearOperatorIdentity` is initialized with `num_rows`, and optionally\n `batch_shape`, and `dtype` arguments. If `batch_shape` is `None`, this\n operator efficiently passes through all arguments. If `batch_shape` is\n provided, broadcasting may occur, which will require making copies.\n\n ```python\n # Create a 2 x 2 identity matrix.\n operator = LinearOperatorIdentity(num_rows=2, dtype=tf.float32)\n\n operator.to_dense()\n ==> [[1., 0.]\n [0., 1.]]\n\n operator.shape\n ==> [2, 2]\n\n operator.log_abs_determinant()\n ==> 0.\n\n x = ... Shape [2, 4] Tensor\n operator.matmul(x)\n ==> Shape [2, 4] Tensor, same as x.\n\n y = tf.random.normal(shape=[3, 2, 4])\n # Note that y.shape is compatible with operator.shape because operator.shape\n # is broadcast to [3, 2, 2].\n # This broadcast does NOT require copying data, since we can infer that y\n # will be passed through without changing shape. We are always able to infer\n # this if the operator has no batch_shape.\n x = operator.solve(y)\n ==> Shape [3, 2, 4] Tensor, same as y.\n\n # Create a 2-batch of 2x2 identity matrices\n operator = LinearOperatorIdentity(num_rows=2, batch_shape=[2])\n operator.to_dense()\n ==> [[[1., 0.]\n [0., 1.]],\n [[1., 0.]\n [0., 1.]]]\n\n # Here, even though the operator has a batch shape, the input is the same as\n # the output, so x can be passed through without a copy. The operator is able\n # to detect that no broadcast is necessary because both x and the operator\n # have statically defined shape.\n x = ... Shape [2, 2, 3]\n operator.matmul(x)\n ==> Shape [2, 2, 3] Tensor, same as x\n\n # Here the operator and x have different batch_shape, and are broadcast.\n # This requires a copy, since the output is different size than the input.\n x = ... 
Shape [1, 2, 3]\n operator.matmul(x)\n ==> Shape [2, 2, 3] Tensor, equal to [x, x]\n ```\n\n ### Shape compatibility\n\n This operator acts on [batch] matrix with compatible shape.\n `x` is a batch matrix with compatible shape for `matmul` and `solve` if\n\n ```\n operator.shape = [B1,...,Bb] + [N, N], with b >= 0\n x.shape = [C1,...,Cc] + [N, R],\n and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]\n ```\n\n ### Performance\n\n If `batch_shape` initialization arg is `None`:\n\n * `operator.matmul(x)` is `O(1)`\n * `operator.solve(x)` is `O(1)`\n * `operator.determinant()` is `O(1)`\n\n If `batch_shape` initialization arg is provided, and static checks cannot\n rule out the need to broadcast:\n\n * `operator.matmul(x)` is `O(D1*...*Dd*N*R)`\n * `operator.solve(x)` is `O(D1*...*Dd*N*R)`\n * `operator.determinant()` is `O(B1*...*Bb)`\n\n #### Matrix property hints\n\n This `LinearOperator` is initialized with boolean flags of the form `is_X`,\n for `X = non_singular, self_adjoint, positive_definite, square`.\n These have the following meaning:\n\n * If `is_X == True`, callers should expect the operator to have the\n property `X`. This is a promise that should be fulfilled, but is *not* a\n runtime assert. For example, finite floating point precision may result\n in these promises being violated.\n * If `is_X == False`, callers should expect the operator to not have `X`.\n * If `is_X == None` (the default), callers should have no expectation either\n way.\n \"\"\"\n\n def __init__(self,\n num_rows,\n batch_shape=None,\n dtype=None,\n is_non_singular=True,\n is_self_adjoint=True,\n is_positive_definite=True,\n is_square=True,\n assert_proper_shapes=False,\n name=\"LinearOperatorIdentity\"):\n r\"\"\"Initialize a `LinearOperatorIdentity`.\n\n The `LinearOperatorIdentity` is initialized with arguments defining `dtype`\n and shape.\n\n This operator is able to broadcast the leading (batch) dimensions, which\n sometimes requires copying data. If `batch_shape` is `None`, the operator\n can take arguments of any batch shape without copying. See examples.\n\n Args:\n num_rows: Scalar non-negative integer `Tensor`. Number of rows in the\n corresponding identity matrix.\n batch_shape: Optional `1-D` integer `Tensor`. The shape of the leading\n dimensions. If `None`, this operator has no leading dimensions.\n dtype: Data type of the matrix that this operator represents.\n is_non_singular: Expect that this operator is non-singular.\n is_self_adjoint: Expect that this operator is equal to its hermitian\n transpose.\n is_positive_definite: Expect that this operator is positive definite,\n meaning the quadratic form `x^H A x` has positive real part for all\n nonzero `x`. Note that we do not require the operator to be\n self-adjoint to be positive-definite. See:\n https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices\n is_square: Expect that this operator acts like square [batch] matrices.\n assert_proper_shapes: Python `bool`. 
If `False`, only perform static\n checks that initialization and method arguments have proper shape.\n If `True`, and static checks are inconclusive, add asserts to the graph.\n name: A name for this `LinearOperator`\n\n Raises:\n ValueError: If `num_rows` is determined statically to be non-scalar, or\n negative.\n ValueError: If `batch_shape` is determined statically to not be 1-D, or\n negative.\n ValueError: If any of the following is not `True`:\n `{is_self_adjoint, is_non_singular, is_positive_definite}`.\n TypeError: If `num_rows` or `batch_shape` is ref-type (e.g. Variable).\n \"\"\"\n dtype = dtype or dtypes.float32\n self._assert_proper_shapes = assert_proper_shapes\n\n with ops.name_scope(name):\n dtype = dtypes.as_dtype(dtype)\n if not is_self_adjoint:\n raise ValueError(\"An identity operator is always self adjoint.\")\n if not is_non_singular:\n raise ValueError(\"An identity operator is always non-singular.\")\n if not is_positive_definite:\n raise ValueError(\"An identity operator is always positive-definite.\")\n if not is_square:\n raise ValueError(\"An identity operator is always square.\")\n\n super(LinearOperatorIdentity, self).__init__(\n dtype=dtype,\n is_non_singular=is_non_singular,\n is_self_adjoint=is_self_adjoint,\n is_positive_definite=is_positive_definite,\n is_square=is_square,\n name=name)\n\n linear_operator_util.assert_not_ref_type(num_rows, \"num_rows\")\n linear_operator_util.assert_not_ref_type(batch_shape, \"batch_shape\")\n\n self._num_rows = linear_operator_util.shape_tensor(\n num_rows, name=\"num_rows\")\n self._num_rows_static = tensor_util.constant_value(self._num_rows)\n self._check_num_rows_possibly_add_asserts()\n\n if batch_shape is None:\n self._batch_shape_arg = None\n else:\n self._batch_shape_arg = linear_operator_util.shape_tensor(\n batch_shape, name=\"batch_shape_arg\")\n self._batch_shape_static = tensor_util.constant_value(\n self._batch_shape_arg)\n self._check_batch_shape_possibly_add_asserts()\n\n def _shape(self):\n matrix_shape = tensor_shape.TensorShape((self._num_rows_static,\n self._num_rows_static))\n if self._batch_shape_arg is None:\n return matrix_shape\n\n batch_shape = tensor_shape.TensorShape(self._batch_shape_static)\n return batch_shape.concatenate(matrix_shape)\n\n def _shape_tensor(self):\n matrix_shape = array_ops.stack((self._num_rows, self._num_rows), axis=0)\n if self._batch_shape_arg is None:\n return matrix_shape\n\n return array_ops.concat((self._batch_shape_arg, matrix_shape), 0)\n\n def _assert_non_singular(self):\n return control_flow_ops.no_op(\"assert_non_singular\")\n\n def _assert_positive_definite(self):\n return control_flow_ops.no_op(\"assert_positive_definite\")\n\n def _assert_self_adjoint(self):\n return control_flow_ops.no_op(\"assert_self_adjoint\")\n\n def _possibly_broadcast_batch_shape(self, x):\n \"\"\"Return 'x', possibly after broadcasting the leading dimensions.\"\"\"\n # If we have no batch shape, our batch shape broadcasts with everything!\n if self._batch_shape_arg is None:\n return x\n\n # Static attempt:\n # If we determine that no broadcast is necessary, pass x through\n # If we need a broadcast, add to an array of zeros.\n #\n # special_shape is the shape that, when broadcast with x's shape, will give\n # the correct broadcast_shape. 
Note that\n # We have already verified the second to last dimension of self.shape\n # matches x's shape in assert_compatible_matrix_dimensions.\n # Also, the final dimension of 'x' can have any shape.\n # Therefore, the final two dimensions of special_shape are 1's.\n special_shape = self.batch_shape.concatenate([1, 1])\n bshape = array_ops.broadcast_static_shape(x.shape, special_shape)\n if special_shape.is_fully_defined():\n # bshape.is_fully_defined iff special_shape.is_fully_defined.\n if bshape == x.shape:\n return x\n # Use the built in broadcasting of addition.\n zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype)\n return x + zeros\n\n # Dynamic broadcast:\n # Always add to an array of zeros, rather than using a \"cond\", since a\n # cond would require copying data from GPU --> CPU.\n special_shape = array_ops.concat((self.batch_shape_tensor(), [1, 1]), 0)\n zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype)\n return x + zeros\n\n def _matmul(self, x, adjoint=False, adjoint_arg=False):\n # Note that adjoint has no effect since this matrix is self-adjoint.\n x = linalg.adjoint(x) if adjoint_arg else x\n if self._assert_proper_shapes:\n aps = linear_operator_util.assert_compatible_matrix_dimensions(self, x)\n x = control_flow_ops.with_dependencies([aps], x)\n return self._possibly_broadcast_batch_shape(x)\n\n def _determinant(self):\n return array_ops.ones(shape=self.batch_shape_tensor(), dtype=self.dtype)\n\n def _log_abs_determinant(self):\n return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype)\n\n def _solve(self, rhs, adjoint=False, adjoint_arg=False):\n return self._matmul(rhs, adjoint_arg=adjoint_arg)\n\n def _trace(self):\n # Get Tensor of all ones of same shape as self.batch_shape.\n if self.batch_shape.is_fully_defined():\n batch_of_ones = array_ops.ones(shape=self.batch_shape, dtype=self.dtype)\n else:\n batch_of_ones = array_ops.ones(\n shape=self.batch_shape_tensor(), dtype=self.dtype)\n\n if self._min_matrix_dim() is not None:\n return self._min_matrix_dim() * batch_of_ones\n else:\n return (math_ops.cast(self._min_matrix_dim_tensor(), self.dtype) *\n batch_of_ones)\n\n def _diag_part(self):\n return self._ones_diag()\n\n def add_to_tensor(self, mat, name=\"add_to_tensor\"):\n \"\"\"Add matrix represented by this operator to `mat`. Equiv to `I + mat`.\n\n Args:\n mat: `Tensor` with same `dtype` and shape broadcastable to `self`.\n name: A name to give this `Op`.\n\n Returns:\n A `Tensor` with broadcast shape and same `dtype` as `self`.\n \"\"\"\n with self._name_scope(name):\n mat = ops.convert_to_tensor(mat, name=\"mat\")\n mat_diag = array_ops.matrix_diag_part(mat)\n new_diag = 1 + mat_diag\n return array_ops.matrix_set_diag(mat, new_diag)\n\n def _check_num_rows_possibly_add_asserts(self):\n \"\"\"Static check of init arg `num_rows`, possibly add asserts.\"\"\"\n # Possibly add asserts.\n if self._assert_proper_shapes:\n self._num_rows = control_flow_ops.with_dependencies([\n check_ops.assert_rank(\n self._num_rows,\n 0,\n message=\"Argument num_rows must be a 0-D Tensor.\"),\n check_ops.assert_non_negative(\n self._num_rows,\n message=\"Argument num_rows must be non-negative.\"),\n ], self._num_rows)\n\n # Static checks.\n if not self._num_rows.dtype.is_integer:\n raise TypeError(\"Argument num_rows must be integer type. 
Found:\"\n \" %s\" % self._num_rows)\n\n num_rows_static = self._num_rows_static\n\n if num_rows_static is None:\n return # Cannot do any other static checks.\n\n if num_rows_static.ndim != 0:\n raise ValueError(\"Argument num_rows must be a 0-D Tensor. Found:\"\n \" %s\" % num_rows_static)\n\n if num_rows_static < 0:\n raise ValueError(\"Argument num_rows must be non-negative. Found:\"\n \" %s\" % num_rows_static)\n\n def _check_batch_shape_possibly_add_asserts(self):\n \"\"\"Static check of init arg `batch_shape`, possibly add asserts.\"\"\"\n if self._batch_shape_arg is None:\n return\n\n # Possibly add asserts\n if self._assert_proper_shapes:\n self._batch_shape_arg = control_flow_ops.with_dependencies([\n check_ops.assert_rank(\n self._batch_shape_arg,\n 1,\n message=\"Argument batch_shape must be a 1-D Tensor.\"),\n check_ops.assert_non_negative(\n self._batch_shape_arg,\n message=\"Argument batch_shape must be non-negative.\"),\n ], self._batch_shape_arg)\n\n # Static checks\n if not self._batch_shape_arg.dtype.is_integer:\n raise TypeError(\"Argument batch_shape must be integer type. Found:\"\n \" %s\" % self._batch_shape_arg)\n\n if self._batch_shape_static is None:\n return # Cannot do any other static checks.\n\n if self._batch_shape_static.ndim != 1:\n raise ValueError(\"Argument batch_shape must be a 1-D Tensor. Found:\"\n \" %s\" % self._batch_shape_static)\n\n if np.any(self._batch_shape_static < 0):\n raise ValueError(\"Argument batch_shape must be non-negative. Found:\"\n \"%s\" % self._batch_shape_static)\n\n\n@tf_export(\"linalg.LinearOperatorScaledIdentity\")\nclass LinearOperatorScaledIdentity(BaseLinearOperatorIdentity):\n \"\"\"`LinearOperator` acting like a scaled [batch] identity matrix `A = c I`.\n\n This operator acts like a scaled [batch] identity matrix `A` with shape\n `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a\n batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is\n a scaled version of the `N x N` identity matrix.\n\n `LinearOperatorIdentity` is initialized with `num_rows`, and a `multiplier`\n (a `Tensor`) of shape `[B1,...,Bb]`. `N` is set to `num_rows`, and the\n `multiplier` determines the scale for each batch member.\n\n ```python\n # Create a 2 x 2 scaled identity matrix.\n operator = LinearOperatorIdentity(num_rows=2, multiplier=3.)\n\n operator.to_dense()\n ==> [[3., 0.]\n [0., 3.]]\n\n operator.shape\n ==> [2, 2]\n\n operator.log_abs_determinant()\n ==> 2 * Log[3]\n\n x = ... Shape [2, 4] Tensor\n operator.matmul(x)\n ==> 3 * x\n\n y = tf.random.normal(shape=[3, 2, 4])\n # Note that y.shape is compatible with operator.shape because operator.shape\n # is broadcast to [3, 2, 2].\n x = operator.solve(y)\n ==> 3 * x\n\n # Create a 2-batch of 2x2 identity matrices\n operator = LinearOperatorIdentity(num_rows=2, multiplier=5.)\n operator.to_dense()\n ==> [[[5., 0.]\n [0., 5.]],\n [[5., 0.]\n [0., 5.]]]\n\n x = ... Shape [2, 2, 3]\n operator.matmul(x)\n ==> 5 * x\n\n # Here the operator and x have different batch_shape, and are broadcast.\n x = ... 
Shape [1, 2, 3]\n operator.matmul(x)\n ==> 5 * x\n ```\n\n ### Shape compatibility\n\n This operator acts on [batch] matrix with compatible shape.\n `x` is a batch matrix with compatible shape for `matmul` and `solve` if\n\n ```\n operator.shape = [B1,...,Bb] + [N, N], with b >= 0\n x.shape = [C1,...,Cc] + [N, R],\n and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]\n ```\n\n ### Performance\n\n * `operator.matmul(x)` is `O(D1*...*Dd*N*R)`\n * `operator.solve(x)` is `O(D1*...*Dd*N*R)`\n * `operator.determinant()` is `O(D1*...*Dd)`\n\n #### Matrix property hints\n\n This `LinearOperator` is initialized with boolean flags of the form `is_X`,\n for `X = non_singular, self_adjoint, positive_definite, square`.\n These have the following meaning\n * If `is_X == True`, callers should expect the operator to have the\n property `X`. This is a promise that should be fulfilled, but is *not* a\n runtime assert. For example, finite floating point precision may result\n in these promises being violated.\n * If `is_X == False`, callers should expect the operator to not have `X`.\n * If `is_X == None` (the default), callers should have no expectation either\n way.\n \"\"\"\n\n def __init__(self,\n num_rows,\n multiplier,\n is_non_singular=None,\n is_self_adjoint=None,\n is_positive_definite=None,\n is_square=True,\n assert_proper_shapes=False,\n name=\"LinearOperatorScaledIdentity\"):\n r\"\"\"Initialize a `LinearOperatorScaledIdentity`.\n\n The `LinearOperatorScaledIdentity` is initialized with `num_rows`, which\n determines the size of each identity matrix, and a `multiplier`,\n which defines `dtype`, batch shape, and scale of each matrix.\n\n This operator is able to broadcast the leading (batch) dimensions.\n\n Args:\n num_rows: Scalar non-negative integer `Tensor`. Number of rows in the\n corresponding identity matrix.\n multiplier: `Tensor` of shape `[B1,...,Bb]`, or `[]` (a scalar).\n is_non_singular: Expect that this operator is non-singular.\n is_self_adjoint: Expect that this operator is equal to its hermitian\n transpose.\n is_positive_definite: Expect that this operator is positive definite,\n meaning the quadratic form `x^H A x` has positive real part for all\n nonzero `x`. Note that we do not require the operator to be\n self-adjoint to be positive-definite. See:\n https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices\n is_square: Expect that this operator acts like square [batch] matrices.\n assert_proper_shapes: Python `bool`. 
If `False`, only perform static\n checks that initialization and method arguments have proper shape.\n If `True`, and static checks are inconclusive, add asserts to the graph.\n name: A name for this `LinearOperator`\n\n Raises:\n ValueError: If `num_rows` is determined statically to be non-scalar, or\n negative.\n \"\"\"\n self._assert_proper_shapes = assert_proper_shapes\n\n with ops.name_scope(name, values=[multiplier, num_rows]):\n self._multiplier = linear_operator_util.convert_nonref_to_tensor(\n multiplier, name=\"multiplier\")\n\n # Check and auto-set hints.\n if not self._multiplier.dtype.is_complex:\n if is_self_adjoint is False: # pylint: disable=g-bool-id-comparison\n raise ValueError(\"A real diagonal operator is always self adjoint.\")\n else:\n is_self_adjoint = True\n\n if not is_square:\n raise ValueError(\"A ScaledIdentity operator is always square.\")\n\n linear_operator_util.assert_not_ref_type(num_rows, \"num_rows\")\n\n super(LinearOperatorScaledIdentity, self).__init__(\n dtype=self._multiplier.dtype.base_dtype,\n is_non_singular=is_non_singular,\n is_self_adjoint=is_self_adjoint,\n is_positive_definite=is_positive_definite,\n is_square=is_square,\n name=name)\n\n self._num_rows = linear_operator_util.shape_tensor(\n num_rows, name=\"num_rows\")\n self._num_rows_static = tensor_util.constant_value(self._num_rows)\n self._check_num_rows_possibly_add_asserts()\n self._num_rows_cast_to_dtype = math_ops.cast(self._num_rows, self.dtype)\n self._num_rows_cast_to_real_dtype = math_ops.cast(self._num_rows,\n self.dtype.real_dtype)\n\n def _shape(self):\n matrix_shape = tensor_shape.TensorShape((self._num_rows_static,\n self._num_rows_static))\n\n batch_shape = self.multiplier.shape\n return batch_shape.concatenate(matrix_shape)\n\n def _shape_tensor(self):\n matrix_shape = array_ops.stack((self._num_rows, self._num_rows), axis=0)\n\n batch_shape = array_ops.shape(self.multiplier)\n return array_ops.concat((batch_shape, matrix_shape), 0)\n\n def _assert_non_singular(self):\n return check_ops.assert_positive(\n math_ops.abs(self.multiplier), message=\"LinearOperator was singular\")\n\n def _assert_positive_definite(self):\n return check_ops.assert_positive(\n math_ops.real(self.multiplier),\n message=\"LinearOperator was not positive definite.\")\n\n def _assert_self_adjoint(self):\n imag_multiplier = math_ops.imag(self.multiplier)\n return check_ops.assert_equal(\n array_ops.zeros_like(imag_multiplier),\n imag_multiplier,\n message=\"LinearOperator was not self-adjoint\")\n\n def _make_multiplier_matrix(self, conjugate=False):\n # Shape [B1,...Bb, 1, 1]\n multiplier_matrix = array_ops.expand_dims(\n array_ops.expand_dims(self.multiplier, -1), -1)\n if conjugate:\n multiplier_matrix = math_ops.conj(multiplier_matrix)\n return multiplier_matrix\n\n def _matmul(self, x, adjoint=False, adjoint_arg=False):\n x = linalg.adjoint(x) if adjoint_arg else x\n if self._assert_proper_shapes:\n aps = linear_operator_util.assert_compatible_matrix_dimensions(self, x)\n x = control_flow_ops.with_dependencies([aps], x)\n return x * self._make_multiplier_matrix(conjugate=adjoint)\n\n def _determinant(self):\n return self.multiplier**self._num_rows_cast_to_dtype\n\n def _log_abs_determinant(self):\n return self._num_rows_cast_to_real_dtype * math_ops.log(\n math_ops.abs(self.multiplier))\n\n def _solve(self, rhs, adjoint=False, adjoint_arg=False):\n rhs = linalg.adjoint(rhs) if adjoint_arg else rhs\n if self._assert_proper_shapes:\n aps = 
linear_operator_util.assert_compatible_matrix_dimensions(self, rhs)\n rhs = control_flow_ops.with_dependencies([aps], rhs)\n return rhs / self._make_multiplier_matrix(conjugate=adjoint)\n\n def _trace(self):\n # Get Tensor of all ones of same shape as self.batch_shape.\n if self.batch_shape.is_fully_defined():\n batch_of_ones = array_ops.ones(shape=self.batch_shape, dtype=self.dtype)\n else:\n batch_of_ones = array_ops.ones(\n shape=self.batch_shape_tensor(), dtype=self.dtype)\n\n if self._min_matrix_dim() is not None:\n return self.multiplier * self._min_matrix_dim() * batch_of_ones\n else:\n return (self.multiplier * math_ops.cast(self._min_matrix_dim_tensor(),\n self.dtype) * batch_of_ones)\n\n def _diag_part(self):\n return self._ones_diag() * self.multiplier[..., array_ops.newaxis]\n\n def add_to_tensor(self, mat, name=\"add_to_tensor\"):\n \"\"\"Add matrix represented by this operator to `mat`. Equiv to `I + mat`.\n\n Args:\n mat: `Tensor` with same `dtype` and shape broadcastable to `self`.\n name: A name to give this `Op`.\n\n Returns:\n A `Tensor` with broadcast shape and same `dtype` as `self`.\n \"\"\"\n with self._name_scope(name):\n # Shape [B1,...,Bb, 1]\n multiplier_vector = array_ops.expand_dims(self.multiplier, -1)\n\n # Shape [C1,...,Cc, M, M]\n mat = ops.convert_to_tensor(mat, name=\"mat\")\n\n # Shape [C1,...,Cc, M]\n mat_diag = array_ops.matrix_diag_part(mat)\n\n # multiplier_vector broadcasts here.\n new_diag = multiplier_vector + mat_diag\n\n return array_ops.matrix_set_diag(mat, new_diag)\n\n @property\n def multiplier(self):\n \"\"\"The [batch] scalar `Tensor`, `c` in `cI`.\"\"\"\n return self._multiplier\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Reversible residual network compatible with eager execution.\n\nCode for main model.\n\nReference [The Reversible Residual Network: Backpropagation\nWithout Storing Activations](https://arxiv.org/pdf/1707.04585.pdf)\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow.contrib.eager.python.examples.revnet import blocks\n\n\nclass RevNet(tf.keras.Model):\n \"\"\"RevNet that depends on all the blocks.\"\"\"\n\n def __init__(self, config):\n \"\"\"Initialize RevNet with building blocks.\n\n Args:\n config: tf.contrib.training.HParams object; specifies hyperparameters\n \"\"\"\n super(RevNet, self).__init__(dtype=config.dtype)\n self.axis = 1 if config.data_format == \"channels_first\" else 3\n self.config = config\n\n self._init_block = blocks.InitBlock(config=self.config)\n self._final_block = blocks.FinalBlock(config=self.config)\n self._block_list = self._construct_intermediate_blocks()\n self._moving_average_variables = []\n\n def _construct_intermediate_blocks(self):\n # Precompute input shape after initial block\n stride = self.config.init_stride\n if 
self.config.init_max_pool:\n stride *= 2\n if self.config.data_format == \"channels_first\":\n w, h = self.config.input_shape[1], self.config.input_shape[2]\n input_shape = (self.config.init_filters, w // stride, h // stride)\n else:\n w, h = self.config.input_shape[0], self.config.input_shape[1]\n input_shape = (w // stride, h // stride, self.config.init_filters)\n\n # Aggregate intermediate blocks\n block_list = tf.contrib.checkpoint.List()\n for i in range(self.config.n_rev_blocks):\n # RevBlock configurations\n n_res = self.config.n_res[i]\n filters = self.config.filters[i]\n if filters % 2 != 0:\n raise ValueError(\"Number of output filters must be even to ensure\"\n \"correct partitioning of channels\")\n stride = self.config.strides[i]\n strides = (self.config.strides[i], self.config.strides[i])\n\n # Add block\n rev_block = blocks.RevBlock(\n n_res,\n filters,\n strides,\n input_shape,\n batch_norm_first=(i != 0), # Only skip on first block\n data_format=self.config.data_format,\n bottleneck=self.config.bottleneck,\n fused=self.config.fused,\n dtype=self.config.dtype)\n block_list.append(rev_block)\n\n # Precompute input shape for the next block\n if self.config.data_format == \"channels_first\":\n w, h = input_shape[1], input_shape[2]\n input_shape = (filters, w // stride, h // stride)\n else:\n w, h = input_shape[0], input_shape[1]\n input_shape = (w // stride, h // stride, filters)\n\n return block_list\n\n def call(self, inputs, training=True):\n \"\"\"Forward pass.\"\"\"\n\n saved_hidden = None\n if training:\n saved_hidden = [inputs]\n\n h = self._init_block(inputs, training=training)\n if training:\n saved_hidden.append(h)\n\n for block in self._block_list:\n h = block(h, training=training)\n if training:\n saved_hidden.append(h)\n\n logits = self._final_block(h, training=training)\n\n return (logits, saved_hidden) if training else (logits, None)\n\n def compute_loss(self, logits, labels):\n \"\"\"Compute cross entropy loss.\"\"\"\n\n if self.config.dtype == tf.float32 or self.config.dtype == tf.float16:\n cross_ent = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=labels)\n else:\n # `sparse_softmax_cross_entropy_with_logits` does not have a GPU kernel\n # for float64, int32 pairs\n labels = tf.one_hot(\n labels, depth=self.config.n_classes, axis=1, dtype=self.config.dtype)\n cross_ent = tf.nn.softmax_cross_entropy_with_logits(\n logits=logits, labels=labels)\n\n return tf.reduce_mean(cross_ent)\n\n def compute_gradients(self, saved_hidden, labels, training=True, l2_reg=True):\n \"\"\"Manually computes gradients.\n\n This method silently updates the running averages of batch normalization.\n\n Args:\n saved_hidden: List of hidden states Tensors\n labels: One-hot labels for classification\n training: Use the mini-batch stats in batch norm if set to True\n l2_reg: Apply l2 regularization\n\n Returns:\n A tuple with the first entry being a list of all gradients and the second\n being the loss\n \"\"\"\n\n def _defunable_pop(l):\n \"\"\"Functional style list pop that works with `tfe.defun`.\"\"\"\n t, l = l[-1], l[:-1]\n return t, l\n\n # Backprop through last block\n x = saved_hidden[-1]\n with tf.GradientTape() as tape:\n tape.watch(x)\n logits = self._final_block(x, training=training)\n loss = self.compute_loss(logits, labels)\n grads_combined = tape.gradient(loss,\n [x] + self._final_block.trainable_variables)\n dy, final_grads = grads_combined[0], grads_combined[1:]\n\n # Backprop through intermediate blocks\n intermediate_grads = []\n for block in 
reversed(self._block_list):\n y, saved_hidden = _defunable_pop(saved_hidden)\n x = saved_hidden[-1]\n dy, grads = block.backward_grads(x, y, dy, training=training)\n intermediate_grads = grads + intermediate_grads\n\n # Backprop through first block\n _, saved_hidden = _defunable_pop(saved_hidden)\n x, saved_hidden = _defunable_pop(saved_hidden)\n assert not saved_hidden\n with tf.GradientTape() as tape:\n y = self._init_block(x, training=training)\n init_grads = tape.gradient(\n y, self._init_block.trainable_variables, output_gradients=dy)\n\n # Ordering match up with `model.trainable_variables`\n grads_all = init_grads + final_grads + intermediate_grads\n if l2_reg:\n grads_all = self._apply_weight_decay(grads_all)\n\n return grads_all, loss\n\n def _apply_weight_decay(self, grads):\n \"\"\"Update gradients to reflect weight decay.\"\"\"\n return [\n g + self.config.weight_decay * v if v.name.endswith(\"kernel:0\") else g\n for g, v in zip(grads, self.trainable_variables)\n ]\n\n def get_moving_stats(self):\n \"\"\"Get moving averages of batch normalization.\"\"\"\n device = \"/gpu:0\" if tf.test.is_gpu_available() else \"/cpu:0\"\n with tf.device(device):\n return [v.read_value() for v in self.moving_average_variables]\n\n def restore_moving_stats(self, values):\n \"\"\"Restore moving averages of batch normalization.\"\"\"\n device = \"/gpu:0\" if tf.test.is_gpu_available() else \"/cpu:0\"\n with tf.device(device):\n for var_, val in zip(self.moving_average_variables, values):\n var_.assign(val)\n\n @property\n def moving_average_variables(self):\n \"\"\"Get all variables that are batch norm moving averages.\"\"\"\n\n def _is_moving_avg(v):\n n = v.name\n return n.endswith(\"moving_mean:0\") or n.endswith(\"moving_variance:0\")\n\n if not self._moving_average_variables:\n self._moving_average_variables = filter(_is_moving_avg, self.variables)\n\n return self._moving_average_variables\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A multivariate TFTS example.\n\nFits a multivariate model, exports it, and visualizes the learned correlations\nby iteratively predicting and sampling from the predictions.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom os import path\nimport tempfile\n\nimport numpy\nimport tensorflow as tf\n\ntry:\n import matplotlib # pylint: disable=g-import-not-at-top\n matplotlib.use(\"TkAgg\") # Need Tk for interactive plots.\n from matplotlib import pyplot # pylint: disable=g-import-not-at-top\n HAS_MATPLOTLIB = True\nexcept ImportError:\n # Plotting requires matplotlib, but the unit test running this code may\n # execute in an environment without it (i.e. matplotlib is not a build\n # dependency). 
We'd still like to test the TensorFlow-dependent parts of this\n # example, namely train_and_predict.\n HAS_MATPLOTLIB = False\n\n_MODULE_PATH = path.dirname(__file__)\n_DATA_FILE = path.join(_MODULE_PATH, \"data/multivariate_level.csv\")\n\n\ndef multivariate_train_and_sample(\n csv_file_name=_DATA_FILE, export_directory=None, training_steps=500):\n \"\"\"Trains, evaluates, and exports a multivariate model.\"\"\"\n estimator = tf.contrib.timeseries.StructuralEnsembleRegressor(\n periodicities=[], num_features=5)\n reader = tf.contrib.timeseries.CSVReader(\n csv_file_name,\n column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)\n + (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5))\n train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(\n # Larger window sizes generally produce a better covariance matrix.\n reader, batch_size=4, window_size=64)\n estimator.train(input_fn=train_input_fn, steps=training_steps)\n evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)\n current_state = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)\n values = [current_state[\"observed\"]]\n times = [current_state[tf.contrib.timeseries.FilteringResults.TIMES]]\n # Export the model so we can do iterative prediction and filtering without\n # reloading model checkpoints.\n if export_directory is None:\n export_directory = tempfile.mkdtemp()\n input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()\n export_location = estimator.export_saved_model(export_directory,\n input_receiver_fn)\n with tf.Graph().as_default():\n numpy.random.seed(1) # Make the example a bit more deterministic\n with tf.compat.v1.Session() as session:\n signatures = tf.saved_model.loader.load(\n session, [tf.saved_model.tag_constants.SERVING], export_location)\n for _ in range(100):\n current_prediction = (\n tf.contrib.timeseries.saved_model_utils.predict_continuation(\n continue_from=current_state, signatures=signatures,\n session=session, steps=1))\n next_sample = numpy.random.multivariate_normal(\n # Squeeze out the batch and series length dimensions (both 1).\n mean=numpy.squeeze(current_prediction[\"mean\"], axis=(0, 1)),\n cov=numpy.squeeze(current_prediction[\"covariance\"], axis=(0, 1)))\n # Update model state so that future predictions are conditional on the\n # value we just sampled.\n filtering_features = {\n tf.contrib.timeseries.TrainEvalFeatures.TIMES: current_prediction[\n tf.contrib.timeseries.FilteringResults.TIMES],\n tf.contrib.timeseries.TrainEvalFeatures.VALUES: next_sample[\n None, None, :]}\n current_state = (\n tf.contrib.timeseries.saved_model_utils.filter_continuation(\n continue_from=current_state,\n session=session,\n signatures=signatures,\n features=filtering_features))\n values.append(next_sample[None, None, :])\n times.append(current_state[\"times\"])\n all_observations = numpy.squeeze(numpy.concatenate(values, axis=1), axis=0)\n all_times = numpy.squeeze(numpy.concatenate(times, axis=1), axis=0)\n return all_times, all_observations\n\n\ndef main(unused_argv):\n if not HAS_MATPLOTLIB:\n raise ImportError(\n \"Please install matplotlib to generate a plot from this example.\")\n all_times, all_observations = multivariate_train_and_sample()\n # Show where sampling starts on the plot\n pyplot.axvline(1000, linestyle=\"dotted\")\n pyplot.plot(all_times, all_observations)\n pyplot.show()\n\n\nif __name__ == \"__main__\":\n tf.compat.v1.app.run(main=main)\n" ]
[ [ "tensorflow.python.ops.math_ops.imag", "tensorflow.python.ops.math_ops.log", "tensorflow.python.ops.math_ops.subtract", "numpy.sqrt", "tensorflow.python.compat.compat.forward_compatible", "tensorflow.python.eager.context.context", "tensorflow.python.ops.math_ops.real", "tensorflow.python.framework.ops.RegisterGradient", "tensorflow.python.ops.math_ops.abs", "tensorflow.python.ops.math_ops.mul_no_nan", "tensorflow.python.ops.math_ops.divide", "tensorflow.python.ops.math_ops.less", "tensorflow.python.ops.math_ops.div_no_nan", "tensorflow.python.ops.math_ops.matmul", "tensorflow.python.ops.array_ops.tile", "tensorflow.python.ops.math_ops.cumprod", "tensorflow.python.ops.math_ops.negative", "tensorflow.python.ops.math_ops.equal", "tensorflow.python.ops.gen_math_ops.reciprocal_grad", "tensorflow.python.ops.gen_math_ops.unsorted_segment_prod", "tensorflow.python.ops.math_ops.range", "tensorflow.python.ops.math_ops.sparse_segment_sqrt_n_grad", "tensorflow.python.ops.math_ops.zeta", "tensorflow.python.ops.math_ops.sparse_segment_mean_grad", "tensorflow.python.ops.math_ops.greater_equal", "tensorflow.python.ops.math_ops.polygamma", "tensorflow.python.ops.math_ops.exp", "tensorflow.python.ops.math_ops.cross", "tensorflow.python.ops.math_ops.bessel_i1e", "tensorflow.python.ops.math_ops.logical_and", "tensorflow.python.ops.array_ops.transpose", "tensorflow.python.ops.math_ops.bessel_i0e", "tensorflow.python.ops.math_ops.cosh", "tensorflow.python.ops.array_ops.where", "tensorflow.python.ops.gen_math_ops.xlogy", "tensorflow.python.ops.math_ops.add", "tensorflow.python.ops.gen_math_ops.tanh_grad", "tensorflow.python.ops.math_ops.reduced_shape", "tensorflow.python.ops.math_ops.square", "tensorflow.python.ops.math_ops.pow", "tensorflow.python.framework.ops.colocate_with", "tensorflow.python.ops.array_ops.broadcast_gradient_args", "tensorflow.python.ops.array_ops.shape_internal", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.math_ops.cos", "tensorflow.python.ops.math_ops.scalar_mul", "tensorflow.python.ops.array_ops.constant", "tensorflow.python.ops.math_ops.sin", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.gen_math_ops.mat_mul", "tensorflow.python.ops.math_ops.cumulative_logsumexp", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.framework.ops.NotDifferentiable", "tensorflow.python.ops.math_ops.realdiv", "tensorflow.python.ops.math_ops.reduce_prod", "tensorflow.python.ops.array_ops.gather", "tensorflow.python.framework.tensor_util.constant_value", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.ops.gen_math_ops.rsqrt_grad", "tensorflow.python.ops.array_ops.setdiff1d", "tensorflow.python.ops.gen_math_ops.mul_no_nan", "tensorflow.python.ops.math_ops.digamma", "tensorflow.python.ops.array_ops.zeros_like", "tensorflow.python.ops.math_ops.conj", "tensorflow.python.ops.array_ops.broadcast_to", "tensorflow.python.ops.gen_math_ops.sigmoid_grad", "tensorflow.python.ops.math_ops.cumsum", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.ops.gen_math_ops.sqrt_grad", "tensorflow.python.ops.math_ops.multiply", "tensorflow.python.ops.math_ops.maximum", "tensorflow.python.ops.array_ops.invert_permutation", "tensorflow.python.ops.array_ops.where_v2", "tensorflow.python.ops.gen_math_ops.mul", "tensorflow.python.ops.math_ops.greater", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.ops.math_ops.not_equal", "tensorflow.python.framework.ops.device", 
"tensorflow.python.ops.array_ops.rank", "numpy.arange", "tensorflow.python.ops.gen_math_ops.xdivy", "numpy.finfo", "tensorflow.python.ops.math_ops.lgamma", "tensorflow.python.ops.math_ops.xdivy", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.ops.math_ops.sinh", "tensorflow.python.ops.math_ops.complex", "tensorflow.python.ops.math_ops.segment_sum", "tensorflow.python.ops.gen_math_ops.lgamma", "tensorflow.python.ops.gen_array_ops.broadcast_gradient_args", "tensorflow.python.util.object_identity.ObjectIdentityDictionary", "tensorflow.python.ops.array_ops.ones_like", "tensorflow.python.ops.math_ops.reciprocal", "tensorflow.python.ops.math_ops.sign", "numpy.prod", "tensorflow.python.ops.gen_math_ops.igamma_grad_a", "tensorflow.python.ops.math_ops.floor_div", "tensorflow.python.ops.array_ops.expand_dims", "tensorflow.python.ops.math_ops.reduce_sum", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.core.protobuf.config_pb2.RunMetadata", "tensorflow.python.debug.cli.cli_test_utils.assert_lines_equal_ignoring_whitespace", "tensorflow.core.protobuf.config_pb2.GraphOptions", "tensorflow.python.ops.control_flow_ops.while_loop", "tensorflow.core.protobuf.config_pb2.RunOptions", "tensorflow.python.ops.control_flow_ops.with_dependencies", "tensorflow.python.framework.test_util.run_v1_only", "tensorflow.python.ops.math_ops.less", "tensorflow.python.ops.math_ops.add", "tensorflow.python.platform.googletest.main", "tensorflow.python.debug.lib.debug_data.DebugDumpDir", "tensorflow.python.ops.math_ops.matmul", "numpy.load", "tensorflow.python.ops.array_ops.unique", "numpy.zeros", "tensorflow.python.platform.test.is_gpu_available", "tensorflow.python.ops.variables.VariableV1", "numpy.array", "tensorflow.python.debug.cli.cli_test_utils.assert_array_lines_close", "tensorflow.python.util.tf_inspect.currentframe", "tensorflow.python.debug.cli.command_parser.parse_readable_size_str", "tensorflow.python.util.tf_inspect.stack", "tensorflow.core.protobuf.rewriter_config_pb2.RewriterConfig", "tensorflow.python.debug.cli.debugger_cli_common.CommandHandlerRegistry", "tensorflow.python.debug.lib.debug_utils.watch_graph", "tensorflow.python.platform.test.mock.patch.object", "tensorflow.python.ops.math_ops.multiply", "tensorflow.python.framework.test_util.gpu_device_name", "tensorflow.python.framework.test_util.IsMklEnabled", "tensorflow.core.protobuf.config_pb2.ConfigProto", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.examples.speech_commands.input_data.get_features_range", "tensorflow.examples.speech_commands.input_data.prepare_words_list", "tensorflow.contrib.framework.python.ops.audio_ops.encode_wav", "tensorflow.zeros", "tensorflow.examples.speech_commands.input_data.load_wav_file", "tensorflow.examples.speech_commands.models.prepare_model_settings", "tensorflow.examples.speech_commands.input_data.save_wav_file", "tensorflow.python.platform.test.main", "tensorflow.examples.speech_commands.input_data.which_set", "tensorflow.examples.speech_commands.input_data.AudioProcessor", "numpy.zeros" ], [ "tensorflow.python.ops.math_ops.log", "tensorflow.python.ops.array_ops.ones_like", "tensorflow.python.ops.nn_ops.softplus", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.check_ops.assert_positive", "tensorflow.python.ops.check_ops.assert_same_float_dtype", "numpy.finfo", "tensorflow.python.ops.math_ops.log1p", 
"tensorflow.python.framework.ops.name_scope", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.util.deprecation.deprecated", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.core.example.example_pb2.SequenceExample", "tensorflow.python.feature_column.sequence_feature_column.concatenate_context_input", "tensorflow.python.feature_column.feature_column_v2.numeric_column", "tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices", "tensorflow.python.feature_column.feature_column_v2.embedding_column", "tensorflow.python.feature_column.dense_features.DenseFeatures", "tensorflow.python.feature_column.sequence_feature_column.SequenceFeatures", "tensorflow.python.feature_column.feature_column_v2.make_parse_example_spec_v2", "tensorflow.python.util.compat.as_bytes", "tensorflow.python.feature_column.sequence_feature_column.sequence_categorical_column_with_hash_bucket", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.platform.test.main", "tensorflow.python.feature_column.feature_column_v2.categorical_column_with_identity", "tensorflow.python.keras.layers.recurrent.SimpleRNNCell", "tensorflow.core.example.feature_pb2.Feature", "tensorflow.python.feature_column.sequence_feature_column.sequence_categorical_column_with_identity" ], [ "tensorflow.python.ops.gen_dataset_ops.shuffle_and_repeat_dataset", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.data.util.random_seed.get_seed", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.util.deprecation.deprecated", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.ops.math_ops.imag", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.array_ops.zeros", "numpy.any", "tensorflow.python.ops.control_flow_ops.no_op", "tensorflow.python.ops.math_ops.real", "tensorflow.python.ops.math_ops.abs", "tensorflow.python.ops.linalg.linear_operator_util.shape_tensor", "tensorflow.python.ops.control_flow_ops.with_dependencies", "tensorflow.python.ops.linalg.linear_operator_util.assert_not_ref_type", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.framework.tensor_util.constant_value", "tensorflow.python.framework.tensor_shape.dimension_value", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.ops.check_ops.assert_non_negative", "tensorflow.python.ops.array_ops.matrix_diag_part", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.ops.array_ops.matrix_set_diag", "tensorflow.python.ops.check_ops.assert_rank", "tensorflow.python.ops.array_ops.zeros_like", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.linalg.linear_operator_util.assert_compatible_matrix_dimensions", "tensorflow.python.ops.array_ops.stack", "tensorflow.python.ops.math_ops.conj", "tensorflow.python.ops.linalg.linalg_impl.adjoint", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.linalg.linear_operator_util.convert_nonref_to_tensor", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.array_ops.broadcast_static_shape", "tensorflow.python.ops.array_ops.expand_dims" ], [ "tensorflow.contrib.checkpoint.List", "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.device", "tensorflow.contrib.eager.python.examples.revnet.blocks.RevBlock", "tensorflow.contrib.eager.python.examples.revnet.blocks.FinalBlock", 
"tensorflow.reduce_mean", "tensorflow.contrib.eager.python.examples.revnet.blocks.InitBlock", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.one_hot", "tensorflow.test.is_gpu_available", "tensorflow.GradientTape" ], [ "tensorflow.Graph", "matplotlib.pyplot.axvline", "tensorflow.contrib.timeseries.saved_model_utils.predict_continuation", "numpy.random.seed", "tensorflow.contrib.timeseries.CSVReader", "matplotlib.use", "numpy.squeeze", "tensorflow.contrib.timeseries.WholeDatasetInputFn", "tensorflow.contrib.timeseries.saved_model_utils.filter_continuation", "matplotlib.pyplot.plot", "numpy.concatenate", "tensorflow.compat.v1.Session", "tensorflow.contrib.timeseries.RandomWindowInputFn", "tensorflow.contrib.timeseries.StructuralEnsembleRegressor", "tensorflow.saved_model.loader.load", "matplotlib.pyplot.show", "tensorflow.compat.v1.app.run" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.6", "2.4", "2.3", "2.9", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "1.4", "2.2", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "2.6", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.7" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shunsvineyard/shunsvineyard
[ "5ef44847da95e8ad881622d2c3571a3aceb1b20c" ]
[ "pocket-learning-algorithm-and-feature-engineering/iris_example.py" ]
[ "# Copyright © 2017, 2019 by Shun Huang. All rights reserved.\n# Licensed under MIT License.\n# See LICENSE in the project root for license information.\n\n\"\"\"An example of supervised learning uses the Iris data set.\nhttps://archive.ics.uci.edu/ml/datasets/Iris\nAttribute Information:\n0. sepal length in cm \n1. sepal width in cm \n2. petal length in cm \n3. petal width in cm \n4. class: \n-- Iris Setosa \n-- Iris Versicolour \n-- Iris Virginica\n\"\"\"\n\nimport urllib.request\n\n# matplotlib is a python 2D plotting library which produces publication\n# quality. Figures in a variety of hardcopy formats and interactive\n# environments across platforms.\n# http://matplotlib.org/2.0.0/index.html\nimport matplotlib.pyplot as plt\n\n# NumPy is the fundamental package for scientific computing with\n# Python. http://www.numpy.org/\nimport numpy as np\n\n# pandas is an open source library providing high-performance, \n# easy-to-use data structures and data analysis tools.\n# http://pandas.pydata.org/\nimport pandas as pd\n\n# Seaborn is a Python data visualization library based on matplotlib.\n# It provides a high-level interface for drawing attractive and\n# informative statistical graphics.\n# http://seaborn.pydata.org/index.html\nimport seaborn as sns\n\nsns.set() # set the default seaborn theme, scaling, and color palette.\n\nimport perceptron_classifier\n\n# Download Iris Data Set from \n# http://archive.ics.uci.edu/ml/datasets/Iris\nURL = \"http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data\"\nurllib.request.urlretrieve(URL, \"iris.data\")\n# use pandas\" read_csv function to read iris.data into a python array.\n# Note: the iris.data is headerless, so header is None.\nIRIS_DATA = pd.read_csv(\"iris.data\", header=None)\n\n# Plot the versicolor and virginica.\nVERSICOLOR = IRIS_DATA.iloc[50:100, [0, 2]].values\nVIRGINICA = IRIS_DATA.iloc[100:150, [0, 2]].values\n\nplt.scatter(VERSICOLOR[:, 0], VERSICOLOR[:, 1],\n color=\"blue\", marker=\"x\", label=\"versicolor\")\nplt.scatter(VIRGINICA[:, 0], VIRGINICA[:, 1],\n color=\"green\", marker=\"v\", label=\"virginica\")\n\nplt.xlabel(\"sepal length\")\nplt.ylabel(\"petal length\")\nplt.legend(loc=\"upper left\")\nplt.show()\n\n# Use Perceptron Learning Algorithm onto the versicolor and virginica\n# of the Iris Data Set.\nVERSICOLOR_LABEL = IRIS_DATA.iloc[50:100, 4].values\nVIRGINICA_LABEL = IRIS_DATA.iloc[100:150, 4].values\nLABELS = np.append(VERSICOLOR_LABEL, VIRGINICA_LABEL)\nSAMPLES = np.append(VERSICOLOR, VIRGINICA, axis=0)\n\nperceptron_classifier = perceptron_classifier.PerceptronClassifier(\n number_of_attributes=2, class_labels=('Iris-versicolor', 'Iris-virginica'))\n\nperceptron_classifier.train(SAMPLES, LABELS, 100)\nplt.plot(perceptron_classifier.misclassify_record, color=\"purple\")\n\n# Plot the error rate and show it never converges.\nplt.xlabel(\"number of iteration\")\nplt.ylabel(\"number of misclassification\")\nplt.legend(loc=\"lower right\")\nplt.show()" ]
[ [ "matplotlib.pyplot.legend", "pandas.read_csv", "matplotlib.pyplot.scatter", "matplotlib.pyplot.plot", "numpy.append", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
changgoo/pyathena-1
[ "c461ac3390d773537ce52393e3ebf68a3282aa46", "c461ac3390d773537ce52393e3ebf68a3282aa46", "c461ac3390d773537ce52393e3ebf68a3282aa46", "c461ac3390d773537ce52393e3ebf68a3282aa46", "c461ac3390d773537ce52393e3ebf68a3282aa46" ]
[ "pyathena/tigress_xco/plt_tigress_xco.py", "pyathena/sf_cloud_rad/xray.py", "pyathena/tigress_single_sn/plt_tigress_single_sn.py", "pyathena/util/derivative.py", "pyathena/classic/rst_handler.py" ]
[ "#!/usr/bin/env python\n\nimport os\nimport sys\nimport time\n\nimport os.path as osp\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nimport numpy as np\nimport xarray as xr\nfrom mpi4py import MPI\n\nfrom .load_sim_tigress_xco import LoadSimTIGRESSXCOAll\nfrom ..util.split_container import split_container\nfrom ..plt_tools.plt_joint_pdf import plt_joint_pdf\n\n#field_def = ['density', 'xH2', 'CR_ionization_rate']\n\nfield_def = ['density', 'xH2', 'CR_ionization_rate',\n 'rad_energy_density0', 'rad_energy_density_PE',\n 'rad_energy_density2', 'rad_energy_density3', 'rad_energy_density4',\n 'rad_energy_density_LW', 'rad_energy_density_PE_unatt']\n\ndef read_data(sa, model, num,\n field=field_def, zmin=-256.0, zmax=256.0):\n sa.set_model(model)\n s = sa.sim\n ds = s.load_vtk(num=num)\n # Read data\n dat = ds.get_field(field=field, as_xarray=True)\n dat['nH2'] = 2.0*dat.xH2*dat.density\n dat = dat.where(np.logical_and(dat.z < zmax, dat.z > zmin), drop=True)\n\n #dat = dict(dat)\n\n # dat['taueff'] = -np.log(dat.rad_energy_density_PE/dat.rad_energy_density_PE_unatt)\n # Mask where taueff = inf with tau_eff_max\n # taueff_max = 10.0\n # dat['taueff'] = xr.where(dat['taueff'] == np.inf, taueff_max, dat['taueff'])\n \n return s, ds, dat\n\ndef plt_pdf_density_CRIR(sa, model, num, dat=None, gs=None, savfig=True):\n\n if dat is None:\n s, ds, dat = read_data(sa, model, num)\n\n s = sa.set_model(model)\n ds = s.load_vtk(num=num)\n \n x = dat['density'].values.flatten()\n y = dat['CR_ionization_rate'].values.flatten()\n hexbin_args = dict(xscale='log', yscale='log', mincnt=1, gridsize=30)\n ax1, ax2, ax3 = plt_joint_pdf(x, y, hexbin_args, weights=x, gs=gs)\n ax1.set_xlabel(r'$n_{\\rm H}$')\n ax1.set_ylabel(r'$\\xi_{\\rm CR}$')\n ax1.set_xlim(1e-3, 1e4)\n ax2.set_xlim(1e-3, 1e4)\n\n # Set CRIR range\n h = s.read_hst()\n ylim = (h.xi_CR0.iloc[0]*1e-2, h.xi_CR0.iloc[0]*2.0)\n ax1.set_ylim(*ylim)\n ax3.set_ylim(*ylim)\n #ax1.set_ylim(3e-17, 1e-15)\n #ax3.set_ylim(3e-17, 1e-15)\n plt.suptitle('{0:s}, time: {1:.1f}'.format(s.basename, ds.domain['time']))\n \n if savfig:\n savdir = osp.join('./figures-pdf')\n if not os.path.exists(savdir):\n os.makedirs(savdir)\n plt.savefig(osp.join(savdir, 'pdf-density-CRIR.{0:s}.{1:04d}.png'.format(model, ds.num)))\n \n return plt.gcf()\n\ndef plt_pdf_density_xH2(sa, model, num, dat=None, gs=None, savfig=True):\n\n if dat is None:\n s, ds, dat = read_data(sa, model, num)\n \n s = sa.set_model(model)\n ds = s.load_vtk(num=num)\n\n x = dat['density'].values.flatten()\n y = dat['xH2'].values.flatten()\n hexbin_args = dict(xscale='log', yscale='linear', mincnt=1, gridsize=50,\n norm=mpl.colors.LogNorm())\n ax1, ax2, ax3 = plt_joint_pdf(x, y, hexbin_args, weights=x, gs=gs)\n ax1.set_xlabel(r'$n_{\\rm H}$')\n ax1.set_ylabel(r'$x_{\\rm H_2}$')\n ax1.set_xlim(1e-3, 1e4)\n ax2.set_xlim(1e-3, 1e4)\n ax1.set_ylim(0, 0.55)\n ax3.set_ylim(0, 0.55)\n \n def calc_xH2_equil(n, xi_H=2.0e-16, R_gr=3.0e-17, zeta=5.7e-11):\n a = 2.31*xi_H\n b = -2.0*R_gr*n - 4.95*xi_H - zeta\n c = n*R_gr\n return (-b - np.sqrt(b*b - 4.0*a*c))/(2.0*a)\n \n n = np.logspace(-3, 4)\n h = s.read_hst()\n xH2eq = calc_xH2_equil(n, h.xi_CR0.iloc[num-1], # num-1 because the first row is delted\n R_gr=3.0e-17*s.par['problem']['R_gr_amp'], zeta=0.0)\n \n ax1.semilogx(n, xH2eq, 'r--')\n plt.suptitle('{0:s}, time: {1:.1f}'.format(s.basename,ds.domain['time']))\n \n if savfig:\n savdir = osp.join('./figures-pdf')\n if not os.path.exists(savdir):\n os.makedirs(savdir)\n 
plt.savefig(osp.join(savdir, 'pdf-density-xH2.{0:s}.{1:04d}.png'.format(model, ds.num)))\n \n return plt.gcf()\n\n\ndef plt_hst_mass(mhd_model='R2_2pc'):\n\n sa = LoadSimTIGRESSXCOAll()\n \n fig, axes = plt.subplots(3, 1, figsize=(12, 15),\n sharex=True)\n\n i = 0\n for mdl in sa.models:\n if not mdl.startswith(mhd_model):\n continue\n \n s = sa.set_model(mdl, verbose=False)\n h = s.read_hst(merge_mhd=True, force_override=True)\n hmhd = s.read_hst_mhd()\n plt.sca(axes[0])\n if i == 0:\n label = 'total'\n plt.plot(h.time, h.Sigma_gas, 'k-', lw=2, label=label)\n else:\n label = '_nolegend_'\n plt.plot(h.time, h.Sigma_H - h.Sigma_H2, 'o-', label=mdl)\n plt.sca(axes[1])\n plt.plot(h.time, h.Sigma_H2/h.Sigma_H, 'o-', label=mdl)\n plt.sca(axes[2])\n plt.plot(h.time, h.xi_CR0, 'o-')\n \n i += 1\n\n plt.sca(axes[0])\n plt.ylabel(r'$\\Sigma_{\\rm HI} [Msun/pc^2]$')\n plt.legend(loc=1)\n plt.yscale('log')\n plt.title('Gas surface density')\n plt.grid()\n plt.gca().grid(which='minor', alpha=0.2)\n plt.gca().grid(which='major', alpha=0.5)\n\n # H2 fraction\n plt.sca(axes[1])\n plt.ylim(3e-2, 1)\n plt.yscale('log')\n plt.title('H2 mass fraction')\n plt.ylabel(r'$M_{\\rm H_2}/M_{\\rm H,tot}$')\n plt.grid()\n plt.gca().grid(which='minor', alpha=0.2)\n plt.gca().grid(which='major', alpha=0.5)\n\n # CRIR\n plt.sca(axes[2])\n plt.yscale('log')\n plt.title('CRIR')\n plt.xlabel('time [Myr]')\n plt.ylabel(r'$\\xi_{\\rm CR,0}\\;[{\\rm s}^{-1}]$')\n plt.grid()\n plt.gca().grid(which='minor', alpha=0.2)\n plt.gca().grid(which='major', alpha=0.5)\n \n dtime = h.time.iloc[-1] - h.time.iloc[0]\n plt.xlim(h.time.iloc[0]*0.9, h.time.iloc[-1] + 0.8*dtime)\n \n return fig\n \ndef plt_two_joint_pdfs(sa, model, num, savfig=True):\n\n fig = plt.figure(figsize=(14, 6))\n gs0 = gridspec.GridSpec(1, 2, wspace=0.25)\n gs00 = gridspec.GridSpecFromSubplotSpec(4, 4, subplot_spec=gs0[0])\n gs01 = gridspec.GridSpecFromSubplotSpec(4, 4, subplot_spec=gs0[1])\n\n s, ds, dat = read_data(sa, model, num)\n plt_pdf_density_xH2(sa, model, num, dat, gs=gs00, savfig=False)\n fig = plt_pdf_density_CRIR(sa, model, num, dat, gs=gs01, savfig=savfig)\n if savfig:\n plt.close(fig)\n else:\n return fig\n \nif __name__ == '__main__':\n\n COMM = MPI.COMM_WORLD\n sa = LoadSimTIGRESSXCOAll()\n\n models = sa.models\n\n # Measure execution time\n time0 = time.time()\n for model in models:\n if not model.startswith('R8'):\n continue\n s = sa.set_model(model, verbose=False)\n nums = s.nums\n \n if COMM.rank == 0:\n print('model, nums', model, nums)\n nums = split_container(nums, COMM.size)\n else:\n nums = None\n \n mynums = COMM.scatter(nums, root=0)\n print('[rank, mynums]:', COMM.rank, mynums)\n\n for num in mynums:\n print(num, end=' ')\n plt_two_joint_pdfs(sa, model, num)\n # break\n \n COMM.barrier()\n if COMM.rank == 0:\n print('')\n print('################################################')\n print('# Done with model', model)\n print('# Execution time [sec]: {:.1f}'.format(time.time()-time0))\n print('################################################')\n print('')\n", "# xray.py\n\nimport pandas as pd\n\nfrom ..fields.xray_emissivity import get_xray_emissivity\nfrom ..load_sim import LoadSim\n\nclass Xray:\n \n @LoadSim.Decorators.check_pickle\n def read_xray_all(self, nums=None, prefix='xray_all',\n savdir=None, force_override=False):\n rr = dict()\n if nums is None:\n nums = self.nums\n\n \n print('num:', end=' ')\n for i,num in enumerate(nums):\n print(num, end=' ')\n r = self.read_xray(num=num, savdir=savdir, force_override=False)\n if i == 0:\n 
for k in r.keys():\n rr[k] = []\n\n for k in r.keys():\n try:\n rr[k].append(r[k].value.item())\n except:\n rr[k].append(r[k])\n\n rr = pd.DataFrame(rr)\n return rr\n \n @LoadSim.Decorators.check_pickle\n def read_xray(self, num, Z_gas=1.0, emin_keV=0.5, emax_keV=7.0, prefix='L_X',\n savdir=None, force_override=False):\n \"\"\"\n Function to calculate x-ray luminosity of the snapshot\n \"\"\"\n \n ds = self.load_vtk(num)\n dV = ds.domain['dx'].prod()*(self.u.length.cgs.value)**3\n d = ds.get_field(['density','temperature'])\n em = get_xray_emissivity(d['temperature'].data, Z_gas,\n emin_keV, emax_keV, energy=True)\n d['j_X'] = d['density']**2*em\n #dd['I_X'] = d['j_X'].sum(dim='z')*d.domain['dx'][2]*self.u.length.cgs.value\n\n res = dict()\n res['time'] = ds.domain['time']\n res['L_X'] = float(d['j_X'].sum()*dV)\n\n return res\n \n", "#!/usr/bin/env python\n\nimport os\nimport sys\nimport time\n\nimport os.path as osp\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport xarray as xr\nfrom mpi4py import MPI\n\nfrom .load_sim_tigress_single_sn import LoadSimTIGRESSSingleSNAll\nfrom ..util.split_container import split_container\nfrom ..util.units import Units\nfrom ..classic.cooling import coolftn\n\ndef plt_rprofiles(ds0, ds1, dat0, dat1, r1d=None, j=100):\n # import pyathena as pa\n u = Units()\n cf = coolftn()\n \n muH = 1.4271\n nHeq = cf.heat/cf.cool\n Peq = cf.T1*nHeq*muH\n\n fig, axes = plt.subplots(2, 3, figsize=(18, 10), \n gridspec_kw=dict(hspace=0.1), constrained_layout=True)\n axes = axes.flatten()\n \n for dat in (dat0, dat1):\n if r1d is None:\n x = np.tile(dat0.x.data[:, None, None], (1, ds0.domain['Nx'][1], ds0.domain['Nx'][2]))\n y = np.tile(dat0.y.data[None, :, None], (ds0.domain['Nx'][0], 1, ds0.domain['Nx'][2]))\n z = np.tile(dat0.z.data[None, None, :], (ds0.domain['Nx'][0], ds0.domain['Nx'][1], 1))\n r3d = np.sqrt(x**2 + y**2 + z**2)\n r1d = r3d.flatten()\n \n d = dat['density'].data.flatten()\n p = dat['pressure'].data.flatten()*u.pok\n c = dat['cool_rate'].data.flatten()\n T = dat['temperature'].data.flatten()\n \n plt_sty = dict(s=2.0, alpha=0.2)\n plt.sca(axes[0])\n plt.scatter(r1d[::j], d[::j], marker='o', **plt_sty)\n \n plt.sca(axes[1])\n plt.scatter(r1d[::j], p[::j], marker='o', **plt_sty)\n\n plt.sca(axes[2])\n plt.scatter(d[::j], p[::j], marker='o', **plt_sty)\n \n plt.sca(axes[3])\n plt.scatter(r1d[::j], c[::j]/d[::j]**2, marker='o', **plt_sty)\n\n plt.sca(axes[4])\n plt.scatter(r1d[::j], T[::j], marker='o', **plt_sty)\n\n plt.sca(axes[5])\n plt.scatter(T[::j], c[::j]/d[::j]**2, marker='o', **plt_sty)\n \n for ax in (axes[0],axes[1],axes[3],axes[4]):\n plt.sca(ax)\n plt.xlabel(r'$r\\;[{\\rm pc}]$')\n plt.xscale('linear')\n plt.yscale('log')\n plt.xlim(0, ds0.domain['Lx'][0]/2.5)\n \n plt.sca(axes[0])\n plt.ylim(5e-4, 2e1)\n plt.ylabel(r'$n_{\\rm H}$')\n \n plt.sca(axes[1])\n plt.ylim(1e1, 5e7)\n plt.ylabel(r'$P/k_{\\rm B}\\;[{\\rm cm}^{-3}\\,{\\rm K}]$')\n\n plt.sca(axes[2])\n plt.xscale('log')\n plt.yscale('log')\n plt.xlim(5e-4, 3e3)\n plt.ylim(1e1, 5e7)\n plt.xlabel(r'$n_{\\rm H}$')\n plt.ylabel(r'$P/k_{\\rm B}\\;[{\\rm cm}^{-3}\\,{\\rm K}]$')\n plt.plot(nHeq, Peq, c='grey', alpha=0.7)\n\n plt.sca(axes[3])\n plt.ylim(1e-27, 1e-18)\n plt.ylabel(r'$\\Lambda(T)\\;[{\\rm cm^3\\,s^{-1}}]$')\n\n plt.sca(axes[4])\n plt.ylim(1e2, 1e7)\n plt.ylabel(r'$T\\;[{\\rm K}]$')\n\n plt.sca(axes[5])\n plt.xscale('log')\n plt.yscale('log')\n plt.xlim(1e1, 5e7)\n plt.ylim(1e-27, 1e-18)\n plt.xlabel(r'$T\\;[{\\rm K}]$')\n 
plt.ylabel(r'$\\Lambda(T)\\;[{\\rm cm^3\\,s^{-1}}]$')\n plt.plot(nHeq, Peq, c='grey', alpha=0.7)\n \n plt.tight_layout()\n plt.suptitle('{0:s} {1:s} time:{2:5.3f}'.format(s0.basename, s1.basename,\n ds0.domain['time']))\n #plt.subplots_adjust(top=0.95)\n\n return fig, r1d\n\n\n\nif __name__ == '__main__':\n\n COMM = MPI.COMM_WORLD\n sa = LoadSimTIGRESSSingleSNAll()\n\n mpl.rcParams['font.size'] = 14\n savdir = '/tigress/jk11/notebook/TIGRESS-SINGLE-SN/snapshots_new'\n # if not osp.exists(savdir):\n # os.makedirs(savdir)\n\n fields = ['density', 'pressure', 'cool_rate', 'temperature']\n sa = LoadSimTIGRESSSingleSNAll()\n s0 = sa.set_model(sa.models[0]) # old cooling\n s1 = sa.set_model(sa.models[1]) # new cooling\n r1d = None\n nums = s0.nums_id0\n\n if COMM.rank == 0:\n print('nums', nums)\n nums = split_container(nums, COMM.size)\n else:\n nums = None\n\n mynums = COMM.scatter(nums, root=0)\n print('[rank, mynums]:', COMM.rank, mynums)\n\n # Measure execution time\n time0 = time.time()\n for num in mynums:\n print(num, end=' ')\n ds0 = s0.load_vtk(num=num)\n ds1 = s1.load_vtk(num=num)\n dat0 = ds0.get_field(fields, as_xarray=True)\n dat1 = ds1.get_field(fields, as_xarray=True)\n fig, r1d = plt_rprofiles(ds0, ds1, dat0, dat1, r1d=r1d, j=100)\n fig.savefig(osp.join(savdir, 'rprofiles_{0:s}.{1:s}.{2:04d}.png'.format(s0.basename, s1.basename, num)),\n dpi=200)\n\n COMM.barrier()\n if COMM.rank == 0:\n print('')\n print('################################################')\n print('# Done with models', s0.basename, s1.basename)\n print('# Execution time [sec]: {:.1f}'.format(time.time()-time0))\n print('################################################')\n print('')\n\n \n", "\nfrom astropy.convolution import convolve, convolve_fft\nfrom astropy.convolution import Gaussian1DKernel, Gaussian2DKernel\nimport numpy as np\n\ndef deriv_kernel(axis=0, dim=1, fft=False, sobel=True, gauss=False, stddev=3.0):\n if sobel:\n smooth = np.array([1, 2, 1])\n else:\n smooth = np.array([1, 1, 1])\n if fft:\n deriv = np.array([1, 0, -1])\n else:\n deriv = np.array([-1, 0, 1])\n\n if dim == 1:\n if gauss:\n return gaussian_deriv_kernel(axis=axis, stddev=stddev, oned=True)\n else:\n return deriv\n elif dim == 2:\n if gauss:\n return gaussian_deriv_kernel(axis=axis, stddev=stddev)\n if axis == 0:\n return np.einsum('i,j', deriv, smooth)\n elif axis == 1:\n return np.einsum('j,i', deriv, smooth)\n elif dim == 3:\n if axis == 0:\n return np.einsum('i,j,k', deriv, smooth, smooth)\n elif axis == 1:\n return np.einsum('j,k,i', deriv, smooth, smooth)\n elif axis == 2:\n return np.einsum('k,i,j', deriv, smooth, smooth)\n\n\ndef gaussian_deriv_kernel(axis=0, stddev=3.0, oned=False):\n if oned:\n gauss = Gaussian1DKernel(stddev)\n else:\n gauss = Gaussian2DKernel(stddev)\n x = np.linspace(0, gauss.shape[axis]-1, gauss.shape[axis])\n dkernel = deriv_central(gauss.array, x, axis=axis)\n if not oned:\n if axis == 0:\n dkernel = dkernel[:, 1:-1]\n elif axis == 1:\n dkernel = dkernel[1:-1, :]\n return dkernel/abs(dkernel).sum()\n else:\n return dkernel\n\n\ndef deriv_direct(yarr, xarr, axis=0):\n dyarr = np.diff(yarr, axis=axis)\n dxarr = np.diff(xarr)\n if yarr.ndim == 1:\n return dyarr/dxarr\n if yarr.ndim == 2:\n if axis == 0:\n dxarr = dxarr[:, np.newaxis]\n if axis == 1:\n dxarr = dxarr[np.newaxis, :]\n return dyarr/dxarr\n if yarr.ndim == 3:\n if axis == 0:\n dxarr = dxarr[:, np.newaxis, np.newaxis]\n if axis == 1:\n dxarr = dxarr[np.newaxis, :, np.newaxis]\n if axis == 2:\n dxarr = dxarr[np.newaxis, np.newaxis, :]\n return 
dyarr/dxarr\n\n\ndef deriv_central(yarr, xarr, axis=0):\n dx = xarr[2:]-xarr[:-2]\n if yarr.ndim == 1:\n dy = yarr[2:]-yarr[:-2]\n return dy/dx\n elif yarr.ndim == 2:\n yswap = yarr.swapaxes(axis, -1)\n dy = yswap[:, 2:]-yswap[:, :-2]\n dx = dx[np.newaxis, :]\n elif yarr.ndim == 3:\n yswap = yarr.swapaxes(axis, -1)\n dy = yswap[:, :, 2:]-yswap[:, :, :-2]\n dx = dx[np.newaxis, np.newaxis, :]\n\n dydx = dy/dx\n return dydx.swapaxes(axis, -1)\n\n\ndef deriv_convolve(yarr, xarr, axis=0, fft=False, gauss=True, stddev=3.0):\n kernel = deriv_kernel(axis=axis, dim=yarr.ndim,\n fft=fft, gauss=gauss, stddev=stddev)\n norm = abs(kernel).sum()\n\n # print norm,kernel.shape\n if fft:\n dy = convolve_fft(yarr, kernel/float(norm), boundary='wrap')\n else:\n dy = convolve(yarr, kernel/float(norm), normalize_kernel=False,\n boundary='extend')\n dx = xarr[1]-xarr[0]\n #print (dy/dx).max()\n return dy/dx\n\n\ndef gradient(scal, x, y, z, deriv=deriv_convolve):\n\n dsdx = deriv(scal, x, axis=2)\n dsdy = deriv(scal, y, axis=1)\n dsdz = deriv(scal, z, axis=0)\n\n return dsdx, dsdy, dsdz\n\n\ndef divergence(vx, vy, vz, x, y, z, deriv=deriv_convolve):\n\n dvxdx = deriv(vx, x, axis=2)\n dvydy = deriv(vy, y, axis=1)\n dvzdz = deriv(vz, z, axis=0)\n\n return dvxdx, dvydy, dvzdz\n\n\ndef curl(vx, vy, vz, x, y, z, deriv=deriv_convolve):\n\n dvxdy = deriv(vx, y, axis=1)\n dvxdz = deriv(vx, z, axis=0)\n dvydx = deriv(vy, x, axis=2)\n dvydz = deriv(vy, z, axis=0)\n dvzdx = deriv(vz, x, axis=2)\n dvzdy = deriv(vz, y, axis=1)\n xcomp = dvzdy - dvydz\n ycomp = dvxdz - dvzdx\n zcomp = dvydx - dvxdy\n\n return xcomp, ycomp, zcomp\n\n\ndef helicity(vx, vy, vz, x, y, z):\n\n curlx, curly, curlz = curl(vx, vy, vz, x, y, z, deriv=deriv_convolve)\n helicity = vx*curlx + vy*curly + vz*curlz\n print((helicity.mean()))\n\n return helicity\n", "import struct\nimport numpy as np\nimport glob\nimport os\nimport sys\n\n#writer \n\ndef parse_misc_info(rstfile):\n fp=open(rstfile,'rb')\n search_block=['par','time','data','star','user']\n start={}\n size={}\n start['par']=0\n iblock=0\n\n while 1:\n block=search_block[iblock]\n size[block]=fp.tell()-start[block]\n \n l=fp.readline()\n if not l: break\n \n if l.startswith(b'N_STEP') or l.startswith(b'DENSITY') or \\\n l.startswith(b'STAR') or l.startswith(b'USER'): \n iblock+=1\n start[search_block[iblock]]=start[block]+size[block]\n\n data={}\n search_block=['par','time','star','user']\n for block in search_block:\n if block in start:\n fp.seek(start[block])\n data[block]=fp.read(size[block])\n\n fp.close()\n \n return data\n\ndef write_onefile(newfile,data_part,data_par):\n\n fp=open(newfile,'wb')\n fields=['DENSITY', '1-MOMENTUM', '2-MOMENTUM', '3-MOMENTUM', 'ENERGY','POTENTIAL',\n '1-FIELD', '2-FIELD', '3-FIELD',\n 'SCALAR 0','SCALAR 1','SCALAR 2','SCALAR 3','SCALAR 4',\n 'SCALAR 5','SCALAR 6','SCALAR 7','SCALAR 8','SCALAR 9']\n for block in ['par','time']: fp.write(data_par[block])\n\n fp.write(b'DENSITY\\n')\n fp.write(data_part['DENSITY'].flatten().tobytes('C'))\n for f in fields[1:]:\n if f in list(data_part.keys()):\n #print f,data_part[f].shape\n fp.write('\\n{}\\n'.format(f).encode())\n fp.write(data_part[f].flatten().tobytes('C'))\n fp.write(b'\\n')\n for block in ['star','user']: \n if block in data_par: fp.write(data_par[block])\n fp.close()\n\n return\n\ndef write_allfile(pardata,rstdata,grids,grid_disp=np.array([0,0,0]),\n id='newrst',dname='/tigress/changgoo/rst/',itime=0,verbose=False,scalar=0):\n ngrids=len(grids)\n# if not (ds.domain['Nx'][::-1] == 
rstdata['DENSITY'].shape).all():\n# print 'mismatch in DIMENSIONS!!'\n# print 'restart data dimension:', rstdata['DENSITY'].shape\n# print 'new grid data dimension:', ds.domain['Nx'][::-1] \n#\n# return -1\n\n fields = list(rstdata.keys())\n\n cc_varnames=['DENSITY','1-MOMENTUM','2-MOMENTUM','3-MOMENTUM',\\\n 'ENERGY','POTENTIAL']\n fc_varnames=['1-FIELD','2-FIELD','3-FIELD']\n\n for g in grids:\n i=g['id']\n if i == 0:\n fname=id+'.%4.4d.rst' % itime\n else:\n fname=id+'-id%d.%4.4d.rst' % (i,itime)\n\n gis=g['is']-grid_disp\n gnx=g['Nx']\n gie=gis+gnx\n\n data={}\n for f in cc_varnames:\n if f in fields:\n data[f]=rstdata[f][gis[2]:gie[2],gis[1]:gie[1],gis[0]:gie[0]]\n \n for f in fc_varnames:\n ib,jb,kb=(0,0,0)\n if f in fields:\n if f.startswith('1'): ib=1\n if f.startswith('2'): jb=1\n if f.startswith('3'): kb=1\n data[f]=rstdata[f][gis[2]:gie[2]+kb,gis[1]:gie[1]+jb,gis[0]:gie[0]+ib]\n \n for ns in range(scalar):\n f='SCALAR %d' % ns\n if f in fields:\n data[f]=rstdata[f][gis[2]:gie[2],gis[1]:gie[1],gis[0]:gie[0]]\n if verbose: dname+fname\n write_onefile(dname+fname,data,pardata)\n\n return\n\ndef get_eint(rstdata,neg_correct=True):\n eint=rstdata['ENERGY'].copy()\n eint -= 0.5*rstdata['1-MOMENTUM']**2/rstdata['DENSITY']\n eint -= 0.5*rstdata['2-MOMENTUM']**2/rstdata['DENSITY']\n eint -= 0.5*rstdata['3-MOMENTUM']**2/rstdata['DENSITY']\n \n for i,f in enumerate(['1-FIELD','2-FIELD','3-FIELD']):\n if f is '1-FIELD': Bc=0.5*(rstdata[f][:,:,:-1]+rstdata[f][:,:,1:])\n elif f is '2-FIELD': Bc=0.5*(rstdata[f][:,:-1,:]+rstdata[f][:,1:,:])\n elif f is '3-FIELD': Bc=0.5*(rstdata[f][:-1,:,:]+rstdata[f][1:,:,:])\n eint -= 0.5*Bc**2\n \n if neg_correct:\n k_end,j_end,i_end = eint.shape\n k_str=j_str=i_str = 0\n k,j,i=np.where(eint<0)\n eavg=[]\n for kk,jj,ii in zip(k,j,i):\n kl=kk if kk==k_str else kk-1\n kh=kk+1 if kk==(k_end-1) else kk+2\n jl=jj if jj==j_str else jj-1\n jh=jj+1 if jj==(j_end-1) else jj+2\n il=ii if ii==i_str else ii-1\n ih=ii+1 if ii==(i_end-1) else ii+2\n epart=eint[kl:kh,jl:jh,il:ih]\n e_neg=epart[epart<0]\n Nneg=len(e_neg)\n eavg.append((epart.sum()-e_neg.sum())/(epart.size-e_neg.size))\n print(kk,jj,ii,eint[kk,jj,ii],eavg[-1],epart.sum(),e_neg.sum())\n eint[k,j,i]=np.array(eavg)\n if len(eint[eint<0]) > 0: sys.exit(\"negative energy persist!\")\n \n return eint\n\ndef to_etot(rstdata):\n eint=rstdata['ENERGY'].copy()\n \n eint += 0.5*rstdata['1-MOMENTUM']**2/rstdata['DENSITY']\n eint += 0.5*rstdata['2-MOMENTUM']**2/rstdata['DENSITY']\n eint += 0.5*rstdata['3-MOMENTUM']**2/rstdata['DENSITY']\n \n for i,f in enumerate(['1-FIELD','2-FIELD','3-FIELD']):\n if f is '1-FIELD': Bc=0.5*(rstdata[f][:,:,:-1]+rstdata[f][:,:,1:])\n elif f is '2-FIELD': Bc=0.5*(rstdata[f][:,:-1,:]+rstdata[f][:,1:,:])\n elif f is '3-FIELD': Bc=0.5*(rstdata[f][:-1,:,:]+rstdata[f][1:,:,:])\n eint += 0.5*Bc**2\n return eint\n\ndef degrade(rstdata,scalar=0):\n \n cc_varnames=['DENSITY','1-MOMENTUM','2-MOMENTUM','3-MOMENTUM',\\\n 'ENERGY','POTENTIAL']\n fc_varnames=['1-FIELD','2-FIELD','3-FIELD']\n\n scalar_varnames=[]\n for ns in range(scalar):\n scalar_varnames.append('SCALAR %d' % ns)\n if scalar: cc_varnames += scalar_varnames\n\n rstdata_new={}\n for f in cc_varnames:\n if f is 'ENERGY':\n data=get_eint(rstdata)\n else:\n data=rstdata[f].copy()\n shape=np.array(data.shape)/2\n newdata=np.zeros(shape,dtype='d')\n for i in range(2):\n for j in range(2):\n for k in range(2):\n newdata += data[k::2,j::2,i::2]\n rstdata_new[f]=newdata*0.125\n \n for f in fc_varnames:\n data=rstdata[f].copy()\n 
shape=np.array(data.shape)/2\n if f is '1-FIELD':\n newdata=np.zeros(shape+np.array([0,0,1]),dtype='d')\n for j in range(2):\n for k in range(2):\n newdata += data[k::2,j::2,::2]\n if f is '2-FIELD':\n newdata=np.zeros(shape+np.array([0,1,0]),dtype='d')\n for i in range(2):\n for k in range(2):\n newdata += data[k::2,::2,i::2]\n if f is '3-FIELD':\n newdata=np.zeros(shape+np.array([1,0,0]),dtype='d')\n for j in range(2):\n for i in range(2):\n newdata += data[::2,j::2,i::2]\n rstdata_new[f]=newdata*0.25\n \n rstdata_new['ENERGY']=to_etot(rstdata_new)\n return rstdata_new\n\ndef refine(rstdata,scalar=0):\n \n cc_varnames=['DENSITY','1-MOMENTUM','2-MOMENTUM','3-MOMENTUM',\\\n 'ENERGY']\n if 'POTENTIAL' in rstdata: cc_varnames += ['POTENTIAL']\n fc_varnames=['1-FIELD','2-FIELD','3-FIELD']\n scalar_varnames=[]\n for ns in range(scalar):\n scalar_varnames.append('SCALAR %d' % ns)\n \n if scalar: cc_varnames += scalar_varnames\n rstdata_new={}\n for f in cc_varnames:\n if f is 'ENERGY':\n data=get_eint(rstdata)\n else:\n data=rstdata[f]\n shape=np.array(data.shape)*2\n newdata=np.zeros(shape,dtype='d')\n for i in range(2):\n for j in range(2):\n for k in range(2):\n newdata[k::2,j::2,i::2] = data.copy()\n rstdata_new[f]=newdata\n \n for f in fc_varnames:\n data=rstdata[f]\n shape=np.array(data.shape)*2\n if f is '1-FIELD':\n newdata=np.zeros(shape-np.array([0,0,1]),dtype='d')\n idata = 0.5*(data[:,:,:-1]+data[:,:,1:])\n\n for j in range(2):\n for k in range(2):\n newdata[k::2,j::2,::2] = data.copy()\n newdata[k::2,j::2,1::2] = idata.copy()\n\n if f is '2-FIELD':\n newdata=np.zeros(shape-np.array([0,1,0]),dtype='d')\n idata = 0.5*(data[:,:-1,:]+data[:,1:,:])\n for i in range(2):\n for k in range(2):\n newdata[k::2,::2,i::2] = data.copy()\n newdata[k::2,1::2,i::2] = idata.copy()\n \n if f is '3-FIELD':\n newdata=np.zeros(shape-np.array([1,0,0]),dtype='d')\n idata = 0.5*(data[:-1,:,:]+data[1:,:,:])\n for j in range(2):\n for i in range(2):\n newdata[::2,j::2,i::2] = data.copy()\n newdata[1::2,j::2,i::2] = idata.copy()\n rstdata_new[f]=newdata\n \n rstdata_new['ENERGY']=to_etot(rstdata_new)\n return rstdata_new\n\ndef calculate_grid(Nx,NBx):\n NGrids=(np.array(Nx)/np.array(NBx)).astype('int')\n NProcs=NGrids[0]*NGrids[1]*NGrids[2]\n grids=[]\n i=0\n print(Nx, NBx, NGrids, NProcs)\n for n in range(NGrids[2]):\n for m in range(NGrids[1]):\n for l in range(NGrids[0]):\n grid={}\n grid['id']=i\n grid['is']=np.array([l*NBx[0],m*NBx[1],n*NBx[2]]).astype('int')\n grid['Nx']=np.array(NBx).astype('int')\n grids.append(grid)\n i += 1 \n\n return grids,NGrids\n\n# reader\n\ndef parse_par(rstfile):\n\n fp=open(rstfile,'rb')\n par={}\n line=fp.readline().decode('utf-8')\n \n while 1:\n\n if line.startswith('<'):\n block=line[1:line.rfind('>')]\n if block == 'par_end': break\n par[block]={}\n line=fp.readline().decode('utf-8')\n\n if block in ['problem','domain1','time']:\n sp = line.strip().split()\n if len(sp) >= 3: par[block][sp[0]]=eval(sp[2])\n else:\n sp=line.split('=')\n if len(sp) == 2: par[block][sp[0].strip()]=sp[1].split('#')[0].strip()\n\n par[block]=fp.tell()\n\n fp.close()\n \n return par\n\ndef parse_rst(var,par,fm):\n \n starpar=False\n if 'star particles' in par['configure']:\n if par['configure']['star particles'] == 'none':\n starpar=False\n else:\n starpar=True\n vtype='param'\n cc_varnames=['DENSITY','1-MOMENTUM','2-MOMENTUM','3-MOMENTUM','ENERGY','POTENTIAL']\n fc_varnames=['1-FIELD','2-FIELD','3-FIELD']\n dm=par['domain1']\n nx1=int(dm['Nx1']/dm['NGrid_x1'])\n 
nx2=int(dm['Nx2']/dm['NGrid_x2'])\n nx3=int(dm['Nx3']/dm['NGrid_x3'])\n\n if var=='N_STEP':\n ndata=1\n dtype='i'\n elif var=='TIME':\n ndata=1\n dtype='d'\n elif var=='TIME_STEP':\n ndata=1\n if starpar: ndata+=1\n dtype='d'\n elif var in cc_varnames:\n ndata=nx1*nx2*nx3\n dtype='d'\n vtype='ccvar'\n elif var in fc_varnames:\n if var.startswith('1'): nx1 += 1\n if var.startswith('2'): nx2 += 1\n if var.startswith('3'): nx3 += 1\n \n ndata=nx1*nx2*nx3\n dtype='d'\n vtype='fcvar'\n elif var.startswith('SCALAR'):\n ndata=nx1*nx2*nx3\n dtype='d'\n vtype='ccvar'\n elif var.startswith('STAR PARTICLE LIST'):\n ndata=1\n dtype='i'\n vtype='star'\n else:\n return 0\n\n fm[var]={}\n \n fm[var]['ndata']=ndata\n fm[var]['dtype']=dtype\n fm[var]['vtype']=vtype\n \n if vtype == 'ccvar' or vtype == 'fcvar':\n fm[var]['nx']=(nx3,nx2,nx1)\n \n return 1\n\ndef read_star(fp,nscal=0,ghost=True):\n# This works for MST_4pc\n# ivars=['id','merge_history','isnew','active']\n# dvars=['m','x1','x2','x3','v1','v2','v3','age','mage','mdot',\\\n# 'x1_old','x2_old','x3_old',\\\n# 'm_old','M1_old','M2_old','M3_old',\\\n# 'navg','n2avg','v1avg','v2avg','v3avg',\\\n# 'eavg','Vol','radius','SFUV','SNRate',\\\n#'SNprob',\\\n# 'x1sn','x2sn','x3sn',\\\n# ]\n# Latest restart file\n ivars=['id','merge_history','isnew','active']\n dvars=['m','x1','x2','x3','v1','v2','v3','age','mage','mdot',\\\n 'x1_old','x2_old','x3_old',\\\n ]\n# additional fields depending on the version\n for i in range(nscal):\n dvars += ['metal{}'.format(i)]\n\n if ghost:\n dvars += ['mghost','M1ghost','M2ghost','M3ghost']\n for i in range(nscal):\n dvars += ['Sghost{}'.format(i)]\n\n star_dict={}\n dtype='i'\n for var in ivars:\n data=fp.read(struct.calcsize(dtype))\n tmp=struct.unpack('<'+dtype,data)\n star_dict[var]=tmp\n\n dtype='d'\n for var in dvars:\n data=fp.read(struct.calcsize(dtype))\n tmp=struct.unpack('<'+dtype,data)\n #if var is 'm': print(var,tmp)\n star_dict[var]=tmp\n\n return star_dict\n\ndef read_rst_grid(rstfile,verbose=False,starghost=True):\n \n par=parse_par(rstfile)\n\n fp=open(rstfile,'rb')\n fp.seek(par['par_end'])\n rst={}\n data_array={}\n nscal=0\n while 1:\n l=fp.readline().decode('utf-8')\n var=l.strip()\n\n if parse_rst(var,par,rst):\n dtype=rst[var]['dtype']\n ndata=rst[var]['ndata']\n vtype=rst[var]['vtype']\n dsize=ndata*struct.calcsize(dtype)\n data=fp.read(dsize)\n if vtype == 'param': \n if verbose: print(var,struct.unpack('<'+ndata*dtype,data))\n elif vtype == 'star':\n nstar,=struct.unpack('<'+ndata*dtype,data)\n data=fp.read(dsize)\n star_list=[] \n if nstar > 0:\n for i in range(nstar):\n star_list.append(read_star(fp,nscal=nscal,ghost=starghost))\n if verbose: \n print(var, nstar)\n print(star_list[0])\n print(star_list[nstar-1])\n data_array[var]=star_list\n else: \n arr=np.asarray(struct.unpack('<'+ndata*dtype,data))\n arr.shape = rst[var]['nx']\n data_array[var]=arr\n if verbose: print(var, arr.mean(), arr.shape)\n if var.startswith('SCALAR'): nscal += 1\n fp.readline()\n else: \n break\n if verbose: print(l, fp.tell())\n fp.close()\n\n return rst,data_array\n\ndef read(rstfile,grids,NGrids,parfile=None,verbose=False,starghost=True):\n if parfile==None: par=parse_par(rstfile)\n else: par=parse_par(parfile)\n nprocs=len(grids)#par['domain1']['AutoWithNProc']\n field_maps=[]\n rstdata={}\n nx=NGrids*grids[0]['Nx']\n nx=nx[::-1]\n #nx=ds.domain['Nx'][::-1]\n print(nx,nprocs)\n dirname=os.path.dirname(rstfile)\n basename=os.path.basename(rstfile)\n\n 
fm,data=read_rst_grid(rstfile,verbose=verbose,starghost=starghost)\n\n g=grids[0]\n gis=g['is']\n gnx=g['Nx']\n gie=gis+gnx\n\n print(fm['DENSITY']['nx'],gnx)\n\n\n for k in fm:\n ib,jb,kb=(0,0,0)\n if fm[k]['vtype'] == 'ccvar':\n rstdata[k]=np.empty(nx,dtype=fm[k]['dtype'])\n rstdata[k][gis[2]:gie[2],gis[1]:gie[1],gis[0]:gie[0]]=data[k]\n elif fm[k]['vtype'] == 'fcvar':\n if k.startswith('1'): ib=1\n if k.startswith('2'): jb=1\n if k.startswith('3'): kb=1\n rstdata[k]=np.empty((nx[0]+kb,nx[1]+jb,nx[2]+ib),dtype=fm[k]['dtype'])\n rstdata[k][gis[2]:gie[2]+kb,gis[1]:gie[1]+jb,gis[0]:gie[0]+ib]=data[k]\n#for i in range(nprocs):\n for i in range(1,nprocs):\n g=grids[i]\n gis=g['is']\n gnx=g['Nx']\n gie=gis+gnx\n# if i % 50 == 0: \n# print i,gis,gie\n# print rstfile,g['filename']\n rstfname = '%s/%s-id%d%s' % (dirname,basename[:-9],i,basename[-9:])\n if not os.path.isfile(rstfname):\n rstfname = '%s/../id%d/%s-id%d%s' % (dirname,i,basename[:-9],i,basename[-9:])\n fm,data=read_rst_grid(rstfname,starghost=starghost)\n\n if verbose > 1: print(i,fm['DENSITY']['nx'],gnx)\n\n for k in fm:\n ib,jb,kb=(0,0,0)\n if fm[k]['vtype'] == 'ccvar':\n rstdata[k][gis[2]:gie[2],gis[1]:gie[1],gis[0]:gie[0]]=data[k]\n elif fm[k]['vtype'] == 'fcvar':\n if k.startswith('1'): ib=1\n if k.startswith('2'): jb=1\n if k.startswith('3'): kb=1\n rstdata[k][gis[2]:gie[2]+kb,gis[1]:gie[1]+jb,gis[0]:gie[0]+ib]=data[k]\n\n return rstdata\n\ndef read_part(rstfile,grids,nx,verbose=False):\n nprocs=len(grids)\n field_maps=[]\n rstdata={}\n print(nx,nprocs)\n\n basename=os.path.basename(rstfile)\n pid=basename[:-9]\n fm,data=read_rst_grid(rstfile,verbose=verbose)\n\n g=grids[0]\n gis=g['is']\n gnx=g['Nx']\n gie=gis+gnx\n ks=gis[2]\n\n print(fm['DENSITY']['nx'],gnx)\n\n\n for k in fm:\n ib,jb,kb=(0,0,0)\n if fm[k]['vtype'] == 'ccvar':\n rstdata[k]=np.empty(nx,dtype=fm[k]['dtype'])\n elif fm[k]['vtype'] == 'fcvar':\n if k.startswith('1'): ib=1\n if k.startswith('2'): jb=1\n if k.startswith('3'): kb=1\n rstdata[k]=np.empty((nx[0]+kb,nx[1]+jb,nx[2]+ib),dtype=fm[k]['dtype'])\n\n for i in range(nprocs):\n g=grids[i]\n gis=g['is']\n gnx=g['Nx']\n gie=gis+gnx\n gid=g['id']\n if gid > 0:\n rstfname = rstfile.replace('{}.'.format(pid),'{}-id{}.'.format(pid,gid))\n else:\n rstfname = rstfile\n if not os.path.isfile(rstfname):\n rstfname = rstfile.replace('id{}/{}.'.format(gid,pid),\n 'id{}/{}-id{}.'.format(gid,pid,gid))\n\n fm,data=read_rst_grid(rstfname)\n\n if verbose > 1: print(i,fm['DENSITY']['nx'],gnx)\n\n for k in fm:\n ib,jb,kb=(0,0,0)\n if fm[k]['vtype'] == 'ccvar':\n rstdata[k][gis[2]-ks:gie[2]-ks,gis[1]:gie[1],gis[0]:gie[0]]=data[k]\n elif fm[k]['vtype'] == 'fcvar':\n if k.startswith('1'): ib=1\n if k.startswith('2'): jb=1\n if k.startswith('3'): kb=1\n rstdata[k][gis[2]-ks:gie[2]-ks+kb,gis[1]:gie[1]+jb,gis[0]:gie[0]+ib]=data[k]\n\n return rstdata\n\n\ndef set_xpos_with_dm(dm):\n le=np.array([dm['x1min'],dm['x2min'],dm['x3min']])\n re=np.array([dm['x1max'],dm['x2max'],dm['x3max']])\n Lx=re-le\n Nx=np.array([dm['Nx1'],dm['Nx2'],dm['Nx3']])\n dx=Lx/Nx\n xc={}\n xf={}\n for i,ax in zip(list(range(3)),['x','y','z']):\n xf[ax]=np.arange(le[i],re[i]+dx[i],dx[i])\n xc[ax]=np.arange(le[i],re[i],dx[i])+0.5*dx[i]\n return xf,xc\n\n\ndef set_xpos(ds):\n le=ds.domain['left_edge']\n re=ds.domain['right_edge']\n dx=ds.domain['dx']\n xc={}\n xf={}\n for i,ax in zip(list(range(3)),['x','y','z']):\n xf[ax]=np.arange(le[i],re[i]+dx[i],dx[i])\n xc[ax]=np.arange(le[i],re[i],dx[i])+0.5*dx[i]\n return xf,xc\n\ndef to_hdf5(h5file,rstdata,ds):\n import 
h5py\n\n Bx=rstdata['1-FIELD']\n By=rstdata['2-FIELD']\n Bz=rstdata['3-FIELD']\n xf,xc=set_xpos(ds)\n\n f=h5py.File(h5file,'a')\n for name in ['Bfields','cell_centered_coord','face_centered_coord']:\n if name in list(f.keys()):\n grp=f[name]\n else:\n grp=f.create_group(name)\n print(name)\n\n grp=f['Bfields']\n for name,B in zip(['Bx','By','Bz'],[Bx,By,Bz]):\n if name in list(grp.keys()):\n dset=grp[name]\n else:\n dset=grp.create_dataset(name,B.shape,data=B,dtype=B.dtype)\n\n for k in list(grp.keys()):\n for i,ax in enumerate(['z','y','x']):\n grp[k].dims[i].label=ax\n\n bfield=f['Bfields']\n ccoord=f['cell_centered_coord']\n fcoord=f['face_centered_coord']\n for ax in ['x','y','z']:\n if ax in list(ccoord.keys()):\n print(ax)\n else:\n ccoord[ax] = xc[ax]\n \n if ax in list(fcoord.keys()):\n print(ax)\n else:\n fcoord[ax] = xf[ax]\n\n for b in list(bfield.keys()):\n bax=b[-1]\n\n for i,ax in enumerate(['z','y','x']):\n if ax == bax:\n bfield[b].dims[i].attach_scale(fcoord[ax])\n else:\n bfield[b].dims[i].attach_scale(ccoord[ax])\n\n f.close()\n\ndef divB(rstdata):\n Bx=rstdata['1-FIELD']\n By=rstdata['2-FIELD']\n Bz=rstdata['3-FIELD']\n dBx=np.diff(Bx,axis=2)\n dBy=np.diff(By,axis=1)\n dBz=np.diff(Bz,axis=0)\n dB = dBx+dBy+dBz\n return dB\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.sqrt", "matplotlib.pyplot.plot", "matplotlib.pyplot.gca", "matplotlib.pyplot.gcf", "matplotlib.gridspec.GridSpec", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "numpy.logspace", "matplotlib.pyplot.ylim", "numpy.logical_and", "matplotlib.pyplot.ylabel", "matplotlib.colors.LogNorm", "matplotlib.gridspec.GridSpecFromSubplotSpec", "matplotlib.pyplot.yscale", "matplotlib.pyplot.subplots", "matplotlib.pyplot.sca", "matplotlib.pyplot.xlim", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel" ], [ "pandas.DataFrame" ], [ "matplotlib.pyplot.tight_layout", "numpy.sqrt", "matplotlib.pyplot.scatter", "matplotlib.pyplot.ylim", "matplotlib.pyplot.yscale", "matplotlib.pyplot.sca", "numpy.tile", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlim", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xscale", "matplotlib.pyplot.ylabel" ], [ "numpy.array", "numpy.diff", "numpy.linspace", "numpy.einsum" ], [ "numpy.arange", "numpy.empty", "numpy.diff", "numpy.array", "numpy.where", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kkleidal/GatedPixelCNNPyTorch
[ "286298a0cd81ed2f6cb918fd39ce4da2c0e92802" ]
[ "models/components/pixelcnn.py" ]
[ "# Loosely derived from https://github.com/jzbontar/pixelcnn-pytorch/blob/master/main.py\n# and moreso derived from https://github.com/rampage644/wavenet/blob/master/wavenet/models.py\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport numpy as np\nimport tqdm\n\n#owidth = floor((width + 2*padW - kW) / dW + 1)\n#oheight = floor((height + 2*padH - kH) / dH + 1)\n#dW is stride, assuming 1:\n# kW // 2 = padW\ndef same_padding(kernel_size):\n # assumming stride 1\n if isinstance(kernel_size, int):\n return kernel_size // 2\n else:\n return (kernel_size[0] // 2, kernel_size[1] // 2)\n\n# PyTorch port of\nclass MaskedConvolution2D(nn.Conv2d):\n def __init__(self, in_channels, out_channels, kernel_size, \n *args, mask='B', vertical=False, mask_mode=\"noblind\", **kwargs):\n if \"padding\" not in kwargs:\n assert \"stride\" not in kwargs\n kwargs[\"padding\"] = same_padding(kernel_size)\n remove = {\"conditional_features\", \"conditional_image_channels\"}\n for feature in remove:\n if feature in kwargs:\n del kwargs[feature]\n super(MaskedConvolution2D, self).__init__(in_channels,\n out_channels, kernel_size, *args, **kwargs)\n Cout, Cin, kh, kw = self.weight.size()\n pre_mask = np.ones_like(self.weight.data.cpu().numpy()).astype(np.float32)\n yc, xc = kh // 2, kw // 2\n\n assert mask_mode in {\"noblind\", \"turukin\", \"fig1-van-den-oord\"}\n if mask_mode == \"noblind\":\n # context masking - subsequent pixels won't hav access\n # to next pixels (spatial dim)\n if vertical:\n if mask == 'A':\n # In the first layer, can ONLY access pixels above it\n pre_mask[:, :, yc:, :] = 0.0\n else:\n # In the second layer, can access pixels above or even with it.\n # Reason being that the pixels to the right or left of the current pixel\n # only have a receptive field of the layer above the current layer and up.\n pre_mask[:, :, yc+1:, :] = 0.0\n else:\n # All rows after center must be zero\n pre_mask[:, :, yc+1:, :] = 0.0\n ### All rows before center must be zero # XXX: not actually necessary\n ##pre_mask[:, :, :yc, :] = 0.0\n # All columns after center in center row must be zero\n pre_mask[:, :, yc, xc+1:] = 0.0\n\n if mask == 'A':\n # Center must be zero in first layer\n pre_mask[:, :, yc, xc] = 0.0\n # same pixel masking - pixel won't access next color (conv filter dim)\n #def bmask(i_out, i_in):\n # cout_idx = np.expand_dims(np.arange(Cout) % 3 == i_out, 1)\n # cin_idx = np.expand_dims(np.arange(Cin) % 3 == i_in, 0)\n # a1, a2 = np.broadcast_arrays(cout_idx, cin_idx)\n # return a1 * a2\n\n #for j in range(3):\n # pre_mask[bmask(j, j), yc, xc] = 0.0 if mask == 'A' else 1.0\n\n #pre_mask[bmask(0, 1), yc, xc] = 0.0\n #pre_mask[bmask(0, 2), yc, xc] = 0.0\n #pre_mask[bmask(1, 2), yc, xc] = 0.0\n elif mask_mode == \"fig1-van-den-oord\":\n if vertical:\n pre_mask[:, :, yc:, :] = 0.0\n else:\n # All rows after center must be zero\n pre_mask[:, :, yc+1:, :] = 0.0\n ### All rows before center must be zero # XXX: not actually necessary\n ##pre_mask[:, :, :yc, :] = 0.0\n # All columns after center in center row must be zero\n pre_mask[:, :, yc, xc+1:] = 0.0\n\n if mask == 'A':\n # Center must be zero in first layer\n pre_mask[:, :, yc, xc] = 0.0\n elif mask_mode == \"turukin\":\n pre_mask[:, :, yc+1:, :] = 0.0\n pre_mask[:, :, yc, xc+1:] = 0.0\n if mask == 'A':\n pre_mask[:, :, yc, xc] = 0.0\n\n print(\"%s %s MASKED CONV: %d x %d. 
Mask:\" % (mask, \"VERTICAL\" if vertical else \"HORIZONTAL\", kh, kw))\n print(pre_mask[0, 0, :, :])\n\n self.register_buffer(\"mask\", torch.from_numpy(pre_mask))\n\n def __call__(self, x):\n self.weight.data = self.weight.data * self.mask\n return super(MaskedConvolution2D, self).forward(x)\n\nclass CroppedConvolution(nn.Conv2d):\n def __init__(self, in_channels, out_channels, kernel_size,\n *args, **kwargs):\n if \"padding\" not in kwargs:\n assert \"stride\" not in kwargs\n kwargs[\"padding\"] = same_padding(kernel_size)\n super().__init__(in_channels, out_channels, kernel_size,\n *args, **kwargs)\n\n def __call__(self, x):\n ret = super().__call__(x)\n _, _, kh, kw = self.weight.size()\n pad_h, pad_w = self.padding\n h_crop = -(kh + 1) if pad_h == kh else None\n w_crop = -(kw + 1) if pad_w == kw else None\n return ret[:, :, :h_crop, :w_crop]\n\nclass PixelCNNGatedLayer(nn.Module):\n def __init__(self, primary, in_channels, out_channels, filter_size,\n mask='B', nobias=False, conditional_features=None,\n conditional_image_channels=None, residual_vertical=False,\n residual_horizontal=True, skips=False, gated=True,\n relu_out=False, horizontal_2d_convs=False, mask_mode=\"noblind\"):\n super().__init__()\n self.primary = primary\n if primary:\n assert mask == 'A'\n assert not residual_vertical\n assert not residual_horizontal\n else:\n assert mask == 'B'\n self.out_channels = out_channels\n self.gated = gated\n gm = 2 if gated else 1\n self.vertical_conv = MaskedConvolution2D(\n in_channels, gm * out_channels, (filter_size, filter_size),\n mask=mask, vertical=True, mask_mode=mask_mode)\n self.v_to_h_conv = nn.Conv2d(gm * out_channels, gm * out_channels, 1)\n\n self.horizontal_conv = MaskedConvolution2D(\n in_channels, gm * out_channels,\n (filter_size if horizontal_2d_convs else 1, filter_size), # XXX: traditionally (1, filter_size),\n mask=mask, vertical=False, mask_mode=mask_mode)\n\n self.residual_vertical = None\n if residual_vertical:\n self.residual_vertical = nn.Conv2d(in_channels, gm * out_channels, 1)\n\n self.horizontal_output = nn.Conv2d(out_channels, out_channels, 1)\n self.horizontal_skip = None\n if skips:\n self.horizontal_skip = nn.Conv2d(out_channels, out_channels, 1)\n self.conditional_vector = conditional_features is not None\n self.conditional_image = conditional_image_channels is not None\n if self.conditional_image:\n self.cond_conv_h = nn.Conv2d(conditional_image_channels, gm * out_channels, 1, bias=False)\n self.cond_conv_v = nn.Conv2d(conditional_image_channels, gm * out_channels, 1, bias=False)\n if self.conditional_vector:\n self.cond_fc_h = nn.Linear(conditional_features, gm * out_channels, bias=False)\n self.cond_fc_v = nn.Linear(conditional_features, gm * out_channels, bias=False)\n self.residual_horizontal = residual_horizontal\n self.relu_out = relu_out\n\n @classmethod\n def primary(cls, in_channels, out_channels, filter_size,\n nobias=False, conditional_features=None,\n conditional_image_channels=None, \n skips=False, gated=True,\n relu_out=False, horizontal_2d_convs=False, mask_mode=\"noblind\"):\n return cls(True, in_channels, out_channels, filter_size, nobias=nobias,\n mask='A', conditional_features=conditional_features,\n conditional_image_channels=conditional_image_channels,\n residual_vertical=False, residual_horizontal=False,\n skips=skips, gated=gated,\n relu_out=relu_out, horizontal_2d_convs=horizontal_2d_convs,\n mask_mode=mask_mode)\n \n @classmethod\n def secondary(cls, in_channels, out_channels, filter_size,\n nobias=False, 
conditional_features=None,\n conditional_image_channels=None, residual_vertical=True,\n residual_horizontal=True, skips=False, gated=True,\n relu_out=False, horizontal_2d_convs=False, mask_mode=\"noblind\"):\n return cls(False, in_channels, out_channels, filter_size, nobias=nobias,\n mask='B', conditional_features=conditional_features,\n conditional_image_channels=conditional_image_channels,\n residual_vertical=residual_vertical, residual_horizontal=residual_horizontal,\n skips=skips, gated=gated, relu_out=relu_out,\n horizontal_2d_convs=horizontal_2d_convs, mask_mode=mask_mode)\n\n def _gate(self, x):\n if self.gated:\n return F.tanh(x[:,:self.out_channels]) * F.sigmoid(x[:,self.out_channels:])\n else:\n return x\n\n def __call__(self, v, h, conditional_image=None, conditional_vector=None):\n horizontal_preactivation = self.horizontal_conv(h) # 1xN\n vertical_preactivation = self.vertical_conv(v) # NxN\n v_to_h = self.v_to_h_conv(vertical_preactivation) # 1x1\n if self.residual_vertical is not None:\n vertical_preactivation = vertical_preactivation + self.residual_vertical(v) # 1x1 to residual\n horizontal_preactivation = horizontal_preactivation + v_to_h\n if self.conditional_image and conditional_image is not None:\n horizontal_preactivation = horizontal_preactivation + \\\n self.cond_conv_h(conditional_image)\n vertical_preactivation = vertical_preactivation + \\\n self.cond_conv_v(conditional_image)\n if self.conditional_vector and conditional_vector is not None:\n horizontal_preactivation = horizontal_preactivation + \\\n self.cond_fc_h(conditional_vector).unsqueeze(-1).unsqueeze(-1)\n vertical_preactivation = vertical_preactivation + \\\n self.cond_fc_v(conditional_vector).unsqueeze(-1).unsqueeze(-1)\n v_out = self._gate(vertical_preactivation)\n h_activated = self._gate(horizontal_preactivation)\n h_skip = None\n if self.horizontal_skip is not None:\n h_skip = self.horizontal_skip(h_activated)\n h_preres = self.horizontal_output(h_activated)\n if self.residual_horizontal:\n h_out = h + h_preres\n else:\n h_out = h_preres\n if self.relu_out:\n v_out = F.relu(v_out)\n h_out = F.relu(h_out)\n if h_skip is not None:\n h_skip = F.relu(h_skip)\n return v_out, h_out, h_skip\n\nclass PixelCNNGatedStack(nn.Module):\n def __init__(self, *args):\n super().__init__()\n layers = list(args)\n for i, layer in enumerate(layers):\n assert isinstance(layer, PixelCNNGatedLayer)\n if i == 0:\n assert layer.primary\n else:\n assert not layer.primary\n self.layers = nn.ModuleList(layers)\n\n def __call__(self, v, h, skips=None, conditional_image=None, conditional_vector=None):\n if skips is None:\n skips = []\n else:\n skips = [skips]\n for layer in self.layers:\n v, h, skip = layer(v, h, conditional_image=conditional_image, conditional_vector=conditional_vector)\n if skip is not None:\n skips.append(skip)\n if len(skips) == 0:\n skips = None\n else:\n skips = torch.cat(skips, 1)\n return v, h, skips\n\nclass PixelCNN(nn.Module):\n def _sample_from(self, probas):\n N, level_count = probas.size()\n val = torch.rand(N, 1)\n if probas.is_cuda:\n val = val.cuda()\n cutoffs = torch.cumsum(probas, dim=1)\n _, idx = torch.max(cutoffs > val, dim=1)\n out = idx.float() / (level_count - 1)\n return out \n\n def sample_pixel(self, canvas, row, col, channel, **kwargs):\n probs = F.softmax(self(canvas, **kwargs)[:, :, channel, row, col], dim=1)\n return self._sample_from(probs.data)\n\n def generate_samples(self, height, width,\n channels, count, show_prog=False, **kwargs):\n samples = torch.zeros(count, channels, 
height, width).float()\n if next(self.parameters()).data.is_cuda:\n samples = samples.cuda()\n samples = Variable(samples, requires_grad=False)\n \n def generate(prog=None):\n for i in range(height):\n for j in range(width):\n for k in range(channels):\n samples.data[:, k, i, j] = self.sample_pixel(samples,\n i, j, k, **kwargs)\n if prog is not None:\n prog.update()\n if show_prog:\n with tqdm.tqdm(total=height * width * channels) as prog:\n generate(prog=prog)\n else:\n generate()\n return samples\n\nif __name__ == \"__main__\":\n x = PixelCNN(3, 128, 5, 16, 8, conditional_features=10,\n conditional_image_channels=8)\n print(x)\n inp = Variable(torch.randn(128, 3, 28, 28))\n cv = Variable(torch.randn(128, 10))\n ci = Variable(torch.randn(128, 8, 28, 28))\n print(x(inp, conditional_vector=cv, conditional_image=ci))\n print(x.generate_samples(28, 28, 3, 4, conditional_vector=cv, conditional_image=ci, show_prog=True))\n" ]
[ [ "torch.max", "torch.cat", "torch.zeros", "torch.randn", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.from_numpy", "torch.nn.Linear", "torch.nn.functional.sigmoid", "torch.nn.functional.relu", "torch.rand", "torch.nn.functional.tanh", "torch.cumsum", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zhengye1995/datafountain_siweituxin_autodriver_det
[ "2c2df76fb9942ddc334730cc5bc447be66440e22" ]
[ "tools/convert_datasets/trans_txt2json.py" ]
[ "# *utf-8*\nimport os\nimport json\nimport numpy as np\nfrom tqdm import tqdm\nimport cv2\n\ndefect_name2label = {\n 'red': 1, 'green': 2, 'yellow': 3, 'red_left': 4, 'red_right': 5, 'yellow_left': 6, 'yellow_right': 7,\n 'green_left': 8, 'green_right': 9, 'red_forward': 10, 'green_forward': 11, 'yellow_forward': 12,\n 'horizon_red': 13, 'horizon_green': 14, 'horizon_yellow': 15, 'off': 16, 'traffic_sign': 17,\n 'car': 18, 'motor': 19, 'bike': 20, 'bus': 21, 'truck': 22, 'suv': 23, 'express': 24, 'person': 25,\n}\n\n\nclass Siwei2COCO:\n\n def __init__(self, mode=\"train\"):\n self.images = []\n self.annotations = []\n self.categories = []\n self.img_id = 0\n self.ann_id = 0\n self.mode = mode\n\n\n def to_coco(self, anno_file, img_dir):\n self._init_categories()\n with open(anno_file, 'r') as f:\n annos = f.readlines()\n\n for anno in tqdm(annos):\n try:\n img_name, seg_name, bboxs = anno.strip().split(' ', 2)\n except:\n img_name, seg_name = anno.strip().split(' ', 2)\n print(img_name)\n continue\n\n bboxs = bboxs.split(' ')\n # print(bboxs)\n\n img_path = os.path.join(img_dir, img_name)\n # img = cv2.imread(img_path)\n # h, w, _ = img.shape\n h, w = 720, 1280\n self.images.append(self._image(img_path, h, w))\n for bbox in zip(bboxs):\n # print(list(bbox)[0])\n xmin, ymin, xmax, ymax, class_id, _ = list(bbox)[0].split(',')\n # print(xmin, ymin, xmax, ymax, class_id)\n annotation = self._annotation(class_id, [float(xmin), float(ymin), float(xmax), float(ymax)], h, w)\n self.annotations.append(annotation)\n self.ann_id += 1\n self.img_id += 1\n instance = {}\n instance['info'] = 'fabric defect'\n instance['license'] = ['none']\n instance['images'] = self.images\n instance['annotations'] = self.annotations\n instance['categories'] = self.categories\n return instance\n\n def _init_categories(self):\n # for v in range(1, 16):\n # print(v)\n # category = {}\n # category['id'] = v\n # category['name'] = str(v)\n # category['supercategory'] = 'defect_name'\n # self.categories.append(category)\n for k, v in defect_name2label.items():\n category = {}\n category['id'] = v\n category['name'] = k\n category['supercategory'] = 'siweituxin_name'\n self.categories.append(category)\n\n def _image(self, path, h, w):\n image = {}\n image['height'] = h\n image['width'] = w\n image['id'] = self.img_id\n image['file_name'] = os.path.basename(path)\n return image\n\n def _annotation(self, label, bbox, h, w):\n area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])\n # area=abs(bbox[2]-bbox[0])*abs(bbox[3]-bbox[1])\n if area <= 0:\n print(bbox)\n input()\n points = [[bbox[0], bbox[1]], [bbox[2], bbox[1]], [bbox[2], bbox[3]], [bbox[0], bbox[3]]]\n annotation = {}\n annotation['id'] = self.ann_id\n annotation['image_id'] = self.img_id\n annotation['category_id'] = int(label)\n annotation['segmentation'] = [np.asarray(points).flatten().tolist()]\n annotation['bbox'] = self._get_box(points, h, w)\n annotation['iscrowd'] = 0\n annotation['area'] = area\n return annotation\n\n def _get_box(self, points, img_h, img_w):\n min_x = min_y = np.inf\n max_x = max_y = 0\n for x, y in points:\n min_x = min(min_x, x)\n min_y = min(min_y, y)\n max_x = max(max_x, x)\n max_y = max(max_y, y)\n '''coco,[x,y,w,h]'''\n w = max_x - min_x\n h = max_y - min_y\n if w > img_w:\n w = img_w\n if h > img_h:\n h = img_h\n return [min_x, min_y, w, h]\n\n def save_coco_json(self, instance, save_path):\n with open(save_path, 'w') as fp:\n json.dump(instance, fp, indent=1, separators=(',', ': '))\n\n\n'''转换有瑕疵的样本为coco格式'''\nimg_dir = 
\"data/siweituxin/train_image\"\nanno_dir = \"data/siweituxin/Annotations/train.txt\"\nsiwei2coco = Siwei2COCO()\ntrain_instance = siwei2coco.to_coco(anno_dir, img_dir)\n\nsiwei2coco.save_coco_json(train_instance,\n \"data/siweituxin/annotations/\"\n + 'instances_{}.json'.format(\"train\"))" ]
[ [ "numpy.asarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Mrpatekful/supervised-translation
[ "d03db6a0fc25900fd42b8057a12adad0b8d025f8" ]
[ "src/model.py" ]
[ "\"\"\"\n@author: Patrik Purgai\n@copyright: Copyright 2019, supervised-translation\n@license: MIT\n@email: [email protected]\n@date: 2019.04.04.\n\"\"\"\n\n# pylint: disable=no-member\n# pylint: disable=not-callable\n\nimport torch\nimport random\n\nfrom torch.nn.modules import (\n Module, ModuleList)\n\nfrom torch.nn.functional import (\n log_softmax, softmax, linear,\n embedding)\n\nfrom torch.nn import (\n Linear, Softmax, Parameter, \n GRU, Dropout, Embedding)\n\n\ndef setup_model_args(parser):\n \"\"\"\n Sets up the model arguments.\n \"\"\"\n parser.add_argument(\n '--hidden_size',\n type=int,\n default=256,\n help='Hidden size of the model.')\n parser.add_argument(\n '--embedding_size',\n type=int,\n default=128,\n help='Embedding dimension for the tokens.')\n\n\ndef create_model(args, tokenizers, device):\n \"\"\"\n Creates the sequence to sequence model.\n \"\"\"\n source_tokenizer, target_tokenizer = tokenizers\n\n special_ids = target_tokenizer.bos_id(), \\\n target_tokenizer.eos_id(), source_tokenizer.pad_id(), \\\n target_tokenizer.pad_id(), source_tokenizer.unk_id()\n\n tensor_indices = [\n torch.tensor(i).to(device) for i in special_ids]\n\n model = Seq2Seq(\n source_vocab_size=len(source_tokenizer),\n target_vocab_size=len(target_tokenizer),\n indices=tensor_indices,\n **vars(args)).to(device)\n\n return model\n\n\ndef neginf(dtype):\n \"\"\"\n Return a representable finite \n number near -inf for a dtype.\n \"\"\"\n if dtype is torch.float16:\n return -65504\n else:\n return -1e20\n\n\n# NOTE currently unused function\ndef embeddeding_dropout(embed, inputs, training, mask=None, p=0.1):\n \"\"\"\n Applies dropout to the embedding layer based on\n https://arxiv.org/pdf/1512.05287.pdf. The code is\n based on salesforce/awd-lstm-lm.\n \"\"\"\n if not training:\n masked_embed_weight = embed.weight\n if mask is not None:\n # masks might be provided, which is useful for shared\n # dropout masks over the whole sequence of inputs\n masked_embed_weight = mask * embed.weight\n elif p:\n mask = embed.weight.new_empty((embed.weight.size(0), 1))\n mask.bernoulli_(1 - p).expand_as(embed.weight) / (1 - p)\n masked_embed_weight = mask * embed.weight\n else:\n masked_embed_weight = embed.weight\n\n return embedding(\n inputs, masked_embed_weight, embed.padding_idx, \n embed.max_norm, embed.norm_type,\n embed.scale_grad_by_freq, embed.sparse)\n\n\nclass Seq2Seq(Module):\n \"\"\"\n The sequence-to-sequence model.\n \"\"\"\n\n def __init__(self, embedding_size, hidden_size, indices,\n source_vocab_size, target_vocab_size, **kwargs):\n super().__init__()\n\n self.start_idx, self.end_idx, \\\n self.pad_idx, _, self.unk_idx = indices\n\n self.encoder = Encoder(\n input_size=embedding_size,\n hidden_size=hidden_size,\n pad_idx=self.pad_idx,\n vocab_size=source_vocab_size)\n\n self.decoder = Decoder(\n input_size=embedding_size,\n hidden_size=hidden_size,\n vocab_size=target_vocab_size)\n\n def forward(self, inputs, attn_mask=None, targets=None, \n max_len=50):\n \"\"\"\n Runs the inputs through the encoder-decoder model.\n \"\"\"\n # inputs are expexted in sequence-first format\n batch_size = inputs.size(0)\n max_len = targets.size(1) if targets is not None \\\n else max_len\n\n if attn_mask is None:\n attn_mask = inputs.eq(self.pad_idx)\n\n # the number of layers in the decoder must be equal\n # to the number of layers in the encoder because of\n # the initial hidden states from the encoder\n encoder_outputs, hidden_states = self.encoder(inputs)\n\n scores = []\n preds = 
self.start_idx.detach().expand(batch_size, 1)\n\n for idx in range(max_len):\n # if targets are provided and training then apply\n # teacher forcing 50% of the time\n if targets is not None and self.training and \\\n random.random() > 0.5:\n prev_output = targets[:, idx].unsqueeze(1)\n else:\n prev_output = preds[:, -1:]\n\n step_scores, hidden_states = self.decoder(\n inputs=prev_output,\n encoder_outputs=encoder_outputs,\n prev_hiddens=hidden_states,\n attn_mask=attn_mask)\n\n _, step_preds = step_scores.max(dim=-1)\n\n preds = torch.cat([preds, step_preds], dim=-1)\n scores.append(step_scores)\n\n scores = torch.cat(scores, dim=1)\n preds = preds.narrow(1, 1, preds.size(1) - 1)\n\n return scores, preds\n\n\nclass Encoder(Module):\n \"\"\"\n Encoder module for the seq2seq model.\n \"\"\"\n\n def __init__(self, input_size, hidden_size, pad_idx,\n vocab_size):\n super().__init__()\n\n self.embedding = Embedding(\n num_embeddings=vocab_size,\n embedding_dim=input_size,\n padding_idx=pad_idx)\n\n self.dropout = Dropout(p=0.1)\n\n self.merge = Linear(\n in_features=hidden_size * 2,\n out_features=hidden_size,\n bias=False)\n\n # creating rnn layer as module list so locked\n # dropout can be applied between each layer\n # NOTE: currently not using weight drop, because\n # it is incompatible with apex\n self.rnn = ModuleList([\n GRU(input_size=input_size,\n hidden_size=hidden_size,\n bidirectional=True,\n batch_first=True)] + [\n GRU(input_size=hidden_size,\n hidden_size=hidden_size,\n batch_first=True)\n for _ in range(2)\n ])\n\n def forward(self, inputs):\n \"\"\"\n Computes the embeddings and runs them through an RNN.\n \"\"\"\n embedded = self.embedding(inputs)\n embedded = self.dropout(embedded)\n\n outputs, hidden_state = self.rnn[0](embedded)\n\n # merging the two directions of bidirectional layer\n # by summing along the first axis\n hidden_states = [hidden_state.sum(0, keepdim=True)]\n outputs = self.merge(outputs)\n\n for layer in self.rnn[1:]:\n outputs, hidden_state = layer(outputs)\n outputs = self.dropout(outputs)\n hidden_states.append(hidden_state)\n\n return outputs, hidden_states\n\n\nclass Decoder(Module):\n \"\"\"\n Decoder module for the seq2seq.\n \"\"\"\n\n def __init__(self, input_size, hidden_size, vocab_size):\n super().__init__()\n\n self.embedding = Embedding(\n num_embeddings=vocab_size,\n embedding_dim=input_size)\n\n self.dropout = Dropout(p=0.1)\n\n self.rnn = ModuleList([\n GRU(input_size=input_size,\n hidden_size=hidden_size,\n batch_first=True)] + [\n GRU(input_size=hidden_size,\n hidden_size=hidden_size,\n batch_first=True)\n for _ in range(2)\n ])\n\n self.attn = Attention(hidden_size=hidden_size)\n\n self.out_bias = Parameter(torch.zeros((vocab_size, )))\n self.out_weight = self.embedding.weight\n\n def forward(self, inputs, encoder_outputs, prev_hiddens,\n attn_mask=None, embed_mask=None):\n \"\"\"\n Applies decoding with attention mechanism, mixture\n of sofmaxes and multi dropout during training.\n MoS implementation is taken from \n \"\"\"\n embedded = self.embedding(inputs)\n output = self.dropout(embedded)\n\n hidden_states = []\n for idx, layer in enumerate(self.rnn):\n output, hidden_state = layer(\n output, prev_hiddens[idx])\n output = self.dropout(output)\n hidden_states.append(hidden_state)\n\n # NOTE attention weights are not used currently\n # (they could be exported for visualization)\n output, _ = self.attn(\n decoder_output=output,\n hidden_state=hidden_state,\n encoder_outputs=encoder_outputs,\n attn_mask=attn_mask)\n\n logits = linear(\n 
output, self.out_weight, self.out_bias)\n\n log_probs = log_softmax(logits, dim=-1)\n\n return log_probs, hidden_states\n\n\nclass Attention(Module):\n \"\"\"\n Luong style general attention from \n https://arxiv.org/pdf/1508.04025.pdf.\n \"\"\"\n\n def __init__(self, hidden_size):\n super().__init__()\n\n self.project = Linear(\n in_features=hidden_size,\n out_features=hidden_size,\n bias=False)\n\n self.combine = Linear(\n in_features=hidden_size * 2,\n out_features=hidden_size,\n bias=False)\n\n def forward(self, decoder_output, hidden_state, \n encoder_outputs, attn_mask=None):\n \"\"\"\n Applies attention by creating the weighted \n context vector. Implementation is based on \n `IBM/pytorch-seq2seq`.\n \"\"\"\n hidden_state = self.project(hidden_state)\n hidden_state = hidden_state.transpose(0, 1)\n \n encoder_outputs_t = encoder_outputs.transpose(1, 2)\n attn_scores = torch.bmm(\n hidden_state, encoder_outputs_t)\n\n # applying mask on padded values of the input\n # NOTE during beam search mask might not be provided\n if attn_mask is not None:\n attn_scores = attn_scores.squeeze(1)\n attn_scores.masked_fill_(\n attn_mask, neginf(attn_scores.dtype))\n attn_scores = attn_scores.unsqueeze(1)\n\n attn_weights = softmax(attn_scores, dim=-1)\n attn_applied = torch.bmm(\n attn_weights, encoder_outputs)\n\n stacked = torch.cat(\n [decoder_output, attn_applied], dim=-1)\n outputs = self.combine(stacked)\n\n return outputs, attn_weights\n" ]
[ [ "torch.nn.functional.embedding", "torch.nn.Dropout", "torch.nn.functional.softmax", "torch.nn.functional.log_softmax", "torch.cat", "torch.zeros", "torch.nn.GRU", "torch.nn.Embedding", "torch.tensor", "torch.nn.Linear", "torch.bmm", "torch.nn.functional.linear" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jiruifu-jerry0219/UpperLimbEstimator
[ "d62deef93419934dcb33e43707dd0634a235fb9a" ]
[ "ArtificialNeuralNetwork/model.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport torch.utils.data as Data\nimport torch.optim as optim\nfrom torch.utils.data import Dataset, DataLoader, WeightedRandomSampler\n\nclass MultipleRegression(nn.Module):\n def __init__(self, num_features):\n super(MultipleRegression, self).__init__()\n self.fc1 = nn.Linear(num_features, 64)\n self.fc2 = nn.Linear(64, 128)\n# self.fc3 = nn.Linear(128, 64)\n self.output = nn.Linear(128, 1)\n\n self.act = nn.Sigmoid()\n\n def forward(self, inputs):\n x = self.act(self.fc1(inputs))\n x = self.act(self.fc2(x))\n# x = self.act(self.fc3(x))\n x = self.output(x)\n\n return x\n\n def predict(self, test_inputs):\n x = self.act(self.fc1(test_inputs))\n x = self.act(self.fc2(x))\n# x = self.act(self.fc3(x))\n x = self.output(x)\n\n return x\n" ]
[ [ "torch.nn.Linear", "torch.nn.Sigmoid" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
anonymousGithub2023/ECCV
[ "7c4b862c898aa6a591bc7f32f95c5900e0492490", "7c4b862c898aa6a591bc7f32f95c5900e0492490" ]
[ "collect_latency.py", "loss_impact.py" ]
[ "import torch\nimport os\nimport numpy as np\n\nfrom utils import *\n\nl2_res = np.zeros([6, 7])\nlinf_res = np.zeros([6, 7])\n\nresDir = 'res'\nif not os.path.isdir(resDir):\n os.mkdir(resDir)\nfor model_file in MODEL_FILE_LIST:\n task_name = model_file.split('.')[0]\n for attack_name in ['L2', 'Linf']:\n avg_res = np.zeros([3, 7])\n max_res = np.zeros([3, 7])\n for attack_type in [1,2,3,4,5,6,0]:\n latency_file = os.path.join('latency', str(attack_type) + '_' + attack_name + '_' + task_name + '.latency')\n latency_res = torch.load(latency_file)\n ori_res, adv_res = latency_res\n cpu_inc, gpu_inc, loop_inc = [], [], []\n for ori, adv in zip(ori_res, adv_res):\n cpu_inc.append(adv[1] / ori[1] - 1)\n gpu_inc.append(adv[0] / ori[0] - 1)\n loop_inc.append(adv[2] / ori[2] - 1)\n cpu_inc, gpu_inc, loop_inc = np.array(cpu_inc), np.array(gpu_inc), np.array(loop_inc)\n\n avg_res[0, attack_type] = loop_inc.mean()\n avg_res[1, attack_type] = cpu_inc.mean()\n avg_res[2, attack_type] = gpu_inc.mean()\n\n max_res[0, attack_type] = loop_inc.max()\n max_res[1, attack_type] = cpu_inc.max()\n max_res[2, attack_type] = gpu_inc.max()\n final_res = np.concatenate([avg_res, max_res], axis=0)\n file_name = os.path.join(resDir, task_name + '_' + attack_name + '.csv')\n final_res = np.concatenate([final_res[:,1:], final_res[:, 0:1]], axis=1)\n np.savetxt(file_name, final_res, delimiter=',')\n print(file_name, 'success')\n", "import torch\n\n\nimport datetime\nimport os\nimport torch\nimport argparse\n\n\nfrom utils import *\n\nADV_NUM = 1000\nBATCH = 20\nif not os.path.isdir('study'):\n os.mkdir('study')\n\n\ndef main(task_id):\n device = torch.device('cuda')\n model_file = MODEL_FILE_LIST[task_id]\n\n encoder, decoder, test_loader, _, word_map = load_dataset_model(model_file, batch_size=BATCH * CAP_PER_IMG)\n print('load model %s successful' % MODEL_FILE_LIST[task_id])\n for attack_norm in [0, 1]:\n task_name = model_file.split('.')[0]\n attack_class = ATTACK_METHOD[0]\n if attack_norm == 0:\n attack_name = 'L2'\n elif attack_norm == 1:\n attack_name = 'Linf'\n else:\n raise NotImplementedError\n config = {\n 'lr': 0.001,\n 'beams': 1,\n 'coeff': 100,\n 'max_len': 60,\n 'max_iter': 1000,\n 'max_per': MAX_PER_DICT[attack_name]\n }\n\n attack = attack_class(encoder, decoder, word_map, attack_norm, device, config)\n results = []\n\n for loss_type in [0, 1]:\n t1 = datetime.datetime.now()\n for i, data in enumerate(test_loader):\n (imgs, caption, caplen, all_captions) = data\n imgs = [imgs[jjj * CAP_PER_IMG:jjj * CAP_PER_IMG + 1] for jjj in range(BATCH)]\n imgs = torch.cat(imgs)\n imgs = imgs.to(attack.device)\n is_success, ori_img, adv_img = attack.run_diff_loss(imgs, loss_type)\n results.append([ori_img, adv_img])\n torch.save(results, 'study/' + str(loss_type) + '_' + attack_name + '_' + task_name + '.adv')\n if i >= 10:\n break\n t2 = datetime.datetime.now()\n print(t2 - t1)\n torch.save(results, 'study/' + str(loss_type) + '_' + attack_name + '_' + task_name + '.adv')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Transformer')\n parser.add_argument('--task', default=2, type=int, help='experiment subjects')\n args = parser.parse_args()\n main(args.task)\n\n # 3 4 5 6\n" ]
[ [ "torch.load", "numpy.concatenate", "numpy.savetxt", "numpy.array", "numpy.zeros" ], [ "torch.device", "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
youqingxiaozhua/OpenSelfSup
[ "ab8fc27c6b43679317eaf312b85461ba490606af", "ab8fc27c6b43679317eaf312b85461ba490606af" ]
[ "openselfsup/models/backbones/resnet.py", "openselfsup/hooks/deepcluster_hook.py" ]
[ "import torch.nn as nn\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn import constant_init, kaiming_init\nfrom mmcv.runner import load_checkpoint\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom openselfsup.utils import get_root_logger\nfrom ..registry import BACKBONES\nfrom ..utils import build_conv_layer, build_norm_layer\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self,\n inplanes,\n planes,\n stride=1,\n dilation=1,\n downsample=None,\n style='pytorch',\n with_cp=False,\n conv_cfg=None,\n norm_cfg=dict(type='BN')):\n super(BasicBlock, self).__init__()\n\n self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)\n self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)\n\n self.conv1 = build_conv_layer(\n conv_cfg,\n inplanes,\n planes,\n 3,\n stride=stride,\n padding=dilation,\n dilation=dilation,\n bias=False)\n self.add_module(self.norm1_name, norm1)\n self.conv2 = build_conv_layer(\n conv_cfg, planes, planes, 3, padding=1, bias=False)\n self.add_module(self.norm2_name, norm2)\n\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n self.dilation = dilation\n assert not with_cp\n\n @property\n def norm1(self):\n return getattr(self, self.norm1_name)\n\n @property\n def norm2(self):\n return getattr(self, self.norm2_name)\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.norm1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.norm2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self,\n inplanes,\n planes,\n stride=1,\n dilation=1,\n downsample=None,\n style='pytorch',\n with_cp=False,\n conv_cfg=None,\n norm_cfg=dict(type='BN')):\n \"\"\"Bottleneck block for ResNet.\n If style is \"pytorch\", the stride-two layer is the 3x3 conv layer,\n if it is \"caffe\", the stride-two layer is the first 1x1 conv layer.\n \"\"\"\n super(Bottleneck, self).__init__()\n assert style in ['pytorch', 'caffe']\n\n self.inplanes = inplanes\n self.planes = planes\n self.stride = stride\n self.dilation = dilation\n self.style = style\n self.with_cp = with_cp\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n\n if self.style == 'pytorch':\n self.conv1_stride = 1\n self.conv2_stride = stride\n else:\n self.conv1_stride = stride\n self.conv2_stride = 1\n\n self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)\n self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)\n self.norm3_name, norm3 = build_norm_layer(\n norm_cfg, planes * self.expansion, postfix=3)\n\n self.conv1 = build_conv_layer(\n conv_cfg,\n inplanes,\n planes,\n kernel_size=1,\n stride=self.conv1_stride,\n bias=False)\n self.add_module(self.norm1_name, norm1)\n self.conv2 = build_conv_layer(\n conv_cfg,\n planes,\n planes,\n kernel_size=3,\n stride=self.conv2_stride,\n padding=dilation,\n dilation=dilation,\n bias=False)\n self.add_module(self.norm2_name, norm2)\n self.conv3 = build_conv_layer(\n conv_cfg,\n planes,\n planes * self.expansion,\n kernel_size=1,\n bias=False)\n self.add_module(self.norm3_name, norm3)\n\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n\n @property\n def norm1(self):\n return getattr(self, self.norm1_name)\n\n @property\n def norm2(self):\n return getattr(self, self.norm2_name)\n\n @property\n def norm3(self):\n return getattr(self, self.norm3_name)\n\n def 
forward(self, x):\n\n def _inner_forward(x):\n identity = x\n\n out = self.conv1(x)\n out = self.norm1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.norm2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.norm3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n\n return out\n\n if self.with_cp and x.requires_grad:\n out = cp.checkpoint(_inner_forward, x)\n else:\n out = _inner_forward(x)\n\n out = self.relu(out)\n\n return out\n\n\ndef make_res_layer(block,\n inplanes,\n planes,\n blocks,\n stride=1,\n dilation=1,\n style='pytorch',\n with_cp=False,\n conv_cfg=None,\n norm_cfg=dict(type='BN')):\n downsample = None\n if stride != 1 or inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n build_conv_layer(\n conv_cfg,\n inplanes,\n planes * block.expansion,\n kernel_size=1,\n stride=stride,\n bias=False),\n build_norm_layer(norm_cfg, planes * block.expansion)[1],\n )\n\n layers = []\n layers.append(\n block(\n inplanes=inplanes,\n planes=planes,\n stride=stride,\n dilation=dilation,\n downsample=downsample,\n style=style,\n with_cp=with_cp,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg))\n inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(\n block(\n inplanes=inplanes,\n planes=planes,\n stride=1,\n dilation=dilation,\n style=style,\n with_cp=with_cp,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg))\n\n return nn.Sequential(*layers)\n\n\[email protected]_module\nclass ResNet(nn.Module):\n \"\"\"ResNet backbone.\n\n Args:\n depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.\n in_channels (int): Number of input image channels. Normally 3.\n num_stages (int): Resnet stages, normally 4.\n strides (Sequence[int]): Strides of the first block of each stage.\n dilations (Sequence[int]): Dilation of each stage.\n out_indices (Sequence[int]): Output from which stages.\n style (str): `pytorch` or `caffe`. If set to \"pytorch\", the stride-two\n layer is the 3x3 conv layer, otherwise the stride-two layer is\n the first 1x1 conv layer.\n frozen_stages (int): Stages to be frozen (stop grad and set eval mode).\n -1 means not freezing any parameters.\n norm_cfg (dict): dictionary to construct and config norm layer.\n norm_eval (bool): Whether to set norm layers to eval mode, namely,\n freeze running stats (mean and var). Note: Effect on Batch Norm\n and its variants only.\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n memory while slowing down the training speed.\n zero_init_residual (bool): whether to use zero init for last norm layer\n in resblocks to let them behave as identity.\n\n Example:\n >>> from openselfsup.models import ResNet\n >>> import torch\n >>> self = ResNet(depth=18)\n >>> self.eval()\n >>> inputs = torch.rand(1, 3, 32, 32)\n >>> level_outputs = self.forward(inputs)\n >>> for level_out in level_outputs:\n ... 
print(tuple(level_out.shape))\n (1, 64, 8, 8)\n (1, 128, 4, 4)\n (1, 256, 2, 2)\n (1, 512, 1, 1)\n \"\"\"\n\n arch_settings = {\n 18: (BasicBlock, (2, 2, 2, 2)),\n 34: (BasicBlock, (3, 4, 6, 3)),\n 50: (Bottleneck, (3, 4, 6, 3)),\n 101: (Bottleneck, (3, 4, 23, 3)),\n 152: (Bottleneck, (3, 8, 36, 3))\n }\n\n def __init__(self,\n depth,\n in_channels=3,\n num_stages=4,\n strides=(1, 2, 2, 2),\n dilations=(1, 1, 1, 1),\n out_indices=(0, 1, 2, 3, 4),\n style='pytorch',\n frozen_stages=-1,\n conv_cfg=None,\n norm_cfg=dict(type='BN', requires_grad=True),\n norm_eval=False,\n with_cp=False,\n zero_init_residual=False):\n super(ResNet, self).__init__()\n if depth not in self.arch_settings:\n raise KeyError('invalid depth {} for resnet'.format(depth))\n self.depth = depth\n self.num_stages = num_stages\n assert num_stages >= 1 and num_stages <= 4\n self.strides = strides\n self.dilations = dilations\n assert len(strides) == len(dilations) == num_stages\n self.out_indices = out_indices\n assert max(out_indices) < num_stages + 1\n self.style = style\n self.frozen_stages = frozen_stages\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.with_cp = with_cp\n self.norm_eval = norm_eval\n self.zero_init_residual = zero_init_residual\n self.block, stage_blocks = self.arch_settings[depth]\n self.stage_blocks = stage_blocks[:num_stages]\n self.inplanes = 64\n\n self._make_stem_layer(in_channels)\n\n self.res_layers = []\n for i, num_blocks in enumerate(self.stage_blocks):\n stride = strides[i]\n dilation = dilations[i]\n planes = 64 * 2**i\n res_layer = make_res_layer(\n self.block,\n self.inplanes,\n planes,\n num_blocks,\n stride=stride,\n dilation=dilation,\n style=self.style,\n with_cp=with_cp,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg)\n self.inplanes = planes * self.block.expansion\n layer_name = 'layer{}'.format(i + 1)\n self.add_module(layer_name, res_layer)\n self.res_layers.append(layer_name)\n\n self._freeze_stages()\n\n self.feat_dim = self.block.expansion * 64 * 2**(\n len(self.stage_blocks) - 1)\n\n @property\n def norm1(self):\n return getattr(self, self.norm1_name)\n\n def _make_stem_layer(self, in_channels):\n self.conv1 = build_conv_layer(\n self.conv_cfg,\n in_channels,\n 64,\n kernel_size=7,\n stride=2,\n padding=3,\n bias=False)\n self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)\n self.add_module(self.norm1_name, norm1)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n def _freeze_stages(self):\n if self.frozen_stages >= 0:\n self.norm1.eval()\n for m in [self.conv1, self.norm1]:\n for param in m.parameters():\n param.requires_grad = False\n\n for i in range(1, self.frozen_stages + 1):\n m = getattr(self, 'layer{}'.format(i))\n m.eval()\n for param in m.parameters():\n param.requires_grad = False\n\n def init_weights(self, pretrained=None):\n if isinstance(pretrained, str):\n logger = get_root_logger()\n load_checkpoint(self, pretrained, strict=True, logger=logger)\n elif pretrained is None:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n kaiming_init(m, mode='fan_in', nonlinearity='relu')\n elif isinstance(m, (_BatchNorm, nn.GroupNorm)):\n constant_init(m, 1)\n\n if self.zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n constant_init(m.norm3, 0)\n elif isinstance(m, BasicBlock):\n constant_init(m.norm2, 0)\n else:\n raise TypeError('pretrained must be a str or None')\n\n def forward(self, x):\n outs = []\n x = self.conv1(x)\n x = self.norm1(x)\n x = 
self.relu(x) # r50: 64x128x128\n if 0 in self.out_indices:\n outs.append(x)\n x = self.maxpool(x) # r50: 64x56x56\n for i, layer_name in enumerate(self.res_layers):\n res_layer = getattr(self, layer_name)\n x = res_layer(x)\n if i + 1 in self.out_indices:\n outs.append(x)\n # r50: 1-256x56x56; 2-512x28x28; 3-1024x14x14; 4-2048x7x7\n return tuple(outs)\n\n def train(self, mode=True):\n super(ResNet, self).train(mode)\n self._freeze_stages()\n if mode and self.norm_eval:\n for m in self.modules():\n # trick: eval have effect on BatchNorm only\n if isinstance(m, _BatchNorm):\n m.eval()\n", "import numpy as np\n\nfrom mmcv.runner import Hook\n\nimport torch\nimport torch.distributed as dist\n\nfrom openselfsup.third_party import clustering as _clustering\nfrom openselfsup.utils import print_log\nfrom .registry import HOOKS\nfrom .extractor import Extractor\n\n\[email protected]_module\nclass DeepClusterHook(Hook):\n \"\"\"Hook for DeepCluster.\n\n Args:\n extractor (dict): Config dict for feature extraction.\n clustering (dict): Config dict that specifies the clustering algorithm.\n unif_sampling (bool): Whether to apply uniform sampling.\n reweight (bool): Whether to apply loss re-weighting.\n reweight_pow (float): The power of re-weighting.\n init_memory (bool): Whether to initialize memory banks for ODC.\n Default: False.\n initial (bool): Whether to call the hook initially. Default: True.\n interval (int): Frequency of epochs to call the hook. Default: 1.\n dist_mode (bool): Use distributed training or not. Default: True.\n data_loaders (DataLoader): A PyTorch dataloader. Default: None.\n \"\"\"\n\n def __init__(\n self,\n extractor,\n clustering,\n unif_sampling,\n reweight,\n reweight_pow,\n init_memory=False, # for ODC\n initial=True,\n interval=1,\n dist_mode=True,\n data_loaders=None):\n self.extractor = Extractor(dist_mode=dist_mode, **extractor)\n self.clustering_type = clustering.pop('type')\n self.clustering_cfg = clustering\n self.unif_sampling = unif_sampling\n self.reweight = reweight\n self.reweight_pow = reweight_pow\n self.init_memory = init_memory\n self.initial = initial\n self.interval = interval\n self.dist_mode = dist_mode\n self.data_loaders = data_loaders\n\n def before_run(self, runner):\n if self.initial:\n self.deepcluster(runner)\n\n def after_train_epoch(self, runner):\n if not self.every_n_epochs(runner, self.interval):\n return\n self.deepcluster(runner)\n\n def deepcluster(self, runner):\n # step 1: get features\n runner.model.eval()\n features = self.extractor(runner)\n runner.model.train()\n\n # step 2: get labels\n if not self.dist_mode or (self.dist_mode and runner.rank == 0):\n clustering_algo = _clustering.__dict__[self.clustering_type](\n **self.clustering_cfg)\n # Features are normalized during clustering\n clustering_algo.cluster(features, verbose=True)\n assert isinstance(clustering_algo.labels, np.ndarray)\n new_labels = clustering_algo.labels.astype(np.int64)\n np.save(\n \"{}/cluster_epoch_{}.npy\".format(runner.work_dir,\n runner.epoch), new_labels)\n self.evaluate(runner, new_labels)\n else:\n new_labels = np.zeros((len(self.data_loaders[0].dataset), ),\n dtype=np.int64)\n\n if self.dist_mode:\n new_labels_tensor = torch.from_numpy(new_labels).cuda()\n dist.broadcast(new_labels_tensor, 0)\n new_labels = new_labels_tensor.cpu().numpy()\n new_labels_list = list(new_labels)\n\n # step 3: assign new labels\n self.data_loaders[0].dataset.assign_labels(new_labels_list)\n\n # step 4 (a): set uniform sampler\n if self.unif_sampling:\n 
self.data_loaders[0].sampler.set_uniform_indices(\n new_labels_list, self.clustering_cfg.k)\n\n # step 4 (b): set loss reweight\n if self.reweight:\n runner.model.module.set_reweight(new_labels, self.reweight_pow)\n\n # step 5: randomize classifier\n runner.model.module.head.init_weights(init_linear='normal')\n if self.dist_mode:\n for p in runner.model.module.head.state_dict().values():\n dist.broadcast(p, 0)\n\n # step 6: init memory for ODC\n if self.init_memory:\n runner.model.module.memory_bank.init_memory(features, new_labels)\n\n def evaluate(self, runner, new_labels):\n hist = np.bincount(new_labels, minlength=self.clustering_cfg.k)\n empty_cls = (hist == 0).sum()\n minimal_cls_size, maximal_cls_size = hist.min(), hist.max()\n if runner.rank == 0:\n print_log(\n \"empty_num: {}\\tmin_cluster: {}\\tmax_cluster:{}\".format(\n empty_cls.item(), minimal_cls_size.item(),\n maximal_cls_size.item()),\n logger='root')\n" ]
[ [ "torch.nn.Sequential", "torch.nn.ReLU", "torch.utils.checkpoint.checkpoint", "torch.nn.MaxPool2d" ], [ "torch.distributed.broadcast", "torch.from_numpy", "numpy.bincount" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
unhold/game-and-watch-patch
[ "dc33f2228d7c791a746502aef27a5331c0076503" ]
[ "patches/tileset.py" ]
[ "from io import BytesIO\nfrom math import ceil\n\nimport numpy as np\nfrom PIL import Image\n\nfrom .exception import ParsingError\n\n_BLOCK_SIZE = 16\n_BLOCK_PIXEL = _BLOCK_SIZE * _BLOCK_SIZE\n\nPALETTE_OFFSETS = [\n 0xB_EC68,\n 0xB_EDA8,\n 0xB_EEE8,\n 0xB_F028,\n 0xB_F168,\n]\n\n\ndef bytes_to_tilemap(data, palette=None, bpp=8, width=256):\n \"\"\"\n Parameters\n ----------\n palette : bytes\n 320 long RGBA (80 colors). Alpha is ignored.\n\n Returns\n -------\n PIL.Image\n Rendered RGB image.\n \"\"\"\n\n # assert bpp in [4, 8]\n\n if bpp < 8:\n nibbles = bytearray()\n # offset = 0x0\n for b in data:\n shift = 8 - bpp\n while shift >= 0:\n nibbles.append(b >> shift & (2 ** bpp - 1))\n shift -= bpp\n # nibbles.append((b >> 4) | (offset << 4))\n # nibbles.append((b & 0xF) | (offset << 4))\n data = bytes(nibbles)\n del nibbles\n\n # Assemble bytes into an index-image\n h, w = int(ceil(len(data) / width / _BLOCK_SIZE) * _BLOCK_SIZE), width\n canvas = np.zeros((h, w), dtype=np.uint8)\n i_sprite = 0\n for i in range(0, len(data), _BLOCK_PIXEL):\n sprite = data[i : i + _BLOCK_PIXEL]\n\n x = i_sprite * _BLOCK_SIZE % w\n y = _BLOCK_SIZE * (i_sprite * _BLOCK_SIZE // w)\n view = canvas[y : y + _BLOCK_SIZE, x : x + _BLOCK_SIZE]\n sprite_block = np.frombuffer(sprite, dtype=np.uint8).reshape(\n _BLOCK_SIZE, _BLOCK_SIZE\n )\n view[:] = sprite_block\n\n i_sprite += 1\n\n if palette is None:\n return Image.fromarray(canvas, \"L\")\n\n # Apply palette to index-image\n p = np.frombuffer(palette, dtype=np.uint8).reshape((80, 4))\n p = p[:, :3]\n p = np.fliplr(p) # BGR->RGB\n\n im = Image.fromarray(canvas, \"P\")\n im.putpalette(p)\n\n return im\n\n\ndef rgb_to_index(tilemap, palette):\n if isinstance(tilemap, Image.Image):\n tilemap = tilemap.convert(\"RGB\")\n tilemap = np.array(tilemap)\n elif isinstance(tilemap, np.ndarray):\n pass\n else:\n raise TypeError(f\"Don't know how to handle tilemap type {type(tilemap)}\")\n\n # Convert rgb tilemap to index image\n p = np.frombuffer(palette, dtype=np.uint8).reshape((80, 4))\n p = p[:, :3]\n p = np.fliplr(p) # BGR->RGB\n p = p[None, None].transpose(0, 1, 3, 2) # (1, 1, 3, 80)\n\n # Find closest color\n diff = tilemap[..., None] - p\n dist = np.linalg.norm(diff, axis=2)\n tilemap = np.argmin(dist, axis=-1).astype(np.uint8)\n\n return tilemap\n\n\ndef tilemap_to_bytes(tilemap, palette=None, bpp=8):\n \"\"\"\n Parameters\n ----------\n tilemap : PIL.Image.Image or numpy.ndarray\n RGB data\n palette : bytes\n 320 long RGBA (80 colors). 
Alpha is ignored.\n\n Returns\n -------\n bytes\n Bytes representation of index image\n \"\"\"\n\n if isinstance(tilemap, Image.Image):\n tilemap = tilemap.convert(\"RGB\")\n tilemap = np.array(tilemap)\n elif isinstance(tilemap, np.ndarray):\n pass\n else:\n raise TypeError(f\"Don't know how to handle tilemap type {type(tilemap)}\")\n\n if palette is not None:\n tilemap = rgb_to_index(tilemap, palette)\n\n # Need to undo the tiling now.\n out = []\n for i in range(0, tilemap.shape[0], _BLOCK_SIZE):\n for j in range(0, tilemap.shape[1], _BLOCK_SIZE):\n sprite = tilemap[i : i + _BLOCK_SIZE, j : j + _BLOCK_SIZE]\n sprite_bytes = sprite.tobytes()\n out.append(sprite_bytes)\n out = b\"\".join(out)\n\n if bpp == 4:\n out_packed = bytearray()\n assert len(out) % 2 == 0\n for i in range(0, len(out), 2):\n b1, b2 = out[i], out[i + 1]\n b1 &= 0xF\n b2 &= 0xF\n out_packed.append((b1 << 4) | b2)\n out = bytes(out_packed)\n\n return out\n\n\ndef decode_backdrop(data):\n \"\"\"Convert easter egg images to GIF\n\n Based on:\n https://gist.github.com/GMMan/c1f0b516afdbb71769752ee06adbbd9a\n\n Returns\n -------\n PIL.Image.Image\n Decoded image\n int\n Number of bytes consumed to create image.\n \"\"\"\n\n def rgb565_to_rgba32(pix):\n r = int(((pix >> 11) * 255 + 15) / 31)\n g = int((((pix >> 5) & 0x3F) * 255 + 31) / 63)\n b = int(((pix & 0x1F) * 255 + 15) / 31)\n return r, g, b\n\n idx = 0\n out = []\n\n # Header\n out.append(b\"GIF89a\")\n\n width = int.from_bytes(data[idx : idx + 2], \"little\")\n idx += 2\n\n height = int.from_bytes(data[idx : idx + 2], \"little\")\n idx += 2\n\n palette_size = data[idx]\n idx += 1\n idx += 1 # padding\n\n palette = []\n for _ in range(palette_size):\n palette.append(int.from_bytes(data[idx : idx + 2], \"little\"))\n idx += 2\n\n gct_size = 0\n calc_gct_size = 2\n while calc_gct_size < palette_size:\n gct_size += 1\n calc_gct_size <<= 1\n\n # Logical screen descriptor\n out.append(width.to_bytes(2, \"little\"))\n out.append(height.to_bytes(2, \"little\"))\n out.append(((1 << 7) | gct_size).to_bytes(1, \"little\"))\n out.append(b\"\\x00\")\n out.append(b\"\\x00\")\n\n # Global Color Table\n for i in range(calc_gct_size):\n if i < len(palette):\n r, g, b = rgb565_to_rgba32(palette[i])\n out.append(r.to_bytes(1, \"little\"))\n out.append(g.to_bytes(1, \"little\"))\n out.append(b.to_bytes(1, \"little\"))\n else:\n out.append(b\"\\x00\")\n out.append(b\"\\x00\")\n out.append(b\"\\x00\")\n\n # Image descriptor\n out.append(b\"\\x2c\")\n out.append(b\"\\x00\\x00\") # x\n out.append(b\"\\x00\\x00\") # y\n out.append(width.to_bytes(2, \"little\"))\n out.append(height.to_bytes(2, \"little\"))\n out.append(b\"\\x00\")\n\n # Frame\n min_code_size = data[idx]\n idx += 1\n out.append(min_code_size.to_bytes(1, \"little\"))\n\n while True:\n block_size = data[idx]\n idx += 1\n out.append(block_size.to_bytes(1, \"little\"))\n if block_size == 0:\n break\n out.append(data[idx : idx + block_size])\n idx += block_size\n\n trailer = data[idx]\n idx += 1\n\n if trailer != 0x3B:\n raise ParsingError(\"Invalid GIF Trailer\")\n out.append(trailer.to_bytes(1, \"little\"))\n out = b\"\".join(out)\n\n im = Image.open(BytesIO(out))\n\n return im, idx\n" ]
[ [ "numpy.fliplr", "numpy.linalg.norm", "numpy.frombuffer", "numpy.argmin", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JeanOlivier/Labber-PSICT
[ "f6d831823ec7c8373fd562b6d1247f7e438f1ee5" ]
[ "PSICT_extras/PSICT_MultiPulse/PSICT_MultiPulse.py" ]
[ "#!/usr/bin/env python\n\nimport InstrumentDriver\nimport numpy as np\n\nimport os\nimport logging\nfrom datetime import datetime\n\nfrom PSICT_MultiPulse_tools import delistifyPulseDefs\nfrom waveforms_handling import generatePulse, calculateWaveform, gen_pulse_sequence\n\n\nclass Driver(InstrumentDriver.InstrumentWorker):\n \"\"\" This class implements the PSICT-MultiPulse pulse generator\"\"\"\n\n def performOpen(self, options = {}):\n '''Open the instrument connection'''\n ## Start logging object\n self.initLogger()\n ## Number of traces - corresponds to number of outputs\n self.nTrace = 4\n ## Waveform and time containers\n self.lWaveforms = [np.array([], dtype=float)] * self.nTrace\n self.lQuadratures = [np.array([], dtype=float)] * self.nTrace\n self.vTime = np.array([], dtype=float)\n ## Pulse definition and sequence containers\n self.lDefKeyOrder = []\n self.lPulseDefinitions = []\n self.lPulseSequences = []\n ## Log completion of opening operation\n self._logger.info('Instrument opened successfully.')\n\n def initLogger(self):\n ## Dir and file setup\n log_dir = os.path.expanduser('~/MultiPulse_logs/')\n log_file = 'MultiPulse_{:%y%m%d_%H%M%S}'.format(datetime.now())+'.log'\n ## Create log dir if it does not exist\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n log_path = os.path.join(log_dir, log_file)\n ## logger object config and init\n logging.basicConfig(filename = log_path, filemode = 'a', \\\n level = logging.DEBUG,\\\n format = '%(asctime)s %(name)-8s: %(message)s', \\\n datefmt = '%y-%m-%d %H:%M:%S')\n self._logger = logging.getLogger('MultiPulse')\n self._logger.info('Logging initialized to {}'.format(log_path))\n\n def performSetValue(self, quant, value, sweepRate = 0.0, options = {}):\n '''\n Set the specified quantity to the given value\n\n Should return the actual value set by the instrument.\n '''\n self._logger.debug('SetValue: {} {} {}'.format(quant.name, value, type(value)))\n ## If the value is a pulse definitions or sequences file path, pull the contents of the file\n if quant.name == 'Pulse definitions file':\n ## Only fetch if input string is not empty\n if value is not '':\n self._logger.debug('Pulling pulse definitions from file: {}'.format(value))\n ## Get pulse definitions from file\n with open(value, 'r') as pdfile:\n self.lDefKeyOrder = pdfile.readline().strip().split(',')\n lRawPulseDefinitions = [[float(yy) for yy in xx.strip().split(',')] \\\n for xx in pdfile.readlines()]\n ## Parse raw pulse definitions\n self.lPulseDefinitions = delistifyPulseDefs(lRawPulseDefinitions, self.lDefKeyOrder)\n self._logger.debug('Pulse definitions: {}'.format(self.lPulseDefinitions))\n elif quant.name == 'Pulse sequences file':\n ## Only fetch if input string is not empty\n if value is not '':\n self._logger.debug('Pulling pulse sequences from file: {}'.format(value))\n ## Get pulse definitions from file\n with open(value, 'r') as psfile:\n self.lPulseSequences = [[int(yy) for yy in xx.strip().split(',')] \\\n for xx in psfile.readlines()]\n self._logger.debug('Imported pulse sequences: {}'.format(self.lPulseSequences))\n ## Return value, regardless of quant\n return value\n\n def performGetValue(self, quant, options = {}):\n '''\n Get the value of the specified quantity from the instrument\n '''\n ## Ensure that vector waveforms are updated before returning value\n if quant.name[:5] == 'Trace':\n ## Recalculate waveform if necessary\n if self.isConfigUpdated():\n self.calculateWaveform()\n vData = self.getWaveformFromMemory(quant)\n dt = 
1/self.getValue('Sample rate')\n value = quant.getTraceDict(vData, dt=dt)\n elif quant.name[:10] == 'Quadrature':\n ## Recalculate waveform if necessary\n if self.isConfigUpdated():\n self.calculateWaveform()\n vData = self.getWaveformFromMemory(quant)\n dt = 1/self.getValue('Sample rate')\n value = quant.getTraceDict(vData, dt=dt)\n else:\n ## All other values can be returned as-is\n value = quant.getValue()\n ## Log GetValue operation\n self._logger.debug('GetValue: {} {} {}'.format(quant.name, value, type(value)))\n return value\n\n def getWaveformFromMemory(self, quant):\n '''Return data from calculated waveforms'''\n if quant.name[:5] == 'Trace':\n iDataIndex = int(quant.name[-1]) - 1\n self._logger.debug('Fetching waveform for output {}'.format(iDataIndex))\n vData = self.lWaveforms[iDataIndex]\n elif quant.name[:10] == 'Quadrature':\n iDataIndex = int(quant.name[-1]) - 1\n self._logger.debug('Fetching quadrature for output {}'.format(iDataIndex))\n vData = self.lQuadratures[iDataIndex]\n else:\n raise RuntimeError('Invalid specification for getting waveform: {}'.format(quant.name))\n return vData\n\n def calculateTotalSeqTime(self, pulseSeq, truncRange):\n '''\n Calculate the total time required for the specified pulse sequence with the given truncation range\n '''\n ## Get pulse definitions\n lPulseDefs = self.lPulseDefinitions\n ## Calculate total time\n totalTime = 0.0\n for pulseIndex in pulseSeq:\n oPulseDef = lPulseDefs[pulseIndex]\n totalTime += oPulseDef['w'] + oPulseDef['v'] + oPulseDef['s']\n ## Add decay time for last pulse in sequence\n totalTime += lPulseDefs[pulseSeq[-1]]['w'] * (truncRange - 1)/2\n ## Return final value\n return totalTime\n\n def updateHeadTime(self, dOldHeadTime, oPulseDef, bReversed = False):\n ## Get edge-to-edge length of pulse (including spacing)\n dPulseLength = oPulseDef['w'] + oPulseDef['v'] + oPulseDef['s']\n ## Increment head time and return new value\n if bReversed:\n dNewHeadTime = dOldHeadTime - dPulseLength\n else:\n dNewHeadTime = dOldHeadTime + dPulseLength\n return dNewHeadTime\n\n calculateWaveform = calculateWaveform\n generatePulse = generatePulse\n gen_pulse_sequence = gen_pulse_sequence\n\nif __name__ == '__main__':\n pass\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
keklarup/POET_AiPoetry
[ "ff197564ab13d0e2001093bc2b803d538dd52f59" ]
[ "app.py" ]
[ "# -*- coding: utf-8 -*-\n# app.py\n\nfrom flask import Flask, request, render_template\nimport pickle\nimport gzip\nimport numpy as np\nimport AiPoems\n\n#start up cell -- import necessary metadata for model\nwith open('tokenizer.pickle', 'rb') as handle:\n tokenizer = pickle.load(handle)\nwith gzip.GzipFile('predictors.npy.gz', \"r\") as f:\n predictors=np.load(f)\nwith gzip.GzipFile('label.npy.gz', \"r\") as f:\n label=np.load(f)\ntotal_words=len(label[0])\nmax_sequence_len=len(predictors[0])+1\nfilename='word_model_love_poems_composite_100.h5'\n\n#start up cell -- initialize model\nmodel = AiPoems.initialize_model(predictors, label, max_sequence_len, \n total_words, device='/cpu:0')\nmodel=AiPoems.load_model(model, filename)\n\ntext=AiPoems.generate_text_random(model, tokenizer, 10, max_sequence_len, seed_text=\"starttoken\", top_n=10)\n\napp = Flask(__name__)\n\[email protected](\"/example\")\ndef get_numbers():\n #return ExampleService().supply_numbers(1,2)\n return str(1+2)\n #return ExampleModel().add_numbers(5,5)\n\[email protected](\"/\")\ndef home():\n return render_template(\"home.html\")\n \[email protected](\"/about\")\ndef about():\n return render_template(\"about.html\")\n\[email protected](\"/make\")\ndef make(): \n return render_template(\"make.html\")\n\[email protected](\"/generatedPoem\")\ndef generatedPoem():\n #choices=['this is string 1', 'this is string 2', 'a cat is a cat is a cat', 'the rain is spain']\n #import random\n AiPoem=AiPoems.generate_text_random(model, tokenizer, 50, max_sequence_len, seed_text=\"starttoken\", top_n=10)\n AiPoem=AiPoem.replace('starttoken','').replace('returntoken','\\n').split('endtoken2')[0]\n AiPoem=AiPoem.strip()\n #text=str(max_sequence_len)\n \n #text=random.choice(choices)\n return render_template(\"generatedPoem.html\", text=AiPoem)\n\n\nif __name__ == \"__main__\":\n app.run(debug=False)" ]
[ [ "numpy.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AntonYermilov/progue
[ "7f382208c9efc904cff9d8df4750606039801d45" ]
[ "game/model/entity/character/hero.py" ]
[ "from dataclasses import dataclass\nfrom typing import Dict\n\nimport numpy as np\n\nfrom game import Position\nfrom game.model.entity.damage import Damageable, Damage, DamageType\nfrom game.model.entity.inventory.inventory_keeper import InventoryKeeper\nfrom game.model.entity.item.item import Item\nfrom .character import Character, CharacterStats\n\n\n@dataclass\nclass HeroStats(CharacterStats):\n max_experience: int\n experience: int\n confuse_ratio: float\n\n def __init__(self, max_experience: int, experience: int, confuse_ratio: float, confuse_turns: int, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.max_experience = max_experience\n self.experience = experience\n self.confuse_ratio = confuse_ratio\n self.confuse_turns = confuse_turns\n\n@dataclass\nclass LevelUpStats:\n health_inc_value: int\n strength_inc_value: int\n experience_inc_ratio: float\n\nclass Hero(Character, InventoryKeeper):\n \"\"\"\n Hero is the character controlled by the player.\n \"\"\"\n\n def __init__(self, name: str, id: str, position: Position, description: Dict):\n stats = HeroStats(level=1,\n max_health=description['initial_stats']['max_health'],\n health=description['initial_stats']['health'],\n attack_damage=description['initial_stats']['max_strength'],\n max_experience=description['initial_stats']['max_experience'],\n experience=description['initial_stats']['experience'],\n confuse_ratio=description['initial_stats']['confuse_ratio'],\n confuse_turns=description['initial_stats']['confuse_turns'],\n reward=description['reward'])\n Character.__init__(self, position=position, stats=stats)\n InventoryKeeper.__init__(self, limit=description['initial_stats']['inventory_size'])\n self.level_up_stats = LevelUpStats(health_inc_value=description['on_new_level']['max_health_increase_value'],\n strength_inc_value=description['on_new_level']['max_strength_increase_value'],\n experience_inc_ratio=description['on_new_level']['max_experience_increase_ratio'])\n self.name = name\n self.id = id\n\n def deal_damage(self, target: Damageable) -> Damage:\n confuse_turns = 0\n confuse = np.random.choice([True, False], p=[self.stats.confuse_ratio, 1 - self.stats.confuse_ratio])\n if confuse:\n confuse_turns = self.stats.confuse_turns\n return Damage(damage_type=DamageType.PHYSICAL,\n damage_amount=self.stats.attack_damage,\n confuse_turns=confuse_turns)\n\n def accept_damage(self, damage: Damage):\n self.update_health(-damage.damage_amount)\n\n def use_item(self, item: Item):\n item.apply(self)\n self.inventory.remove(item)\n\n def on_destroy(self, model):\n model.players.pop(self.id)\n\n def is_alive(self) -> bool:\n return self.stats.health > 0\n\n def update_health(self, health):\n self.stats.health += health\n if self.stats.health > self.stats.max_health:\n self.stats.health = self.stats.max_health\n if self.stats.health < 0:\n self.stats.health = 0\n\n def update_experience(self, experience):\n self.stats.experience += experience\n if self.stats.experience > self.stats.max_experience:\n self.stats.level += 1\n self.stats.experience -= self.stats.max_experience\n self.stats.health += self.level_up_stats.health_inc_value\n self.stats.max_health += self.level_up_stats.health_inc_value\n self.stats.attack_damage += self.level_up_stats.strength_inc_value\n self.stats.max_experience = int(self.stats.max_experience * self.level_up_stats.experience_inc_ratio)\n if self.stats.experience < 0:\n self.stats.experience = 0\n" ]
[ [ "numpy.random.choice" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
qureshinomaan/habitat-sim
[ "df2540b658d0444e84bbc7a0c3fb995f8d523b52" ]
[ "tests/test_controls.py" ]
[ "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport attr\nimport hypothesis\nimport magnum as mn\nimport numpy as np\nimport pytest\nimport quaternion # noqa: F401\nfrom hypothesis import strategies as st\n\nimport habitat_sim\nimport habitat_sim.errors\nfrom habitat_sim.utils.common import angle_between_quats, quat_from_angle_axis\n\n\ndef test_no_action():\n scene_graph = habitat_sim.SceneGraph()\n agent_config = habitat_sim.AgentConfiguration()\n agent_config.action_space = dict(\n move_backward=habitat_sim.ActionSpec(\n \"move_backward\", habitat_sim.ActuationSpec(amount=0.25)\n )\n )\n agent = habitat_sim.Agent(scene_graph.get_root_node().create_child(), agent_config)\n\n with pytest.raises(AssertionError):\n agent.act(\"move_forward\")\n\n\ndef test_no_move_fun():\n scene_graph = habitat_sim.SceneGraph()\n agent_config = habitat_sim.AgentConfiguration()\n agent_config.action_space = dict(\n move_forward=habitat_sim.ActionSpec(\n \"DNF\", habitat_sim.ActuationSpec(amount=0.25)\n )\n )\n agent = habitat_sim.Agent(scene_graph.get_root_node().create_child(), agent_config)\n\n with pytest.raises(AssertionError):\n agent.act(\"move_forward\")\n\n\[email protected](auto_attribs=True, cmp=False)\nclass ExpectedDelta:\n delta_pos: np.ndarray = attr.Factory(lambda: np.array([0, 0, 0]))\n delta_rot: np.quaternion = attr.Factory(lambda: np.quaternion(1, 0, 0, 0))\n\n\ndef _check_state_same(s1, s2):\n assert np.allclose(s1.position, s2.position)\n assert angle_between_quats(s1.rotation, s2.rotation) < 1e-5\n\n\ndef _check_state_expected(s1, s2, expected: ExpectedDelta):\n assert np.linalg.norm(s2.position - s1.position - expected.delta_pos) < 1e-5\n assert (\n angle_between_quats(s2.rotation * expected.delta_rot.inverse(), s1.rotation)\n < 1e-5\n )\n\n\ndefault_body_control_testdata = [\n (\"move_backward\", ExpectedDelta(delta_pos=0.25 * habitat_sim.geo.BACK)),\n (\"move_forward\", ExpectedDelta(delta_pos=0.25 * habitat_sim.geo.FRONT)),\n (\"move_right\", ExpectedDelta(delta_pos=0.25 * habitat_sim.geo.RIGHT)),\n (\"move_left\", ExpectedDelta(delta_pos=0.25 * habitat_sim.geo.LEFT)),\n (\n \"turn_right\",\n ExpectedDelta(\n delta_rot=quat_from_angle_axis(np.deg2rad(10.0), habitat_sim.geo.GRAVITY)\n ),\n ),\n (\n \"turn_left\",\n ExpectedDelta(\n delta_rot=quat_from_angle_axis(np.deg2rad(10.0), habitat_sim.geo.UP)\n ),\n ),\n]\n\n\[email protected](\"action,expected\", default_body_control_testdata)\ndef test_default_body_contorls(action, expected):\n scene_graph = habitat_sim.SceneGraph()\n agent_config = habitat_sim.AgentConfiguration()\n agent_config.action_space = dict(\n move_backward=habitat_sim.ActionSpec(\n \"move_backward\", habitat_sim.ActuationSpec(amount=0.25)\n ),\n move_forward=habitat_sim.ActionSpec(\n \"move_forward\", habitat_sim.ActuationSpec(amount=0.25)\n ),\n move_left=habitat_sim.ActionSpec(\n \"move_left\", habitat_sim.ActuationSpec(amount=0.25)\n ),\n move_right=habitat_sim.ActionSpec(\n \"move_right\", habitat_sim.ActuationSpec(amount=0.25)\n ),\n turn_left=habitat_sim.ActionSpec(\n \"turn_left\", habitat_sim.ActuationSpec(amount=10.0)\n ),\n turn_right=habitat_sim.ActionSpec(\n \"turn_right\", habitat_sim.ActuationSpec(amount=10.0)\n ),\n )\n agent = habitat_sim.Agent(scene_graph.get_root_node().create_child(), agent_config)\n\n state = agent.state\n agent.act(action)\n new_state = agent.state\n\n 
_check_state_expected(state, new_state, expected)\n for k, v in state.sensor_states.items():\n assert k in new_state.sensor_states\n _check_state_expected(v, new_state.sensor_states[k], expected)\n\n\ndefault_sensor_control_testdata = [\n (\"move_up\", ExpectedDelta(delta_pos=0.25 * habitat_sim.geo.UP)),\n (\"move_down\", ExpectedDelta(delta_pos=0.25 * habitat_sim.geo.GRAVITY)),\n (\n \"look_right\",\n ExpectedDelta(\n delta_rot=quat_from_angle_axis(np.deg2rad(-10.0), habitat_sim.geo.UP)\n ),\n ),\n (\n \"look_left\",\n ExpectedDelta(\n delta_rot=quat_from_angle_axis(np.deg2rad(10.0), habitat_sim.geo.UP)\n ),\n ),\n (\n \"look_up\",\n ExpectedDelta(\n delta_rot=quat_from_angle_axis(np.deg2rad(10.0), habitat_sim.geo.RIGHT)\n ),\n ),\n (\n \"look_down\",\n ExpectedDelta(\n delta_rot=quat_from_angle_axis(np.deg2rad(-10.0), habitat_sim.geo.RIGHT)\n ),\n ),\n (\n \"rotate_sensor_clockwise\",\n ExpectedDelta(\n delta_rot=quat_from_angle_axis(np.deg2rad(-10.0), habitat_sim.geo.FRONT)\n ),\n ),\n (\n \"rotate_sensor_anti_clockwise\",\n ExpectedDelta(\n delta_rot=quat_from_angle_axis(np.deg2rad(10.0), habitat_sim.geo.FRONT)\n ),\n ),\n]\n\n\[email protected](\"action,expected\", default_sensor_control_testdata)\ndef test_default_sensor_contorls(action, expected):\n scene_graph = habitat_sim.SceneGraph()\n agent_config = habitat_sim.AgentConfiguration()\n agent_config.action_space = dict(\n move_up=habitat_sim.ActionSpec(\n \"move_up\", habitat_sim.ActuationSpec(amount=0.25)\n ),\n move_down=habitat_sim.ActionSpec(\n \"move_down\", habitat_sim.ActuationSpec(amount=0.25)\n ),\n look_left=habitat_sim.ActionSpec(\n \"look_left\", habitat_sim.ActuationSpec(amount=10.0)\n ),\n look_right=habitat_sim.ActionSpec(\n \"look_right\", habitat_sim.ActuationSpec(amount=10.0)\n ),\n look_up=habitat_sim.ActionSpec(\n \"look_up\", habitat_sim.ActuationSpec(amount=10.0)\n ),\n look_down=habitat_sim.ActionSpec(\n \"look_down\", habitat_sim.ActuationSpec(amount=10.0)\n ),\n rotate_sensor_clockwise=habitat_sim.ActionSpec(\n \"rotate_sensor_clockwise\", habitat_sim.ActuationSpec(amount=10.0)\n ),\n rotate_sensor_anti_clockwise=habitat_sim.ActionSpec(\n \"rotate_sensor_anti_clockwise\", habitat_sim.ActuationSpec(amount=10.0)\n ),\n )\n agent = habitat_sim.Agent(scene_graph.get_root_node().create_child(), agent_config)\n\n state = agent.state\n agent.act(action)\n new_state = agent.state\n\n _check_state_same(state, new_state)\n for k, v in state.sensor_states.items():\n assert k in new_state.sensor_states\n _check_state_expected(v, new_state.sensor_states[k], expected)\n\n\[email protected]()\ndef scene_graph():\n return habitat_sim.SceneGraph()\n\n\[email protected](\n \"control_name,control_axis\",\n [(\"look_up\", 0), (\"look_down\", 0), (\"look_left\", 1), (\"look_right\", 1)],\n)\[email protected](\n actuation_amount=st.floats(0, 60), actuation_constraint=st.floats(0, 60)\n)\ndef test_constrainted(\n scene_graph, control_name, control_axis, actuation_amount, actuation_constraint\n):\n initial_look_angle = mn.Deg(\n np.random.uniform(-actuation_constraint, actuation_constraint)\n )\n rotation_vector = mn.Vector3()\n rotation_vector[control_axis] = 1\n initial_rotation = mn.Quaternion.rotation(\n mn.Rad(initial_look_angle), rotation_vector\n )\n\n node = scene_graph.get_root_node().create_child()\n node.rotation = initial_rotation\n\n spec = habitat_sim.agent.controls.ActuationSpec(\n actuation_amount, actuation_constraint\n )\n habitat_sim.registry.get_move_fn(control_name)(node, spec)\n\n expected_angle = 
initial_look_angle + mn.Deg(\n -actuation_amount\n if control_name in {\"look_down\", \"look_right\"}\n else actuation_amount\n )\n\n if expected_angle > mn.Deg(actuation_constraint):\n expected_angle = mn.Deg(actuation_constraint)\n elif expected_angle < mn.Deg(-actuation_constraint):\n expected_angle = mn.Deg(-actuation_constraint)\n\n final_rotation = node.rotation\n\n look_vector = final_rotation.transform_vector(habitat_sim.geo.FRONT)\n if control_axis == 0:\n look_angle = mn.Deg(mn.Rad(np.arctan2(look_vector[1], -look_vector[2])))\n elif control_axis == 1:\n look_angle = -mn.Deg(mn.Rad(np.arctan2(look_vector[0], -look_vector[2])))\n\n assert np.abs(float(expected_angle - look_angle)) < 1e-1\n" ]
[ [ "numpy.allclose", "numpy.linalg.norm", "numpy.quaternion", "numpy.arctan2", "numpy.deg2rad", "numpy.random.uniform", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
readthedocs-assistant/tabmat
[ "c2d82db1901afb0ea79806f26ee66068d553b3e6" ]
[ "src/tabmat/dense_matrix.py" ]
[ "from typing import List, Optional, Union\n\nimport numpy as np\n\nfrom .ext.dense import (\n dense_matvec,\n dense_rmatvec,\n dense_sandwich,\n transpose_square_dot_weights,\n)\nfrom .matrix_base import MatrixBase\nfrom .util import (\n check_matvec_out_shape,\n check_transpose_matvec_out_shape,\n setup_restrictions,\n)\n\n\nclass DenseMatrix(np.ndarray, MatrixBase):\n \"\"\"\n A ``numpy.ndarray`` subclass with several additional functions that allow\n it to share the MatrixBase API with SparseMatrix and CategoricalMatrix.\n\n In particular, we have added:\n\n - The ``sandwich`` product\n - ``getcol`` to support the same interface as SparseMatrix for retrieving a\n single column\n - ``toarray``\n - ``matvec``\n\n \"\"\"\n\n def __new__(cls, input_array): # noqa\n \"\"\"\n Details of how to subclass np.ndarray are explained here:\n\n https://docs.scipy.org/doc/numpy/user/basics.subclassing.html\\\n #slightly-more-realistic-example-attribute-added-to-existing-array\n \"\"\"\n obj = np.asarray(input_array).view(cls)\n if not np.issubdtype(obj.dtype, np.floating):\n raise NotImplementedError(\"DenseMatrix is only implemented for float data\")\n return obj\n\n def __array_finalize__(self, obj):\n if obj is None:\n return\n\n def getcol(self, i):\n \"\"\"Return matrix column at specified index.\"\"\"\n return self[:, [i]]\n\n def toarray(self):\n \"\"\"Return array representation of matrix.\"\"\"\n return np.asarray(self)\n\n def sandwich(\n self, d: np.ndarray, rows: np.ndarray = None, cols: np.ndarray = None\n ) -> np.ndarray:\n \"\"\"Perform a sandwich product: X.T @ diag(d) @ X.\"\"\"\n d = np.asarray(d)\n rows, cols = setup_restrictions(self.shape, rows, cols)\n return dense_sandwich(self, d, rows, cols)\n\n def _cross_sandwich(\n self,\n other: MatrixBase,\n d: np.ndarray,\n rows: Optional[np.ndarray] = None,\n L_cols: Optional[np.ndarray] = None,\n R_cols: Optional[np.ndarray] = None,\n ):\n from .categorical_matrix import CategoricalMatrix\n from .sparse_matrix import SparseMatrix\n\n if isinstance(other, SparseMatrix) or isinstance(other, CategoricalMatrix):\n return other._cross_sandwich(self, d, rows, R_cols, L_cols).T\n raise TypeError\n\n def _get_col_stds(self, weights: np.ndarray, col_means: np.ndarray) -> np.ndarray:\n \"\"\"Get standard deviations of columns.\"\"\"\n sqrt_arg = transpose_square_dot_weights(self, weights) - col_means ** 2\n # Minor floating point errors above can result in a very slightly\n # negative sqrt_arg (e.g. -5e-16). We just set those values equal to\n # zero.\n sqrt_arg[sqrt_arg < 0] = 0\n return np.sqrt(sqrt_arg)\n\n def _matvec_helper(\n self,\n vec: Union[List, np.ndarray],\n rows: Optional[np.ndarray],\n cols: Optional[np.ndarray],\n out: Optional[np.ndarray],\n transpose: bool,\n ):\n # Because the dense_rmatvec takes a row array and col array, it has\n # added overhead compared to a raw matrix vector product. So, when\n # we're not filtering at all, let's just use default numpy dot product.\n #\n # TODO: related to above, it could be nice to have a version that only\n # filters rows and a version that only filters columns. 
How do we do\n # this without an explosion of code?\n X = self.T if transpose else self\n vec = np.asarray(vec)\n\n # NOTE: We assume that rows and cols are unique\n unrestricted_rows = rows is None or len(rows) == self.shape[0]\n unrestricted_cols = cols is None or len(cols) == self.shape[1]\n\n if unrestricted_rows and unrestricted_cols:\n if out is None:\n out = X.dot(vec)\n else:\n out += X.dot(vec)\n return out\n else:\n rows, cols = setup_restrictions(self.shape, rows, cols)\n # TODO: should take 'out' parameter\n fast_fnc = dense_rmatvec if transpose else dense_matvec\n if vec.ndim == 1:\n res = fast_fnc(self, vec, rows, cols)\n elif vec.ndim == 2 and vec.shape[1] == 1:\n res = fast_fnc(self, vec[:, 0], rows, cols)[:, None]\n else:\n subset = self[np.ix_(rows, cols)]\n res = subset.T.dot(vec[rows]) if transpose else subset.dot(vec[cols])\n if out is None:\n return res\n if transpose:\n out[cols] += res\n else:\n # Note that currently 'rows' will always be all rows\n out[rows] += res\n return out\n\n def transpose_matvec(\n self,\n vec: Union[np.ndarray, List],\n rows: np.ndarray = None,\n cols: np.ndarray = None,\n out: np.ndarray = None,\n ) -> np.ndarray:\n \"\"\"Perform: self[rows, cols].T @ vec.\"\"\"\n check_transpose_matvec_out_shape(self, out)\n return self._matvec_helper(vec, rows, cols, out, True)\n\n def matvec(\n self,\n vec: Union[np.ndarray, List],\n cols: np.ndarray = None,\n out: np.ndarray = None,\n ) -> np.ndarray:\n \"\"\"Perform self[:, cols] @ other.\"\"\"\n check_matvec_out_shape(self, out)\n return self._matvec_helper(vec, None, cols, out, False)\n" ]
[ [ "numpy.asarray", "numpy.issubdtype", "numpy.ix_", "numpy.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vishalbelsare/RLScore
[ "713f0a402f7a09e41a609f2ddcaf849b2021a0a7", "713f0a402f7a09e41a609f2ddcaf849b2021a0a7" ]
[ "rlscore/test/test_learner/test_query_rankrls.py", "rlscore/learner/greedy_nfold_rls.py" ]
[ "import numpy as np\nimport numpy.linalg as la\nfrom numpy.testing import assert_allclose\nimport unittest\n\nfrom rlscore.learner import QueryRankRLS\nfrom rlscore.kernel import GaussianKernel, PolynomialKernel\n\n\ndef mapQids(qids):\n \"\"\"Maps qids to running numbering starting from zero, and partitions\n the training data indices so that each partition corresponds to one\n query\"\"\"\n qid_dict = {}\n folds = {}\n counter = 0\n for index, qid in enumerate(qids):\n if not qid in qid_dict:\n qid_dict[qid] = counter\n folds[qid] = []\n counter += 1\n folds[qid].append(index)\n indslist = []\n for f in folds.values():\n indslist.append(f)\n return indslist\n\ndef generate_qids(m):\n qids = []\n qsize = int(m / 10)\n for i in range(int(m / qsize)):\n qids = qids + [i] * qsize\n qids = qids + [i + 1] * (m % qsize)\n objcount = np.max(qids)+1\n P = np.zeros((m, objcount))\n for i in range(m):\n qid = qids[i]\n P[i, qid] = 1.\n labelcounts = np.sum(P, axis=0)\n P = np.divide(P, np.sqrt(labelcounts))\n D = np.ones((1, m))\n L = np.multiply(np.eye(m), D) - np.dot(P, P.T)\n return qids, L\n\nclass Test(unittest.TestCase):\n \n def setUp(self):\n np.random.seed(100)\n m= 30\n self.Xtrain1 = np.random.rand(m, 20)\n self.Xtrain2 = np.random.rand(m, 40)\n self.Ytrain1 = np.random.randn(m)\n self.Ytrain2 = np.random.randn(m, 5)\n self.bvectors = [0,3,5,22]\n \n #@unittest.skip(\"does not work\") \n def test_linear_subset(self):\n X = self.Xtrain1\n Y = self.Ytrain1\n m = X.shape[0]\n qids, L = generate_qids(m)\n #reduced set approximation\n primal_rls = QueryRankRLS(X, Y, qids, basis_vectors = X[self.bvectors], regparam=0.001)\n W = primal_rls.predictor.W\n K = np.dot(X, X.T)\n Kr = K[:, self.bvectors]\n Krr = K[np.ix_(self.bvectors, self.bvectors)]\n A = np.linalg.solve(np.dot(Kr.T, np.dot(L, Kr))+ 0.001 * Krr, np.dot(Kr.T, np.dot(L, Y)))\n #W_reduced = np.dot(X[self.bvectors].T, A)\n W_reduced = np.dot(X[self.bvectors].T, A)\n assert_allclose(W, W_reduced)\n \n def test_linear(self):\n #Test that learning with linear kernel works correctly both\n #with low and high-dimensional data\n for X in [self.Xtrain1, self.Xtrain2]:\n for Y in [self.Ytrain1, self.Ytrain2]:\n #Basic case\n m = X.shape[0]\n qids, L = generate_qids(m)\n primal_rls = QueryRankRLS(X, Y, qids, regparam=1.0, bias=0.)\n W = primal_rls.predictor.W\n d = X.shape[1]\n W2 = np.linalg.solve(np.dot(X.T, np.dot(L, X)) + np.eye(d), np.dot(X.T, np.dot(L, Y)))\n assert_allclose(W, W2)\n #For RankRLS, bias should have no effect\n primal_rls = QueryRankRLS(X, Y, qids, regparam=1.0, bias=5.)\n W2 = primal_rls.predictor.W\n assert_allclose(W, W2)\n #Fast regularization\n primal_rls.solve(10)\n W = primal_rls.predictor.W\n W2 = np.linalg.solve(np.dot(X.T, np.dot(L, X)) + 10 * np.eye(d), np.dot(X.T, np.dot(L, Y)))\n assert_allclose(W, W2)\n #reduced set approximation\n primal_rls = QueryRankRLS(X, Y, qids, basis_vectors = X[self.bvectors], regparam=5.0)\n W = primal_rls.predictor.W\n K = np.dot(X, X.T)\n Kr = K[:, self.bvectors]\n Krr = K[np.ix_(self.bvectors, self.bvectors)]\n A = np.linalg.solve(np.dot(Kr.T, np.dot(L, Kr))+ 5.0 * Krr, np.dot(Kr.T, np.dot(L, Y)))\n W_reduced = np.dot(X[self.bvectors].T, A)\n #assert_allclose(W, W_reduced)\n #Pre-computed linear kernel, reduced set approximation\n dual_rls = QueryRankRLS(Kr, Y, qids, kernel=\"PrecomputedKernel\", basis_vectors = Krr, regparam=5.0)\n W = np.dot(X[self.bvectors].T, dual_rls.predictor.W)\n assert_allclose(W, W_reduced)\n# #Precomputed kernel matrix\n# dual_rls = GlobalRankRLS(K, Y, 
kernel = \"PrecomputedKernel\", regparam=0.01)\n# W = np.dot(X.T, dual_rls.predictor.W)\n# W2 = np.linalg.solve(np.dot(X.T, np.dot(L, X)) + 0.01 * np.eye(d), np.dot(X.T, np.dot(L, Y)))\n# assert_allclose(W, W2)\n\n def test_kernel(self):\n #tests that learning with kernels works\n for X in [self.Xtrain1, self.Xtrain2]:\n for Y in [self.Ytrain1, self.Ytrain2]:\n m = X.shape[0]\n qids, L = generate_qids(m)\n #Basic case\n dual_rls = QueryRankRLS(X, Y, qids, kernel= \"GaussianKernel\", regparam=5.0, gamma=0.01)\n kernel = GaussianKernel(X, gamma = 0.01)\n K = kernel.getKM(X)\n m = K.shape[0]\n A = dual_rls.predictor.A\n A2 = np.linalg.solve(np.dot(L, K) +5.0*np.eye(m), np.dot(L, Y) )\n assert_allclose(A, A2)\n #Fast regularization\n dual_rls.solve(1000)\n A = dual_rls.predictor.A\n A2 = np.linalg.solve(np.dot(L, K) + 1000 * np.eye(m), np.dot(L, Y))\n assert_allclose(A, A2)\n #Precomputed kernel\n dual_rls = QueryRankRLS(K, Y, qids, kernel=\"PrecomputedKernel\", regparam = 1000)\n assert_allclose(dual_rls.predictor.W, A2)\n #Reduced set approximation\n kernel = PolynomialKernel(X[self.bvectors], gamma=0.5, coef0 = 1.2, degree = 2) \n Kr = kernel.getKM(X)\n Krr = kernel.getKM(X[self.bvectors])\n dual_rls = QueryRankRLS(X, Y, qids, kernel=\"PolynomialKernel\", basis_vectors = X[self.bvectors], regparam = 200, gamma=0.5, coef0=1.2, degree = 2)\n A = dual_rls.predictor.A\n A2 = np.linalg.solve(np.dot(Kr.T, np.dot(L, Kr))+ 200 * Krr, np.dot(Kr.T, np.dot(L, Y)))\n assert_allclose(A, A2)\n dual_rls = QueryRankRLS(Kr, Y, qids, kernel=\"PrecomputedKernel\", basis_vectors = Krr, regparam=200)\n A = dual_rls.predictor.W\n assert_allclose(A, A2)\n \n def test_holdout(self):\n for X in [self.Xtrain1, self.Xtrain2]:\n for Y in [self.Ytrain1, self.Ytrain2]:\n m = X.shape[0]\n qids, L = generate_qids(m)\n qids = np.array(qids)\n hoindices = np.where(qids == 1)[0]\n hocompl = list(set(range(m)) - set(hoindices))\n #Holdout with linear kernel\n rls1 = QueryRankRLS(X, Y, qids)\n rls2 = QueryRankRLS(X[hocompl], Y[hocompl], qids[hocompl])\n P1 = rls1.holdout(hoindices)\n P2 = rls2.predict(X[hoindices])\n assert_allclose(P1, P2)\n #Holdout with bias\n rls1 = QueryRankRLS(X, Y, qids, bias = 3.0)\n rls2 = QueryRankRLS(X[hocompl], Y[hocompl], qids[hocompl], bias = 3.0)\n P1 = rls1.holdout(hoindices)\n P2 = rls2.predict(X[hoindices])\n assert_allclose(P1, P2)\n #Fast regularization\n for i in range(-5, 5):\n rls1.solve(2**i)\n rls2.solve(2**i)\n P1 = rls1.holdout(hoindices)\n P2 = rls2.predict(X[hoindices])\n assert_allclose(P1, P2)\n #Kernel holdout\n rls1 = QueryRankRLS(X, Y, qids, kernel = \"GaussianKernel\", gamma = 0.01)\n rls2 = QueryRankRLS(X[hocompl], Y[hocompl], qids[hocompl], kernel = \"GaussianKernel\", gamma = 0.01)\n P1 = rls1.holdout(hoindices)\n P2 = rls2.predict(X[hoindices])\n assert_allclose(P1, P2)\n for i in range(-15, 15):\n rls1.solve(2**i)\n rls2.solve(2**i)\n P1 = rls1.holdout(hoindices)\n P2 = rls2.predict(X[hoindices])\n assert_allclose(P1, P2)\n #Incorrect indices\n I = [0, 3, 100]\n self.assertRaises(IndexError, rls1.holdout, I)\n I = [-1, 0, 2]\n self.assertRaises(IndexError, rls1.holdout, I)\n I = [1,1,2]\n self.assertRaises(IndexError, rls1.holdout, I)\n I = [0,4,8]\n self.assertRaises(IndexError, rls1.holdout, I)\n \n def testLabelRankRLS(self):\n \n print(\"Testing the cross-validation routines of the QueryRankRLS module.\\n\")\n \n np.random.seed(100)\n floattype = np.float64\n \n m, n = 100, 400 #data, features\n Xtrain = np.mat(np.random.rand(m, n))\n K = Xtrain * Xtrain.T\n ylen = 
1\n Y = np.mat(np.zeros((m, ylen), dtype=floattype))\n Y[:, 0] = np.sum(Xtrain, 1)\n \n \n labelcount = 5\n \n hoindices = range(labelcount)\n hocompl = list(set(range(m)) - set(hoindices))\n \n qidlist = [0 for i in range(100)]\n for h in range(5, 12):\n qidlist[h] = 1\n for h in range(12, 32):\n qidlist[h] = 2\n for h in range(32, 34):\n qidlist[h] = 3\n for h in range(34, 85):\n qidlist[h] = 4\n for h in range(85, 100):\n qidlist[h] = 5\n qidlist_cv = qidlist[5: len(qidlist)]\n \n objcount = max(qidlist) + 1\n P = np.mat(np.zeros((m, objcount), dtype=np.float64))\n for i in range(m):\n qid = qidlist[i]\n P[i, qid] = 1.\n labelcounts = np.sum(P, axis=0)\n P = np.divide(P, np.sqrt(labelcounts))\n D = np.mat(np.ones((1, m), dtype=np.float64))\n L = np.multiply(np.eye(m), D) - P * P.T\n \n Kcv = K[np.ix_(hocompl, hocompl)]\n Lcv = L[np.ix_(hocompl, hocompl)]\n \n Xcv = Xtrain[hocompl]\n Xtest = Xtrain[hoindices]\n Yho = Y[hocompl]\n \n rpool = {}\n rpool[\"X\"] = Xtrain\n rpool[\"Y\"] = Y\n rpool[\"qids\"] = qidlist\n primalrls = QueryRankRLS(**rpool) \n \n rpool = {}\n rpool[\"X\"] = K\n rpool['kernel'] = 'PrecomputedKernel'\n rpool[\"Y\"] = Y\n rpool[\"qids\"] = qidlist \n dualrls = QueryRankRLS(**rpool)\n \n rpool = {}\n rpool['X'] = Xcv\n rpool['Y'] = Yho\n rpool['qids'] = qidlist_cv\n primalrls_naive = QueryRankRLS(**rpool)\n\n rpool = {}\n rpool['X'] = Kcv\n rpool['kernel'] = 'PrecomputedKernel' \n rpool['Y'] = Yho\n #rpool['X'] = Xcv\n rpool['qids'] = qidlist_cv\n dualrls_naive = QueryRankRLS(**rpool)\n \n testkm = K[np.ix_(hocompl, hoindices)]\n \n loglambdas = range(-5, 5)\n for j in range(0, len(loglambdas)):\n regparam = 2. ** loglambdas[j]\n print\n print(\"Regparam 2^%1d\" % loglambdas[j])\n \n \n print(str(np.squeeze(np.array((testkm.T * la.inv(Lcv * Kcv + regparam * np.eye(Lcv.shape[0])) * Lcv * Yho).T))) + ' Dumb HO')\n \n predhos = []\n primalrls_naive.solve(regparam)\n predho = primalrls_naive.predictor.predict(Xtest)\n print(str(predho.T) + ' Naive HO (primal)')\n predhos.append(predho)\n \n dualrls_naive.solve(regparam)\n predho = dualrls_naive.predictor.predict(testkm.T)\n print(str(predho.T) + ' Naive HO (dual)')\n predhos.append(predho)\n \n primalrls.solve(regparam)\n predho = np.squeeze(primalrls.holdout(hoindices))\n print(str(predho.T) + ' Fast HO (primal)')\n predhos.append(predho)\n \n dualrls.solve(regparam)\n predho = np.squeeze(dualrls.holdout(hoindices))\n print(str(predho.T) + ' Fast HO (dual)')\n predhos.append(predho)\n \n predho0 = predhos.pop(0)\n for predho in predhos:\n self.assertEqual(predho0.shape, predho.shape)\n for row in range(predho.shape[0]):\n #for col in range(predho.shape[1]):\n # self.assertAlmostEqual(predho0[row,col],predho[row,col], places=5)\n self.assertAlmostEqual(predho0[row],predho[row], places=5)\n", "#\n# The MIT License (MIT)\n#\n# This file is part of RLScore \n#\n# Copyright (c) 2014 - 2016 Tapio Pahikkala, Antti Airola\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS 
PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nimport scipy\nimport scipy.sparse as sp\nimport numpy as np\n\nclass GreedyNFoldRLS(object):\n \n def loadResources(self):\n \"\"\"\n Loads the resources from the previously set resource pool.\n \n @raise Exception: when some of the resources required by the learner is not available in the ResourcePool object.\n \"\"\"\n \n Y = self.resource_pool['Y']\n self.Y = Y\n #Number of training examples\n self.size = Y.shape[0]\n if not Y.shape[1] == 1:\n raise Exception('GreedyRLS currently supports only one output at a time. The output matrix is now of shape ' + str(Y.shape) + '.')\n \n X = self.resource_pool['X']\n self.setDataMatrix(X.T)\n if self.resource_pool.has_key('bias'):\n self.bias = float(self.resource_pool['bias'])\n else:\n self.bias = 0.\n if self.resource_pool.has_key('measure'):\n self.measure = self.resource_pool['measure']\n else:\n self.measure = None\n qids = self.resource_pool['qids']\n if not self.resource_pool.has_key('cross-validation_folds'):\n self.resource_pool['cross-validation_folds'] = qids\n self.setQids(qids)\n self.results = {}\n \n \n def setQids(self, qids):\n \"\"\"Sets the qid parameters of the training examples. The list must have as many qids as there are training examples.\n \n @param qids: A list of qid parameters.\n @type qids: List of integers.\"\"\"\n \n self.qidlist = [-1 for i in range(self.size)]\n for i in range(len(qids)):\n for j in qids[i]:\n if j >= self.size:\n raise Exception(\"Index %d in query out of training set index bounds\" %j)\n elif j < 0:\n raise Exception(\"Negative index %d in query, query indices must be non-negative\" %j)\n else:\n self.qidlist[j] = i\n if -1 in self.qidlist:\n raise Exception(\"Not all training examples were assigned a query\")\n \n \n self.qidmap = {}\n for i in range(len(self.qidlist)):\n qid = self.qidlist[i]\n if self.qidmap.has_key(qid):\n sameqids = self.qidmap[qid]\n sameqids.append(i)\n else:\n self.qidmap[qid] = [i]\n self.indslist = []\n for qid in self.qidmap.keys():\n self.indslist.append(self.qidmap[qid])\n \n \n def setDataMatrix(self, X):\n \"\"\"\n Sets the label data for RLS.\n \n @param X: Features of the training examples.\n @type X: scipy sparse matrix\n \"\"\"\n if isinstance(X, scipy.sparse.base.spmatrix):\n self.X = X.todense()\n else:\n self.X = X\n \n \n def train(self):\n regparam = float(self.resource_pool['regparam'])\n self.regparam = regparam\n self.solve_bu(regparam)\n \n \n def solve_bu(self, regparam):\n \"\"\"Trains RLS with the given value of the regularization parameter\n \n @param regparam: value of the regularization parameter\n @type regparam: float\n \"\"\"\n \n self.regparam = regparam\n \n \n X = self.X\n Y = self.Y\n \n if not hasattr(self, \"bias\"):\n self.bias = 0.\n \n tsize = self.size\n fsize = X.shape[0]\n assert X.shape[1] == tsize\n \n rp = regparam\n rpinv = 1. 
/ rp\n \n \n if not self.resource_pool.has_key('subsetsize'):\n raise Exception(\"Parameter 'subsetsize' must be given.\")\n desiredfcount = int(self.resource_pool['subsetsize'])\n if not fsize >= desiredfcount:\n raise Exception('The overall number of features ' + str(fsize) + ' is smaller than the desired number ' + str(desiredfcount) + ' of features to be selected.')\n \n \n \n if self.resource_pool.has_key('calculate_test_error'):\n calculate_test_error = self.resource_pool['calculate_test_error']\n if calculate_test_error == 'True':\n calculate_test_error = True\n self.testY = self.resource_pool['test_labels']\n self.testX = self.resource_pool['prediction_features'].todense()\n self.testQids = self.resource_pool['test_qids'].readQids()\n \n self.testperformances = []\n \n self.testqidmap = {}\n for i in range(len(self.testQids)):\n qid = self.testQids[i]\n if self.testqidmap.has_key(qid):\n sameqids = self.testqidmap[qid]\n sameqids.append(i)\n else:\n self.testqidmap[qid] = [i]\n self.testindslist = []\n for qid in self.testqidmap.keys():\n self.testindslist.append(self.testqidmap[qid])\n else:\n calculate_test_error = False\n else:\n calculate_test_error = False\n \n \n \n \n #Biaz\n cv = np.sqrt(self.bias)*np.mat(np.ones((1, tsize)))\n ca = rpinv * (1. / (1. + cv * rpinv * cv.T)) * (cv * rpinv)\n \n \n self.A = rpinv * Y - cv.T * rpinv * (1. / (1. + cv * rpinv * cv.T)) * (cv * rpinv * Y)\n \n XT = X.T\n GXT = rpinv * XT - cv.T * rpinv * (1. / (1. + cv * rpinv * cv.T)) * ((cv * rpinv) * XT)\n yac = []\n yyac = []\n \n for inds in self.indslist:\n u = cv.T[inds, 0]\n v = ca[0, inds]\n temp = rp * GXT[inds] - rp * rp * u * (1. / (-1. + rp * v * u)) * (v * GXT[inds])\n yac.append(temp)\n temp = rp * self.A[inds] - rp * rp * u * (1. / (-1. + rp * v * u)) * (v * self.A[inds])\n yyac.append(temp)\n \n listX = []\n for ci in range(fsize):\n listX.append(X[ci])\n \n self.selected = []\n \n currentfcount = 0\n self.performances = []\n while currentfcount < desiredfcount:\n \n bestlqocvperf = float('inf')\n \n for ci in range(fsize):\n if ci in self.selected: continue\n cv = listX[ci]\n GXT_ci = GXT[:, ci]\n const = 1. / (1. + cv * GXT_ci)[0, 0]\n cvA = (const * (cv * self.A))[0, 0]\n updA = self.A - GXT_ci * cvA\n lqocvperf = 0.\n for qi in range(len(self.indslist)):\n inds = self.indslist[qi]\n V = GXT_ci[inds].T\n MVT = yac[qi][:, ci]\n gamma = (1. / (-const ** -1. + V * MVT))[0, 0]\n lqodiff = yyac[qi] - cvA * MVT - gamma * MVT * (MVT.T * updA[inds])\n lqocvperf += (lqodiff.T * lqodiff)[0, 0]\n \n if lqocvperf < bestlqocvperf:\n bestcind = ci\n bestlqocvperf = lqocvperf\n \n '''\n if not self.measure is None:\n loopred = Y - multiply(invupddiagG, updA)\n looperf = self.measure.multiOutputPerformance(Y, loopred)\n if bestlooperf is None:\n bestlooperf = looperf\n bestcind = ci\n if self.measure.comparePerformances(looperf, bestlooperf) > 0:\n bestcind = ci\n bestlooperf = looperf\n else:\n #This default squared performance is a bit faster to compute than the one loaded separately.\n loodiff = multiply(invupddiagG, updA)\n looperf = (loodiff.T * loodiff)[0, 0]\n if looperf < bestlooperf:\n bestcind = ci\n bestlooperf = looperf\n '''\n \n self.bestlqocvperf = bestlqocvperf\n self.performances.append(bestlqocvperf)\n cv = listX[bestcind]\n GXT_ci = GXT[:, bestcind]\n const = (1. / (1. 
+ cv * GXT_ci))[0, 0]\n cvA = const * cv * self.A\n self.A = self.A - GXT_ci * cvA\n cvGXT = const * cv * GXT\n GXT = GXT - GXT_ci * cvGXT\n for qi in range(len(self.indslist)):\n inds = self.indslist[qi]\n V = GXT_ci[inds].T\n MVT = yac[qi][:, bestcind]\n gammaMVT = MVT * (1. / (-const ** -1. + V * MVT))\n yyac[qi] = yyac[qi] - MVT * cvA - gammaMVT * (MVT.T * self.A[inds])\n yac[qi] = yac[qi] - MVT * cvGXT - gammaMVT * (MVT.T * GXT[inds])\n self.selected.append(bestcind)\n currentfcount += 1\n \n if calculate_test_error:\n bias_slice = np.sqrt(self.bias) * np.mat(np.ones((1,X.shape[1]),dtype=np.float64))\n X_biased = np.vstack([X,bias_slice])\n selected_plus_bias = self.selected+[fsize]\n cutdiag = sp.lil_matrix((fsize+1, currentfcount + 1))\n for ci, col in zip(selected_plus_bias, range(currentfcount + 1)):\n cutdiag[ci, col] = 1.\n W = cutdiag * (X_biased[selected_plus_bias] * self.A)\n bias_slice = np.sqrt(self.bias) * np.mat(np.ones((1,self.testX.shape[1]),dtype=np.float64))\n testX_biased = np.vstack([self.testX,bias_slice])\n self.Y_predicted = testX_biased.T * W\n if not self.callbackfun is None:\n self.callbackfun.callback(self)\n if not self.callbackfun is None:\n self.callbackfun.finished(self)\n \n bias_slice = np.sqrt(self.bias) * np.mat(np.ones((1,X.shape[1]),dtype=np.float64))\n X = np.vstack([X,bias_slice])\n selected_plus_bias = self.selected+[fsize]\n cutdiag = sp.lil_matrix((fsize+1, currentfcount + 1))\n for ci, col in zip(selected_plus_bias, range(currentfcount + 1)):\n cutdiag[ci, col] = 1.\n self.A = cutdiag * (X[selected_plus_bias] * self.A)\n self.results['selected_features'] = self.selected\n self.results['GreedyRLS_LOO_performances'] = self.performances\n if calculate_test_error:\n self.results['GreedyRLS_test_performances'] = self.testperformances\n" ]
[ [ "numpy.dot", "numpy.ix_", "numpy.sqrt", "numpy.random.seed", "numpy.eye", "numpy.ones", "numpy.max", "numpy.random.randn", "numpy.random.rand", "numpy.where", "numpy.testing.assert_allclose", "numpy.array", "numpy.zeros", "numpy.sum" ], [ "numpy.ones", "numpy.sqrt", "numpy.vstack", "scipy.sparse.lil_matrix" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
marwahaha/QTensor
[ "936d078825a6418f9d32d2c176332422d8a4c137", "936d078825a6418f9d32d2c176332422d8a4c137", "936d078825a6418f9d32d2c176332422d8a4c137", "936d078825a6418f9d32d2c176332422d8a4c137" ]
[ "qtensor/tests/test_simulators.py", "qtensor/optimisation/RGreedy.py", "qtensor/tests/qiskit_qaoa_energy.py", "qtensor/OpFactory.py" ]
[ "import qtensor\nfrom qtensor import CirqQAOAComposer, QtreeQAOAComposer\nfrom qtensor import QAOAQtreeSimulator\nfrom qtensor.Simulate import CirqSimulator, QtreeSimulator\nfrom qtensor.FeynmanSimulator import FeynmanSimulator\nimport numpy as np\nimport networkx as nx\n\nnp.random.seed(42)\n\n\ndef get_test_problem(n=14, p=2, d=3):\n w = np.array([[0,1,1,0],[1,0,1,1],[1,1,0,1],[0,1,1,0]])\n G = nx.from_numpy_matrix(w)\n\n G = nx.random_regular_graph(d, n)\n gamma, beta = [np.pi/3]*p, [np.pi/2]*p\n return G, gamma, beta\n\ndef test_qtree():\n G, gamma, beta = get_test_problem()\n\n composer = QtreeQAOAComposer(\n graph=G, gamma=gamma, beta=beta)\n composer.ansatz_state()\n\n print(composer.circuit)\n sim = QtreeSimulator()\n result = sim.simulate(composer.circuit)\n print(result)\n qtree_amp = result\n\n composer = CirqQAOAComposer(\n graph=G, gamma=gamma, beta=beta)\n composer.ansatz_state()\n\n print(composer.circuit)\n sim = CirqSimulator()\n result = sim.simulate(composer.circuit)\n print(result)\n final_cirq = result.final_state\n assert final_cirq[0] - qtree_amp < 1e-5\n\n assert result\n\n\ndef test_parallel_batched():\n G, gamma, beta = get_test_problem(14, 3, d=4)\n batch_vars = 3\n\n composer = QtreeQAOAComposer(\n graph=G, gamma=gamma, beta=beta)\n composer.ansatz_state()\n\n sim = QtreeSimulator()\n amp = sim.simulate(composer.circuit)\n amps = sim.simulate_batch(composer.circuit, batch_vars=2)\n print('ordinary qtree amp', amp)\n print('ordinary qtree 2 amps', amps)\n assert abs( amp - amps[0]) < 1e-6\n\n sim = FeynmanSimulator()\n result = sim.simulate(composer.circuit, batch_vars=batch_vars, tw_bias=7)\n print(result)\n\n batch_amps = 2**batch_vars\n assert len(result) == batch_amps\n assert abs( amp - result[0]) < 1e-6\n\n\ndef test_qtree_energy():\n G, gamma, beta = get_test_problem(16, 2, d=3)\n\n sim = QAOAQtreeSimulator(QtreeQAOAComposer)\n E = sim.energy_expectation(\n G=G, gamma=gamma, beta=beta)\n\n print('Energy', E)\n assert np.imag(E)<1e-6\n\n E = np.real(E)\n\n Ed = G.number_of_edges()\n C = (Ed - E)/2\n\n print(\"Edges\", Ed)\n print(\"Cost\", C)\n assert E\n\nif __name__ == \"__main__\":\n #test_qtree_energy()\n test_parallel_batched()\n", "import numpy as np\nimport copy, operator\nfrom qtensor.optimisation.Optimizer import OrderingOptimizer\nfrom qtensor import utils\nfrom functools import reduce\nimport networkx as nx\nimport qtree\n\ndef reducelist(f, lst, x=0):\n prev = x\n for i in lst:\n prev = f(prev, i)\n yield prev\n\nclass RGreedyOptimizer(OrderingOptimizer):\n \"\"\"\n An orderer that greedy selects vertices\n using boltzman probabilities.\n\n \"\"\"\n def __init__(self, *args, temp=0.002, repeats=10, **kwargs):\n super().__init__(*args, **kwargs)\n self.temp = temp\n self.repeats = repeats\n\n def _get_ordering(self, graph, **kwargs):\n node_names = nx.get_node_attributes(graph, 'name')\n node_sizes = nx.get_node_attributes(graph, 'size')\n peo, path = self._get_ordering_ints(graph)\n\n peo = [qtree.optimizer.Var(var, size=node_sizes[var],\n name=node_names[var])\n for var in peo]\n #print('tw=', max(path))\n return peo, path\n\n def _get_ordering_ints(self, old_graph, free_vars=[]):\n best_peo = None\n best_width = np.inf\n best_widths = None\n\n for i in range(self.repeats):\n graph = copy.deepcopy(old_graph)\n peo = []\n widths = []\n while graph.number_of_nodes():\n ngs = np.array(list(\n map(len, map(operator.itemgetter(1), graph.adjacency()))\n ))\n\n weights = np.exp(-(ngs - np.min(ngs))/self.temp)\n #print(ngs)\n #print(weights)\n # 1, 
3, 5, 2, 1\n distrib = np.array([0]+list(reducelist(lambda x, y:x+y, weights, 0)))\n #print(distrib)\n # 0, 1, 4, 9, 11, 12\n rnd = np.random.random()*distrib[-1]\n # between 0 and 12 = say, 5\n # find the smallest value that larger than rnd\n bool_map = distrib < rnd\n # True, True, True, False, False, False\n select_map = bool_map[1:] ^ bool_map[:-1]\n selected_elem = np.array(list(graph.nodes))[select_map]\n assert len(selected_elem)==1, 'Error in algorithm, please submit an issue'\n selected_node = selected_elem[0]\n utils.eliminate_node_no_structure(graph, selected_node)\n\n peo.append(int(selected_node))\n widths.append(int(ngs[select_map][0]))\n\n if max(widths) < best_width:\n best_peo = peo\n best_widths = widths\n best_width = max(widths)\n\n return best_peo, best_widths\n", "import qiskit\nimport numpy as np\nimport networkx as nx\nfrom functools import partial\n\nimport qiskit\n\ndef qiskit_imports():\n # pylint: disable-msg=no-name-in-module, import-error\n # qiskit version workaround\n if qiskit.__version__ > '0.15.0':\n # new\n from qiskit.aqua.algorithms.minimum_eigen_solvers.qaoa.var_form import QAOAVarForm\n from qiskit.optimization.applications.ising.max_cut import get_operator as get_maxcut_operator\n else:\n # old\n from qiskit.optimization.ising.max_cut import get_operator as get_maxcut_operator\n from qiskit.aqua.algorithms.adaptive.qaoa.var_form import QAOAVarForm\n return get_maxcut_operator, QAOAVarForm\n\nget_maxcut_operator, QAOAVarForm = qiskit_imports()\n\n# Use these lines for import with new qiskit(>=0.19). The resulting QAOA energy will be wrong\n# The change is somewhere in this file: https://github.com/Qiskit/qiskit-aqua/blob/0.7.5/qiskit/aqua/algorithms/minimum_eigen_solvers/qaoa/var_form.py\n# It's ridiculous that nobody found this and never fixed, August 2020\n\n\n# from qiskit.optimization.applications.ising.max_cut import get_operator as get_maxcut_operator\n# from qiskit.aqua.algorithms.minimum_eigen_solvers.qaoa.var_form import QAOAVarForm\nfrom qiskit import Aer, execute\nfrom qiskit.compiler import transpile\n\ndef state_num2str(basis_state_as_num, nqubits):\n return '{0:b}'.format(basis_state_as_num).zfill(nqubits)\n\ndef state_str2num(basis_state_as_str):\n return int(basis_state_as_str, 2)\n\ndef state_reverse(basis_state_as_num, nqubits):\n basis_state_as_str = state_num2str(basis_state_as_num, nqubits)\n new_str = basis_state_as_str[::-1]\n return state_str2num(new_str)\n\ndef get_adjusted_state(state):\n nqubits = np.log2(state.shape[0])\n if nqubits % 1:\n raise ValueError(\"Input vector is not a valid statevector for qubits.\")\n nqubits = int(nqubits)\n\n adjusted_state = np.zeros(2**nqubits, dtype=complex)\n for basis_state in range(2**nqubits):\n adjusted_state[state_reverse(basis_state, nqubits)] = state[basis_state]\n return adjusted_state\n\n\ndef state_to_ampl_counts(vec, eps=1e-15):\n \"\"\"Converts a statevector to a dictionary\n of bitstrings and corresponding amplitudes\n \"\"\"\n qubit_dims = np.log2(vec.shape[0])\n if qubit_dims % 1:\n raise ValueError(\"Input vector is not a valid statevector for qubits.\")\n qubit_dims = int(qubit_dims)\n counts = {}\n str_format = '0{}b'.format(qubit_dims)\n for kk in range(vec.shape[0]):\n val = vec[kk]\n if val.real**2+val.imag**2 > eps:\n counts[format(kk, str_format)] = val\n return counts\n\n\ndef obj_from_statevector(sv, obj_f, precomputed=None):\n \"\"\"Compute objective from Qiskit statevector\n For large number of qubits, this is slow. 
\n To speed up for larger qubits, pass a vector of precomputed energies\n for QAOA, precomputed should be the same as the diagonal of the cost Hamiltonian\n \"\"\"\n if precomputed is None:\n adj_sv = get_adjusted_state(sv)\n counts = state_to_ampl_counts(adj_sv)\n assert(np.isclose(sum(np.abs(v)**2 for v in counts.values()), 1))\n return sum(obj_f(np.array([int(x) for x in k])) * (np.abs(v)**2) for k, v in counts.items())\n else:\n return np.dot(precomputed, np.abs(sv)**2)\n\ndef maxcut_obj(x,w):\n \"\"\"Compute -1 times the value of a cut.\n Args:\n x (numpy.ndarray): binary string as numpy array.\n w (numpy.ndarray): adjacency matrix.\n Returns:\n float: value of the cut.\n \"\"\"\n X = np.outer(x, (1 - x))\n return -np.sum(w * X)\n\ndef simulate_qiskit_amps_new(G, gamma, beta):\n assert len(gamma) == len(beta)\n p = len(gamma)\n # note the ordere of parameters\n parameters = np.concatenate([-np.array(gamma), np.array(beta)])\n w = nx.adjacency_matrix(G, nodelist=list(G.nodes())).toarray()\n qubitOp, offset = get_maxcut_operator(w)\n qc1 = QAOAVarForm(qubitOp.to_opflow(), p=p, initial_state=None).construct_circuit(parameters)\n ex1=execute(qc1, backend=Aer.get_backend('statevector_simulator'))\n sv = ex1.result().get_statevector()\n adj_sv = sv #get_adjusted_state(sv)\n E_0 = qubitOp.evaluate_with_statevector(adj_sv)[0].real\n return -(E_0 + offset)\n\ndef simulate_qiskit_amps(G, gamma, beta):\n assert len(gamma) == len(beta)\n p = len(gamma)\n\n if qiskit.__version__ > '0.15.0':\n return simulate_qiskit_amps_new(G, gamma, beta)\n\n w = nx.adjacency_matrix(G, nodelist=list(G.nodes())).toarray()\n obj = partial(maxcut_obj,w=w)\n C, offset = get_maxcut_operator(w)\n parameters = np.concatenate([beta, -np.array(gamma)])\n\n # When transitioning to newer qiskit this raises error.\n # Adding C.to_opflow() removes the error, but the values are still wrong\n # qiskit version workaround\n varform = QAOAVarForm(p=p,cost_operator=C)\n circuit = varform.construct_circuit(parameters)\n\n #circuit_qiskit = transpile(circuit, optimization_level=0,basis_gates=['u1', 'u2', 'u3', 'cx'])\n sv = execute(circuit, backend=Aer.get_backend(\"statevector_simulator\")).result().get_statevector()\n\n res = - obj_from_statevector(sv, obj)\n return res\n\ndef test_simulate_qiskit_amps():\n elist = [[0, 1], [1, 2], [2, 3], [3, 4], [4, 0], [0, 5], [1, 6], [2, 7], [3, 8], [4, 9], [5, 7], [5, 8], [6, 8], [6, 9], [7, 9]]\n G = nx.OrderedGraph()\n G.add_edges_from(elist)\n parameters = np.array([5.192253984583296, 5.144373231492732, 5.9438949617723775, 5.807748946652058, 3.533458907810596, 6.006206583282401, 6.122313961527631, 6.218468942101044, 6.227704753217614,\n\n0.3895570099244132, -0.1809282325810937, 0.8844522327007089, 0.7916086532373585, 0.21294534589417236, 0.4328896243354414, 0.8327451563500539, 0.7694639329585451, 0.4727893829336214])\n beta = parameters[:9]\n gamma = -parameters[9:]\n\n result = simulate_qiskit_amps(G, gamma, beta)\n print(result)\n assert abs(abs(result) - 12) < 1e-2\n\nif __name__ == \"__main__\":\n test_simulate_qiskit_amps()\n\n", "import cirq\nimport qtree\n# Qiskit >=0.19\n#import qiskit.circuit.library as qiskit_lib\n#qiskit_lib = qtensor.tools.LasyModule('qiskit.extensions.standard')\nfrom qtensor.tools.lazy_import import qiskit\nfrom qtensor.tools.lazy_import import qiskit_lib\nimport numpy as np\n\nclass OpFactory:\n pass\n\n\nclass CirqFactory:\n H=cirq.H\n cX=cirq.CX\n Z=cirq.Z\n X=cirq.X\n\n @staticmethod\n def ZPhase(x, alpha):\n return 
cirq.ZPowGate(exponent=float(alpha)).on(x)\n\n @staticmethod\n def XPhase(x, alpha):\n return cirq.XPowGate(exponent=float(alpha)).on(x)\n\n cZ=cirq.CZ\n\nQtreeFactory = qtree.operators\nclass ZZFull(qtree.operators.ParametricGate):\n name = 'ZZ'\n _changes_qubits=(0,1)\n def gen_tensor(self):\n alpha = self.parameters['alpha']\n p = np.exp(1j*np.pi*alpha/2)\n m = np.exp(-1j*np.pi*alpha/2)\n tensor = np.diag([m, p ,p, m])\n return tensor.reshape((2,)*4)\n\nQtreeFullFactory = qtree.operators_full_matrix\nQtreeFullFactory.ZZ = ZZFull\n\nclass ZZ(qtree.operators.ParametricGate):\n name = 'ZZ'\n _changes_qubits=tuple()\n parameter_count=1\n def gen_tensor(self):\n alpha = self.parameters['alpha']\n p = np.exp(1j*np.pi*alpha/2)\n m = np.exp(-1j*np.pi*alpha/2)\n tensor = np.array([\n [m, p]\n ,[p, m]\n ])\n return tensor\n\nQtreeFactory.ZZ = ZZ\n# this is a bit ugly, but will work for now\nqtree.operators.LABEL_TO_GATE_DICT['zz'] = ZZ\n\nclass QiskitFactory_Metaclass(type):\n def __init__(cls, *args, **kwargs):\n pass\n\n @property\n def H(cls):\n return qiskit_lib.HGate\n\n @property\n def cX(cls):\n # Different versions of qiskit have different names\n try:\n return qiskit_lib.CnotGate\n except:\n return qiskit_lib.CXGate\n\n\n @staticmethod\n def ZPhase(alpha):\n return qiskit_lib.RZGate(phi=alpha*np.pi)\n\n @staticmethod\n def XPhase(alpha):\n return qiskit_lib.RXGate(theta=alpha*np.pi)\n\n @property\n def cZ(cls):\n return qiskit_lib.CzGate\n\n @property\n def Z(cls):\n return qiskit_lib.ZGate\n\nclass QiskitFactory(metaclass=QiskitFactory_Metaclass):\n pass\n\nclass CircuitBuilder:\n \"\"\" ABC for creating a circuit.\"\"\"\n operators = OpFactory\n\n def __init__(self, n_qubits, **params):\n self.n_qubits = n_qubits\n self.reset()\n self.qubits = self.get_qubits()\n\n def get_qubits(self):\n raise NotImplementedError\n\n def reset(self):\n \"\"\" Initialize new circuit \"\"\"\n raise NotImplementedError\n\n def inverse(self):\n if not hasattr(self, '_warned'):\n #print('Warning: conjugate is not implemented. 
Returning same circuit, in case you only care about circuit structure')\n self._warned = True\n return self._circuit\n\n def apply_gate(self, gate, *qubits, **params):\n self._circuit.append(gate(**params), *qubits)\n\n @property\n def circuit(self):\n return self._circuit\n @circuit.setter\n def circuit(self, circuit):\n self._circuit = circuit\n\nclass CirqBuilder(CircuitBuilder):\n operators = CirqFactory\n\n def get_qubits(self):\n return [cirq.LineQubit(i) for i in range(self.n_qubits)]\n\n def reset(self):\n self._circuit = cirq.Circuit()\n\n def apply_gate(self, gate, *qubits, **params):\n self._circuit.append(gate(*qubits, **params))\n\n def inverse(self):\n self._circuit = cirq.inverse(self._circuit)\n\nclass QtreeBuilder(CircuitBuilder):\n operators = QtreeFactory\n\n def get_qubits(self):\n return list(range(self.n_qubits))\n\n def reset(self):\n self._circuit = []\n\n def apply_gate(self, gate, *qubits, **params):\n self._circuit.append(gate(*qubits, **params))\n\n def inverse(self):\n self._circuit = list(reversed([g.dagger_me() for g in self._circuit]))\n\nclass QiskitBuilder(CircuitBuilder):\n operators = QiskitFactory\n\n def get_qubits(self):\n # The ``reset`` should be called first\n return self._circuit.qubits\n\n def reset(self):\n qreg_size = self.n_qubits\n creg_size = qreg_size\n self._circuit = qiskit.QuantumCircuit(qreg_size, creg_size)\n\n def apply_gate(self, gate, *qubits, **params):\n self._circuit.append(gate(**params), qubits)\n\n def inverse(self):\n self._circuit = self._circuit.inverse()\n\nclass QtreeFullBuilder(QtreeBuilder):\n operators = QtreeFullFactory\n" ]
[ [ "numpy.imag", "numpy.real", "numpy.array", "numpy.random.seed" ], [ "numpy.random.random", "numpy.min" ], [ "numpy.log2", "numpy.abs", "numpy.outer", "numpy.array", "numpy.zeros", "numpy.sum" ], [ "numpy.diag", "numpy.array", "numpy.exp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Prakadeeswaran05/Simple-Tf-ObjectDetection-SemanticSegmentation-ROS
[ "f119f0f8394c324c8453d540f4dfa495e34ee001" ]
[ "notcv_bridge.py" ]
[ "#! /usr/bin/env python\nimport sys\nimport numpy as np\nfrom sensor_msgs.msg import Image\n\ndef imgmsg_to_cv2(img_msg):\n if img_msg.encoding != \"bgr8\":\n rospy.logerr(\"This Coral detect node has been hardcoded to the 'bgr8' encoding. Come change the code if you're actually trying to implement a new camera\")\n dtype = np.dtype(\"uint8\") # Hardcode to 8 bits...\n dtype = dtype.newbyteorder('>' if img_msg.is_bigendian else '<')\n image_opencv = np.ndarray(shape=(img_msg.height, img_msg.width, 3), # and three channels of data. Since OpenCV works with bgr natively, we don't need to reorder the channels.\n dtype=dtype, buffer=img_msg.data)\n # If the byt order is different between the message and the system.\n if img_msg.is_bigendian == (sys.byteorder == 'little'):\n image_opencv = image_opencv.byteswap().newbyteorder()\n return image_opencv\n\ndef cv2_to_imgmsg(cv_image):\n img_msg = Image()\n img_msg.height = cv_image.shape[0]\n img_msg.width = cv_image.shape[1]\n img_msg.encoding = \"bgr8\"\n img_msg.is_bigendian = 0\n img_msg.data = cv_image.tostring()\n img_msg.step = len(img_msg.data) // img_msg.height # That double line is actually integer division, not a comment\n return img_msg\n" ]
[ [ "numpy.ndarray", "numpy.dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ipanepen/sagemaker-scikit-learn-container
[ "3214b0d36955fed0b6338b997b26bcc883f7b883" ]
[ "test/unit/test_serving.py" ]
[ "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nfrom __future__ import absolute_import\n\nfrom mock import patch, MagicMock\nimport numpy as np\nimport pytest\nimport os\n\nfrom sklearn.base import BaseEstimator\n\nfrom sagemaker_containers.beta.framework import (content_types, encoders, errors)\nfrom sagemaker_sklearn_container import serving\nfrom sagemaker_sklearn_container.serving import default_model_fn, import_module\n\n\[email protected](scope='module', name='np_array')\ndef fixture_np_array():\n return np.ones((2, 2))\n\n\nclass FakeEstimator(BaseEstimator):\n def __init__(self):\n pass\n\n @staticmethod\n def predict(input):\n return\n\n\ndef dummy_execution_parameters_fn():\n return {'dummy': 'dummy'}\n\n\nclass DummyUserModule:\n def __init__(self):\n self.execution_parameters_fn = dummy_execution_parameters_fn\n\n def model_fn(self, model_dir):\n pass\n\n\[email protected](\n 'json_data, expected', [\n ('[42, 6, 9]', np.array([42, 6, 9])),\n ('[42.0, 6.0, 9.0]', np.array([42., 6., 9.])),\n ('[\"42\", \"6\", \"9\"]', np.array(['42', '6', '9'], dtype=np.float32)),\n (u'[\"42\", \"6\", \"9\"]', np.array([u'42', u'6', u'9'], dtype=np.float32))])\ndef test_input_fn_json(json_data, expected):\n actual = serving.default_input_fn(json_data, content_types.JSON)\n np.testing.assert_equal(actual, expected)\n\n\[email protected](\n 'csv_data, expected', [\n ('42\\n6\\n9\\n', np.array([42, 6, 9], dtype=np.float32)),\n ('42.0\\n6.0\\n9.0\\n', np.array([42., 6., 9.], dtype=np.float32)),\n ('42\\n6\\n9\\n', np.array([42, 6, 9], dtype=np.float32))])\ndef test_input_fn_csv(csv_data, expected):\n deserialized_np_array = serving.default_input_fn(csv_data, content_types.CSV)\n assert np.array_equal(expected, deserialized_np_array)\n\n\[email protected]('np_array', ([42, 6, 9], [42., 6., 9.]))\ndef test_input_fn_npz(np_array):\n input_data = encoders.array_to_npy(np_array)\n deserialized_np_array = serving.default_input_fn(input_data, content_types.NPY)\n\n assert np.array_equal(np_array, deserialized_np_array)\n\n float_32_array = np.array(np_array, dtype=np.float32)\n input_data = encoders.array_to_npy(float_32_array)\n deserialized_np_array = serving.default_input_fn(input_data, content_types.NPY)\n\n assert np.array_equal(float_32_array, deserialized_np_array)\n\n float_64_array = np.array(np_array, dtype=np.float64)\n input_data = encoders.array_to_npy(float_64_array)\n deserialized_np_array = serving.default_input_fn(input_data, content_types.NPY)\n\n assert np.array_equal(float_64_array, deserialized_np_array)\n\n\ndef test_input_fn_bad_content_type():\n with pytest.raises(errors.UnsupportedFormatError):\n serving.default_input_fn('', 'application/not_supported')\n\n\ndef test_default_model_fn():\n with pytest.raises(NotImplementedError):\n default_model_fn('model_dir')\n\n\ndef test_predict_fn(np_array):\n mock_estimator = FakeEstimator()\n with patch.object(mock_estimator, 'predict') as mock:\n serving.default_predict_fn(np_array, mock_estimator)\n 
mock.assert_called_once()\n\n\ndef test_output_fn_json(np_array):\n response = serving.default_output_fn(np_array, content_types.JSON)\n\n assert response.get_data(as_text=True) == encoders.array_to_json(np_array.tolist())\n assert response.content_type == content_types.JSON\n\n\ndef test_output_fn_csv(np_array):\n response = serving.default_output_fn(np_array, content_types.CSV)\n\n assert response.get_data(as_text=True) == '1.0,1.0\\n1.0,1.0\\n'\n assert content_types.CSV in response.content_type\n\n\ndef test_output_fn_npz(np_array):\n response = serving.default_output_fn(np_array, content_types.NPY)\n\n assert response.get_data() == encoders.array_to_npy(np_array)\n assert response.content_type == content_types.NPY\n\n\ndef test_input_fn_bad_accept():\n with pytest.raises(errors.UnsupportedFormatError):\n serving.default_output_fn('', 'application/not_supported')\n\n\n@patch('importlib.import_module')\ndef test_import_module_execution_parameters(importlib_module_mock):\n importlib_module_mock.return_value = DummyUserModule()\n _, execution_parameters_fn = import_module('dummy_module', 'dummy_dir')\n\n assert execution_parameters_fn == dummy_execution_parameters_fn\n\n\n@patch('sagemaker_sklearn_container.serving.server')\ndef test_serving_entrypoint_start_gunicorn(mock_server):\n mock_server.start = MagicMock()\n serving.serving_entrypoint()\n mock_server.start.assert_called_once()\n\n\[email protected](os.environ, {'SAGEMAKER_MULTI_MODEL': 'True', })\n@patch('sagemaker_sklearn_container.serving.start_model_server')\ndef test_serving_entrypoint_start_mms(mock_start_model_server):\n serving.serving_entrypoint()\n mock_start_model_server.assert_called_once()\n" ]
[ [ "numpy.testing.assert_equal", "numpy.array", "numpy.array_equal", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]