repo_name: string (lengths 8-130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
fatfatbear/tinynn
[ "8ef3e834e6af330c6809b1e09757bd95f91b0e3c" ]
[ "examples/mnist/pytorch-run.py" ]
[ "import argparse\nimport os\nimport time\n\nimport tinynn as tn\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\n\nclass Dense(nn.Module):\n\n def __init__(self):\n super(Dense, self).__init__()\n self.fc1 = nn.Linear(784, 200)\n self.fc2 = nn.Linear(200, 100)\n self.fc3 = nn.Linear(100, 70)\n self.fc4 = nn.Linear(70, 30)\n self.fc5 = nn.Linear(30, 10)\n torch.nn.init.xavier_uniform_(self.fc1.weight)\n torch.nn.init.xavier_uniform_(self.fc2.weight)\n torch.nn.init.xavier_uniform_(self.fc3.weight)\n torch.nn.init.xavier_uniform_(self.fc4.weight)\n torch.nn.init.xavier_uniform_(self.fc5.weight)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n x = F.relu(self.fc4(x))\n x = self.fc5(x)\n x = F.log_softmax(x, dim=1)\n return x\n\n\nclass Conv(nn.Module):\n\n def __init__(self):\n super(Conv, self).__init__()\n self.conv1 = nn.Conv2d(1, 6, 5, 1, padding=\"same\")\n self.conv2 = nn.Conv2d(6, 16, 5, 1, padding=\"same\")\n\n self.fc1 = nn.Linear(784, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = F.relu(x)\n x = F.max_pool2d(x, kernel_size=2, stride=2)\n\n x = self.conv2(x)\n x = F.relu(x)\n x = F.max_pool2d(x, kernel_size=2, stride=2)\n\n x = torch.flatten(x, 1)\n\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n x = F.relu(x)\n x = self.fc3(x)\n\n x = F.log_softmax(x, dim=1)\n return x\n\n\nclass RNN(nn.Module):\n\n def __init__(self):\n super(RNN, self).__init__()\n self.recurrent = nn.RNN(28, 30, batch_first=True)\n self.fc1 = nn.Linear(30, 10)\n\n def forward(self, x):\n output, hidden = self.recurrent(x)\n x = output[:, -1]\n x = self.fc1(x)\n x = F.log_softmax(x, dim=1)\n return x\n\n\nclass LSTM(RNN):\n\n def __init__(self):\n super(LSTM, self).__init__()\n self.recurrent = nn.LSTM(28, 30, batch_first=True)\n self.fc1 = nn.Linear(30, 10)\n\n\ndef main():\n if args.seed >= 0:\n tn.seeder.random_seed(args.seed)\n torch.manual_seed(args.seed)\n\n mnist = tn.dataset.MNIST(args.data_dir, one_hot=False)\n train_x, train_y = mnist.train_set\n test_x, test_y = mnist.test_set\n\n if args.model_type == \"mlp\":\n model = Dense()\n elif args.model_type == \"cnn\":\n train_x = train_x.reshape((-1, 1, 28, 28))\n test_x = test_x.reshape((-1, 1, 28, 28))\n model = Conv()\n elif args.model_type == \"rnn\":\n train_x = train_x.reshape((-1, 28, 28))\n test_x = test_x.reshape((-1, 28, 28))\n model = RNN()\n elif args.model_type == \"lstm\":\n train_x = train_x.reshape((-1, 28, 28))\n test_x = test_x.reshape((-1, 28, 28))\n model = LSTM()\n\n model.to(device)\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n\n model.train()\n iterator = tn.data_iterator.BatchIterator(batch_size=args.batch_size)\n for epoch in range(args.num_ep):\n t_start = time.time()\n f_cost, b_cost = 0, 0\n for batch in iterator(train_x, train_y):\n x = torch.from_numpy(batch.inputs).to(device)\n y = torch.from_numpy(batch.targets).to(device)\n optimizer.zero_grad()\n pred = model(x)\n loss = F.nll_loss(pred, y)\n loss.backward()\n optimizer.step()\n print(f\"Epoch {epoch} time cost: {time.time() - t_start}\")\n # evaluate\n evaluate(model, test_x, test_y)\n\n\ndef evaluate(model, test_x, test_y):\n model.eval()\n x, y = torch.from_numpy(test_x).to(device), torch.from_numpy(test_y).to(device)\n with torch.no_grad():\n pred = model(x)\n test_pred_idx = pred.argmax(dim=1).numpy()\n accuracy, info = tn.metric.accuracy(test_pred_idx, test_y)\n 
print(f\"accuracy: {accuracy:.4f} info: {info}\")\n\n\nif __name__ == \"__main__\":\n curr_dir = os.path.dirname(os.path.abspath(__file__))\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--data_dir\", type=str,\n default=os.path.join(curr_dir, \"data\"))\n parser.add_argument(\"--model_type\", default=\"mlp\", type=str,\n help=\"[*mlp|cnn|rnn|lstm]\")\n parser.add_argument(\"--num_ep\", default=10, type=int)\n parser.add_argument(\"--lr\", default=1e-3, type=float)\n parser.add_argument(\"--batch_size\", default=128, type=int)\n parser.add_argument(\"--seed\", default=31, type=int)\n args = parser.parse_args()\n\n device = torch.device(\"cpu\")\n\n main()\n" ]
[ [ "torch.nn.functional.log_softmax", "torch.nn.init.xavier_uniform_", "torch.nn.Linear", "torch.nn.LSTM", "torch.nn.functional.max_pool2d", "torch.flatten", "torch.manual_seed", "torch.no_grad", "torch.nn.functional.nll_loss", "torch.nn.functional.relu", "torch.from_numpy", "torch.nn.Conv2d", "torch.nn.RNN", "torch.device" ] ]
shaoshitong/hdvw
[ "fbb39da9ad8a765f74225eec7e9614978c740dde" ]
[ "hdvw/models/seresnet_mcdo_block.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport hdvw.models.layers as layers\nimport hdvw.models.gates as gates\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_channels, channels,\n stride=1, groups=1, width_per_group=64, rate=0.3, sd=0.0,\n reduction=16, **block_kwargs):\n super(BasicBlock, self).__init__()\n\n if groups != 1 or width_per_group != 64:\n raise ValueError(\"BasicBlock only supports groups=1 and base_width=64\")\n width = int(channels * (width_per_group / 64.)) * groups\n\n self.rate = rate\n\n self.shortcut = []\n if stride != 1 or in_channels != channels * self.expansion:\n self.shortcut.append(layers.conv1x1(in_channels, channels * self.expansion, stride=stride))\n self.shortcut.append(layers.bn(channels * self.expansion))\n self.shortcut = nn.Sequential(*self.shortcut)\n\n self.conv1 = nn.Sequential(\n layers.conv3x3(in_channels, width, stride=stride),\n layers.bn(width),\n layers.relu(),\n )\n self.conv2 = nn.Sequential(\n layers.conv3x3(width, channels * self.expansion),\n layers.bn(channels * self.expansion),\n )\n\n self.relu = layers.relu()\n self.sd = layers.DropPath(sd) if sd > 0.0 else nn.Identity()\n self.gate = gates.ChannelGate(channels * self.expansion, reduction, max_pool=False)\n\n def forward(self, x):\n skip = self.shortcut(x)\n\n x = self.conv1(x)\n x = F.dropout(x, p=self.rate)\n x = self.conv2(x)\n x = self.gate(x)\n\n x = self.sd(x) + skip\n x = self.relu(x)\n\n return x\n\n def extra_repr(self):\n return \"rate=%.3e\" % self.rate\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_channels, channels,\n stride=1, groups=1, width_per_group=64, rate=0.3, sd=0.0,\n reduction=16, **block_kwargs):\n super(Bottleneck, self).__init__()\n\n width = int(channels * (width_per_group / 64.)) * groups\n\n self.rate = rate\n\n self.shortcut = []\n if stride != 1 or in_channels != channels * self.expansion:\n self.shortcut.append(layers.conv1x1(\n in_channels, channels * self.expansion, stride=stride))\n self.shortcut.append(layers.bn(channels * self.expansion))\n self.shortcut = nn.Sequential(*self.shortcut)\n\n self.conv1 = nn.Sequential(\n layers.conv1x1(in_channels, width),\n layers.bn(width),\n layers.relu(),\n )\n self.conv2 = nn.Sequential(\n layers.conv3x3(width, width, stride=stride, groups=groups),\n layers.bn(width),\n layers.relu(),\n )\n self.conv3 = nn.Sequential(\n layers.conv1x1(width, channels * self.expansion),\n layers.bn(channels * self.expansion),\n )\n\n self.relu = layers.relu()\n self.sd = layers.DropPath(sd) if sd > 0.0 else nn.Identity()\n self.gate = gates.ChannelGate(channels * self.expansion, reduction, max_pool=False)\n\n def forward(self, x):\n skip = self.shortcut(x)\n\n x = self.conv1(x)\n x = self.conv2(x)\n x = F.dropout(x, p=self.rate)\n x = self.conv3(x)\n x = self.gate(x)\n\n x = self.sd(x) + skip\n x = self.relu(x)\n\n return x\n\n def extra_repr(self):\n return \"rate=%.3e\" % self.rate\n" ]
[ [ "torch.nn.Identity", "torch.nn.functional.dropout", "torch.nn.Sequential" ] ]
arxxv/ivy
[ "740881dfefbdf658f6e395f1b3bc17ed4a77f650" ]
[ "ivy/functional/backends/torch/array_api/manipulation_functions.py" ]
[ "# global\nimport torch\nfrom typing import Union, Optional, Tuple, List\n\n\ndef roll(x: torch.Tensor, shift: Union[int, Tuple[int]], axis: Union[int, Tuple[int]]=None)\\\n -> torch.Tensor:\n return torch.roll(x, shift, axis) \n\n\n# noinspection PyShadowingBuiltins\ndef flip(x: torch.Tensor,\n axis: Optional[Union[int, Tuple[int], List[int]]] = None)\\\n -> torch.Tensor:\n num_dims: int = len(x.shape)\n if not num_dims:\n return x\n if axis is None:\n new_axis: List[int] = list(range(num_dims))\n else:\n new_axis: List[int] = axis\n if isinstance(new_axis, int):\n new_axis = [new_axis]\n else:\n new_axis = new_axis\n new_axis = [item + num_dims if item < 0 else item for item in new_axis]\n return torch.flip(x, new_axis)\n" ]
[ [ "torch.roll", "torch.flip" ] ]
xiadehu27/OIDDN
[ "29793c855831febcb09b60f8c4882b1e31faa28c" ]
[ "OIDN_def.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.nn import init\nimport torch.nn.functional as F\nimport scipy.io as sio\nimport numpy as np\nimport os\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n# Define Basic reconstruct block\nclass BasicBlock(torch.nn.Module):\n def __init__(self,BLOCK_SIZE):\n super(BasicBlock, self).__init__()\n\n self.BLOCK_SIZE=BLOCK_SIZE\n\n self.lambda_step = nn.Parameter(torch.Tensor([0.5]))\n self.soft_thr = nn.Parameter(torch.Tensor([0.01])) \n self.t = nn.Parameter(torch.Tensor([1.0]))\n self.mergeScale = nn.Parameter(torch.Tensor([1.0]))\n \n\n self.conv_D = nn.Parameter(init.xavier_normal_(torch.Tensor(32, 3, 3, 3)))\n\n self.conv1_forward = nn.Parameter(init.xavier_normal_(torch.Tensor(32, 32, 3, 3)))\n self.conv2_forward = nn.Parameter(init.xavier_normal_(torch.Tensor(32, 32, 3, 3)))\n self.conv1_backward = nn.Parameter(init.xavier_normal_(torch.Tensor(32, 32, 3, 3)))\n self.conv2_backward = nn.Parameter(init.xavier_normal_(torch.Tensor(32, 32, 3, 3)))\n\n self.conv1_G = nn.Parameter(init.xavier_normal_(torch.Tensor(32, 32, 3, 3)))\n self.conv2_G = nn.Parameter(init.xavier_normal_(torch.Tensor(32, 32, 3, 3))) \n self.conv3_G = nn.Parameter(init.xavier_normal_(torch.Tensor(3, 32, 3, 3)))\n\n def forward(self, xprev, x, PhiWeight, PhiTWeight, PhiTb):\n\n tplus = (1+torch.sqrt(1+4*self.t*self.t))/2\n xi = (self.t-1)/tplus\n deltax = x-xprev\n\n zeta = x - self.lambda_step * PhiTPhi_fun(x, PhiWeight, PhiTWeight,self.BLOCK_SIZE)\n zeta = zeta - self.lambda_step * xi * PhiTPhi_fun(deltax, PhiWeight, PhiTWeight,self.BLOCK_SIZE)\n zeta = zeta + xi * deltax\n zeta = zeta + self.lambda_step * PhiTb\n\n x = zeta\n \n x_input = x\n\n x_D = F.conv2d(x_input, self.conv_D, padding=1)\n\n x = F.conv2d(x_D, self.conv1_forward, padding=1)\n x = F.relu(x)\n x_forward = F.conv2d(x, self.conv2_forward, padding=1)\n\n x = torch.mul(torch.sign(x_forward), F.relu(torch.abs(x_forward) - self.soft_thr))\n\n x = F.conv2d(x, self.conv1_backward, padding=1)\n x = F.relu(x)\n x_backward = F.conv2d(x, self.conv2_backward, padding=1)\n\n x = F.conv2d(F.relu(x_backward), self.conv1_G, padding=1)\n x = F.conv2d(F.relu(x), self.conv2_G, padding=1)\n x_G = F.conv2d(x, self.conv3_G, padding=1)\n\n x_pred = x_input + x_G*self.mergeScale\n\n x = F.conv2d(x_forward, self.conv1_backward, padding=1)\n x = F.relu(x)\n x_D_est = F.conv2d(x, self.conv2_backward, padding=1)\n symloss = x_D_est - x_D\n\n return [x_pred, symloss]\n\n\n# Define OIDN \nclass OIDN(torch.nn.Module):\n\n def __init__(self, LayerNo, M, BLOCK_SIZE):\n \n super(OIDN, self).__init__()\n\n N = BLOCK_SIZE * BLOCK_SIZE\n\n self.Phir = nn.Parameter(init.xavier_normal_(torch.Tensor(M, N)))\n self.Phig = nn.Parameter(init.xavier_normal_(torch.Tensor(M, N)))\n self.Phib = nn.Parameter(init.xavier_normal_(torch.Tensor(M, N)))\n self.Phi_scale = nn.Parameter(torch.Tensor([1.0]))\n \n\n onelayer = []\n self.LayerNo = LayerNo\n self.M = M\n self.N = N\n self.BLOCK_SIZE = BLOCK_SIZE\n\n for i in range(LayerNo):\n onelayer.append(BasicBlock(BLOCK_SIZE))\n\n self.fcs = nn.ModuleList(onelayer)\n self.shuffle = torch.nn.PixelShuffle(BLOCK_SIZE)\n\n def forward(self, x):\n\n origX = x\n\n # Sampling-subnet\n Phir = self.Phir * self.Phi_scale \n Phig = self.Phig * self.Phi_scale \n Phib = self.Phib * self.Phi_scale\n\n PhirWeight = Phir.contiguous().view(self.M, 1, self.BLOCK_SIZE, self.BLOCK_SIZE)\n PhigWeight = Phig.contiguous().view(self.M, 1, 
self.BLOCK_SIZE, self.BLOCK_SIZE)\n PhibWeight = Phib.contiguous().view(self.M, 1, self.BLOCK_SIZE, self.BLOCK_SIZE)\n\n Phixr = F.conv2d(x[:,0:1,:,:], PhirWeight, padding=0, stride=self.BLOCK_SIZE, bias=None)\n Phixg = F.conv2d(x[:,1:2,:,:], PhigWeight, padding=0, stride=self.BLOCK_SIZE, bias=None)\n Phixb = F.conv2d(x[:,2:3,:,:], PhibWeight, padding=0, stride=self.BLOCK_SIZE, bias=None)\n\n # Initialization-subnet\n PhiWeight = torch.cat((\n PhirWeight,\n PhigWeight,\n PhibWeight),dim=1)\n \n\n PhiTWeight = torch.cat((\n Phir.t().contiguous().view(self.N, self.M, 1, 1),\n Phig.t().contiguous().view(self.N, self.M, 1, 1),\n Phib.t().contiguous().view(self.N, self.M, 1, 1)),dim=0)\n \n\n PhiTb = torch.cat((\n self.shuffle(F.conv2d(Phixr, Phir.t().contiguous().view(self.N, self.M, 1, 1), padding=0, bias=None)),\n self.shuffle(F.conv2d(Phixg, Phig.t().contiguous().view(self.N, self.M, 1, 1), padding=0, bias=None)),\n self.shuffle(F.conv2d(Phixb, Phib.t().contiguous().view(self.N, self.M, 1, 1), padding=0, bias=None))),\n dim=1) \n \n x = PhiTb \n\n # Recovery-subnet\n layers_sym = [] # for computing symmetric loss \n xprev = x\n for i in range(self.LayerNo): \n \n [x1, layer_sym] = self.fcs[i](xprev, x, PhiWeight, PhiTWeight, PhiTb) \n xprev = x\n x=x1\n\n layers_sym.append(layer_sym)\n\n x_final = x\n\n return [x_final, layers_sym, [Phir,Phig,Phib]]\n\n\ndef PhiTPhi_fun(x, PhiW, PhiTW,BLOCK_SIZE):\n\n N = BLOCK_SIZE * BLOCK_SIZE\n\n phir = F.conv2d(x[:,0:1,:,:], PhiW[:,0:1,:,:], padding=0,stride=BLOCK_SIZE, bias=None)\n phig = F.conv2d(x[:,1:2,:,:], PhiW[:,1:2,:,:], padding=0,stride=BLOCK_SIZE, bias=None)\n phib = F.conv2d(x[:,2:3,:,:], PhiW[:,2:3,:,:], padding=0,stride=BLOCK_SIZE, bias=None)\n\n xtempr = F.conv2d(phir, PhiTW[0:N,:,:,:], padding=0, bias=None)\n xtempg = F.conv2d(phig, PhiTW[N:N*2,:,:,:], padding=0, bias=None)\n xtempb = F.conv2d(phib, PhiTW[N*2:N*3,:,:,:], padding=0, bias=None)\n\n temp = torch.cat(\n (\n xtempr,xtempg,xtempb\n ),dim=1\n )\n\n return torch.nn.PixelShuffle(BLOCK_SIZE)(temp)" ]
[ [ "torch.nn.PixelShuffle", "torch.nn.functional.conv2d", "torch.sqrt", "torch.nn.functional.relu", "torch.sign", "torch.cuda.is_available", "torch.nn.ModuleList", "torch.abs", "torch.cat", "torch.Tensor" ] ]
hellpanderrr/chicksexer
[ "7cf2bd1f3bea7501c7a64eda06e9d0506a061928" ]
[ "preprocessor/main.py" ]
[ "# -*- coding: UTF-8 -*-\n\"\"\"\nMain module of preprocessor package. Can be executed by `python -m preprocessor`.\n\"\"\"\nimport os\nimport pickle\nfrom random import shuffle\n\nimport numpy as np\n\nfrom chicksexer.constant import POSITIVE_CLASS, NEGATIVE_CLASS, NEUTRAL_CLASS, CLASS2DEFAULT_CUTOFF\nfrom chicksexer.util import get_logger\nfrom preprocessor import PACKAGE_ROOT\nfrom preprocessor.dbpedia import gen_triples_from_file\nfrom preprocessor.gender_csv import gen_name_gender_from_csv\nfrom preprocessor.us_stats import compute_gender_probas\nfrom preprocessor.util import Name2Proba\n\n__author__ = 'kensk8er'\n\n_DATA_ROOT = os.path.join(PACKAGE_ROOT, os.path.pardir, 'data') \n#_DATA_ROOT = os.path.join(PACKAGE_ROOT, 'data')\n\n\n_RAW_DATA_ROOT = os.path.join(_DATA_ROOT, 'raw')\n_PROCESSED_DATA_PATH = os.path.join(_DATA_ROOT, 'name2proba_{}.pkl')\n_NEUTRAL_NAME_AUGMENTATION_NUM = 100000\n_FEMALE_NAME_AUGMENTATION_NUM = 85000\n_TEST_DATA_SIZE = 10000 # the size of the whole dataset is ~400,000\n_LOGGER = get_logger(__name__)\n\n_CLASS2PROB = {\n POSITIVE_CLASS: 1.,\n NEUTRAL_CLASS: 0.5,\n NEGATIVE_CLASS: 0.,\n}\n\n\ndef _process_csv(name2probfa):\n \"\"\"Process csv files that list names and their gender.\"\"\"\n file_names = ['Black-Female-Names.csv', 'Black-Male-Names.csv', 'Hispanic-Female-Names.csv',\n 'Hispanic-Male-Names.csv', 'Indian-Female-Names.csv', 'Indian-Male-Names.csv',\n 'White-Female-Names.csv', 'White-Male-Names.csv']\n\n for file_name in file_names:\n for name, gender in gen_name_gender_from_csv(os.path.join(_RAW_DATA_ROOT, file_name)):\n proba = _CLASS2PROB[gender]\n name2probfa[name] = proba\n return name2probfa\n\n\ndef _process_dbpedia(name2proba):\n \"\"\"Process genders_en.ttl downloaded from dbpedia dump.\"\"\"\n file_name = 'genders_en.ttl'\n for name, gender in gen_triples_from_file(os.path.join(_RAW_DATA_ROOT, file_name)):\n proba = _CLASS2PROB[gender]\n name2proba[name] = proba\n return name2proba\n\n\ndef _process_us_stats(name2proba, start_year=1940):\n \"\"\"Process yobxxxx.txt files that list first names and their gender.\"\"\"\n dir_path = os.path.join(_RAW_DATA_ROOT, 'US-Baby-Name-Stats')\n name2proba_stats = compute_gender_probas(dir_path, start_year)\n for name, proba in name2proba_stats.items():\n name2proba.set_fix_item(name, proba)\n return name2proba\n\n\ndef _process_common_names(name2proba):\n \"\"\"Process male/female.txt files that list common male/female names.\"\"\"\n\n def process_common_names(file_name, gender, name2prob):\n with open(os.path.join(_RAW_DATA_ROOT, file_name), encoding='utf8') as file_:\n for line in file_:\n if line.startswith('#') or line.startswith('\\n'):\n continue\n name = line.strip()\n name2prob[name] = _CLASS2PROB[gender]\n return name2prob\n\n file_name2gender = {\n 'male.txt': POSITIVE_CLASS,\n 'female.txt': NEGATIVE_CLASS,\n }\n for file_name, gender in file_name2gender.items():\n name2proba = process_common_names(file_name, gender, name2proba)\n\n return name2proba\n\n\ndef _augment_full_names(name2proba, gender):\n \"\"\"Augment neutral names\"\"\"\n if gender == 'neutral':\n augmentation_num = _NEUTRAL_NAME_AUGMENTATION_NUM\n low_proba = CLASS2DEFAULT_CUTOFF[NEGATIVE_CLASS]\n high_proba = CLASS2DEFAULT_CUTOFF[POSITIVE_CLASS]\n elif gender == 'female':\n augmentation_num = _FEMALE_NAME_AUGMENTATION_NUM\n low_proba = float('-inf')\n high_proba = CLASS2DEFAULT_CUTOFF[NEGATIVE_CLASS]\n else:\n raise ValueError('Invalid argument gender={}'.format(gender))\n\n neutral_names = [name for name, prob in 
name2proba.items()\n if low_proba < prob < high_proba and ' ' not in name]\n multiple = augmentation_num // len(neutral_names)\n\n with open(os.path.join(_DATA_ROOT, 'surname2proba.pkl'), 'rb') as pickle_file:\n surname2proba = pickle.load(pickle_file)\n surnames, surname_probas = list(), list()\n for surname, proba in surname2proba.items():\n surnames.append(surname)\n surname_probas.append(proba)\n\n for neutral_name in neutral_names:\n proba = name2proba[neutral_name]\n sampled_surnames = np.random.choice(surnames, multiple, p=surname_probas)\n for surname in sampled_surnames:\n full_name = '{} {}'.format(neutral_name, surname)\n name2proba[full_name] = proba\n\n return name2proba\n\n\ndef main():\n name2proba = Name2Proba()\n _LOGGER.info('Processing Dbpedia...')\n name2proba = _process_dbpedia(name2proba)\n _LOGGER.info('Processing CSVs...')\n name2proba = _process_csv(name2proba)\n _LOGGER.info('Processing US Stats...')\n name2proba = _process_us_stats(name2proba)\n _LOGGER.info('Processing Common Names...')\n name2proba = _process_common_names(name2proba)\n \n # these require surname2proba.pkl\n #_LOGGER.info('Augmenting Neutral Names...')\n #name2proba = _augment_full_names(name2proba, 'neutral') \n #_LOGGER.info('Augmenting Female Names...')\n #name2proba = _augment_full_names(name2proba, 'female')\n #_LOGGER.info('Saving to the pickle files...')\n\n # randomly split into train/test set\n name2proba = dict(name2proba)\n assert len(name2proba) > _TEST_DATA_SIZE, 'Whole dataset size is not larger than test set size.'\n ids = list(range(len(name2proba)))\n shuffle(ids)\n test_ids = set(ids[:_TEST_DATA_SIZE])\n name2proba_train = dict()\n name2proba_test = dict()\n\n for id_, (name, proba) in enumerate(name2proba.items()):\n if id_ in test_ids:\n name2proba_test[name] = proba\n else:\n name2proba_train[name] = proba\n\n # write to pickle files\n with open(_PROCESSED_DATA_PATH.format('train'), 'wb') as train_file:\n pickle.dump(name2proba_train, train_file)\n with open(_PROCESSED_DATA_PATH.format('test'), 'wb') as test_file:\n pickle.dump(name2proba_test, test_file)\n with open(_PROCESSED_DATA_PATH.format('all'), 'wb') as all_file:\n pickle.dump(name2proba, all_file)\n" ]
[ [ "numpy.random.choice" ] ]
Rexiome/Knowledge-Distillation-Toolkit
[ "5e11420cb0d7e23adda17e2212b515516a53c883" ]
[ "examples/wav2vec2_compression_demo/wav2vec2_compression_demo.py" ]
[ "from collections import ChainMap\n\nimport yaml\nimport torch\nimport fairseq_mod\n\nimport sys\nsys.path.append(\"../..\")\n\nfrom wav2vec2_inference_pipeline import inference_pipeline\nfrom data_loader import LibriSpeechDataLoader\nfrom knowledge_distillation.kd_training import KnowledgeDistillationTraining\nfrom fairseq_mod.models.wav2vec.teacher_wav2vec2 import TeacherWav2Vec2Model\nfrom fairseq_mod.models.wav2vec.student_wav2vec2 import StudentWav2Vec2Model\n\ndef get_proj_layer(fairseq_pretrained_model_path):\n \"\"\"\n Get projection layer's weights and biases of wav2vec 2.0 pre-trained model\n \"\"\"\n w2v = torch.load(fairseq_pretrained_model_path)\n return w2v[\"model\"][\"w2v_encoder.proj.weight\"], w2v[\"model\"][\"w2v_encoder.proj.bias\"]\n\nif __name__ == \"__main__\":\n config = yaml.load(open('demo_config.yaml','r'), Loader=yaml.FullLoader)\n target_dict = fairseq_mod.data.Dictionary.load('ltr_dict.txt')\n\n libriSpeech_data_loader = LibriSpeechDataLoader(**config[\"data_loader\"])\n train_data_loader = libriSpeech_data_loader.get_train_data_loader()\n val_data_loaders = libriSpeech_data_loader.get_val_data_loaders()\n\n inference_pipeline_example = inference_pipeline(target_dict, use_cuda=True, input_half=False)\n\n student_model = StudentWav2Vec2Model.create_student_model(target_dict=target_dict,\n fairseq_pretrained_model_path=config[\"knowledge_distillation\"][\"general\"][\"fairseq_pretrained_model_path\"],\n **config[\"knowledge_distillation\"][\"student_model\"])\n teacher_model = TeacherWav2Vec2Model.create_teacher_model(target_dict=target_dict,\n fairseq_pretrained_model_path=config[\"knowledge_distillation\"][\"general\"][\"fairseq_pretrained_model_path\"])\n\n proj_layer_weight, proj_layer_bias = get_proj_layer(fairseq_pretrained_model_path=config[\"knowledge_distillation\"][\"general\"][\"fairseq_pretrained_model_path\"])\n student_model.init_proj_layer_to_decoder(proj_layer_weight, proj_layer_bias)\n teacher_model.init_proj_layer_to_decoder(proj_layer_weight, proj_layer_bias)\n\n KD_wav2vec2 = KnowledgeDistillationTraining(train_data_loader = train_data_loader,\n val_data_loaders = val_data_loaders,\n inference_pipeline = inference_pipeline_example,\n student_model = student_model,\n teacher_model = teacher_model,\n num_gpu_used = config[\"knowledge_distillation\"][\"general\"][\"num_gpu_used\"],\n temperature = config[\"knowledge_distillation\"][\"general\"][\"temperature\"],\n final_loss_coeff_dict = config[\"knowledge_distillation\"][\"final_loss_coeff\"],\n logging_param = ChainMap(config[\"knowledge_distillation\"][\"general\"], config[\"knowledge_distillation\"][\"optimization\"],\n config[\"knowledge_distillation\"][\"final_loss_coeff\"], config[\"knowledge_distillation\"][\"student_model\"],\n config[\"knowledge_distillation\"][\"pytorch_lightning_trainer\"]),\n **ChainMap(config[\"knowledge_distillation\"][\"optimization\"],\n config[\"knowledge_distillation\"][\"pytorch_lightning_trainer\"],\n config[\"knowledge_distillation\"][\"comet_info\"])\n )\n KD_wav2vec2.start_kd_training()\n\n exit()" ]
[ [ "torch.load" ] ]
yoon-gu/chaospy
[ "f22aa31e2a338a32a6d09b810c5b629c10a87236" ]
[ "src/chaospy/distributions/copulas/baseclass.py" ]
[ "r\"\"\"\nA cumulative distribution function of an independent multivariate random\nvariable can be made dependent through a copula as follows:\n\n.. math::\n F_{Q_0,\\dots,Q_{D-1}} (q_0,\\dots,q_{D-1}) =\n C(F_{Q_0}(q_0), \\dots, F_{Q_{D-1}}(q_{D-1}))\n\nwhere :math:`C` is the copula function, and :math:`F_{Q_i}` are marginal\ndistribution functions. One of the more popular classes of copulas is the\nArchimedean copulas.\n.. \\cite{sklar_random_1996}.\nThey are defined as follows:\n\n.. math::\n C(u_1,\\dots,u_n) =\n \\phi^{[-1]} (\\phi(u_1)+\\dots+\\phi(u_n)),\n\nwhere :math:`\\phi` is a generator and :math:`\\phi^{[-1]}` is its\npseudo-inverse. Support for Archimedean copulas in `chaospy` is possible\nthrough reformulation of the Rosenblatt transformation. In two dimension, this\nreformulation is as follows:\n\n.. math::\n\n F_{U_0}(u_0) = \\frac{C(u_0,1)}{C(1,1)}\n\n F_{U_1\\mid U_0}(u_1\\mid u_0) =\n \\frac{\\tfrac{\\partial}{\\partial u_0}\n C(u_0,u_1)}{\\tfrac{\\partial}{\\partial u_0} C(u_0,1)}\n\nThis definition can also be generalized in to multiple variables using the\nformula provided by Nelsen 1999.\n.. cite:: nelsen_introduction_1999\n\nThe definition of the Rosenblatt transform can require multiple\ndifferentiations. An analytical formulation is usually not feasible, so the\nexpressions are estimated using difference scheme similar to the one outlined\nfor probability density function defined in :ref:`distributions`. The accurate\nmight therefore be affected.\n\nSince copulas are meant as a replacement for Rosenblatt\ntransformation, it is usually assumed that the distribution it is\nused on is stochastically independent.\nHowever in the definition of a copula does not actually require it, and sine\nthe Rosenblatt transformation allows for it, multiple copulas can be stacked\ntogether in `chaospy`.\n\"\"\"\nimport numpy\n\nfrom .. 
import Dist\n\n\nclass Copula(Dist):\n\n def __init__(self, dist, trans):\n \"\"\"\n Args:\n dist (Dist) : Distribution to wrap the copula around.\n trans (Dist) : The copula wrapper `[0,1]^D \\to [0,1]^D`.\n \"\"\"\n Dist.__init__(self, dist=dist, trans=trans,\n _advance=True, _length=len(trans))\n\n def _cdf(self, x, graph):\n dist, trans = graph.dists[\"dist\"], graph.dists[\"trans\"]\n q = graph(graph(x, dist), trans)\n return q\n\n def _bnd(self, x, graph):\n return graph(x, graph.dists[\"dist\"])\n\n def _ppf(self, q, graph):\n dist, trans = graph.dists[\"dist\"], graph.dists[\"trans\"]\n return graph(graph(q, trans), dist)\n\n def _pdf(self, x, graph):\n dist, trans = graph.dists[\"dist\"], graph.dists[\"trans\"]\n return graph(graph.fwd_as_pdf(x, dist), trans)*graph(x, dist)\n\n\nclass Archimedean(Dist):\n \"\"\"\n Archimedean copula superclass.\n\n Subclass this to generate an Archimedean copula.\n \"\"\"\n\n def _ppf(self, x, th, eps):\n\n for i in range(1, len(x)):\n\n q = x[:i+1].copy()\n lo, up = 0,1\n dq = numpy.zeros(i+1)\n dq[i] = eps\n flo, fup = -q[i],1-q[i]\n\n for iteration in range(1, 10):\n fq = self._diff(q[:i+1], th, eps)\n dfq = self._diff((q[:i+1].T+dq).T, th, eps)\n dfq = (dfq-fq)/eps\n dfq = numpy.where(dfq==0, numpy.inf, dfq)\n\n fq = fq-x[i]\n if not numpy.any(numpy.abs(fq)>eps):\n break\n\n # reduce boundaries\n flo = numpy.where(fq<=0, fq, flo)\n lo = numpy.where(fq<=0, q[i], lo)\n\n fup = numpy.where(fq>=0, fq, fup)\n up = numpy.where(fq>=0, q[i], up)\n\n # Newton increment\n qdq = q[i]-fq/dfq\n\n # if new val on interior use Newton\n # else binary search\n q[i] = numpy.where((qdq<up)*(qdq>lo),\n qdq, .5*(up+lo))\n\n x[i] = q[i]\n return x\n\n\n def _cdf(self, x, th, eps):\n out = numpy.zeros(x.shape)\n out[0] = x[0]\n for i in range(1,len(x)):\n out[i][x[i]==1] = 1\n out[i] = self._diff(x[:i+1], th, eps)\n\n return out\n\n def _pdf(self, x, th, eps):\n out = numpy.ones(x.shape)\n sign = 1-2*(x>.5)\n for i in range(1,len(x)):\n x[i] += eps*sign[i]\n out[i] = self._diff(x[:i+1], th, eps)\n x[i] -= eps*sign[i]\n out[i] -= self._diff(x[:i+1], th, eps)\n out[i] /= eps\n\n out = abs(out)\n return out\n\n def _diff(self, x, th, eps):\n \"\"\"\n Differentiation function.\n\n Numerical approximation of a Rosenblatt transformation created from\n copula formulation.\n \"\"\"\n foo = lambda y: self.igen(numpy.sum(self.gen(y, th), 0), th)\n\n out1 = out2 = 0.\n sign = 1 - 2*(x>.5).T\n for I in numpy.ndindex(*((2,)*(len(x)-1)+(1,))):\n\n eps_ = numpy.array(I)*eps\n x_ = (x.T + sign*eps_).T\n out1 += (-1)**sum(I)*foo(x_)\n\n x_[-1] = 1\n out2 += (-1)**sum(I)*foo(x_)\n\n out = out1/out2\n return out\n\n\n def _bnd(self, **prm):\n return 0,1\n" ]
[ [ "numpy.ones", "numpy.zeros", "numpy.abs", "numpy.array", "numpy.where" ] ]
Swapnil99007/sunpy
[ "249619d679ee8caf19f56a2cddadbfee6d026c52" ]
[ "sunpy/coordinates/tests/test_transformations.py" ]
[ "import numpy as np\nimport pytest\n\nimport astropy\nimport astropy.units as u\nfrom astropy.tests.helper import quantity_allclose, assert_quantity_allclose\nfrom astropy.coordinates import (SkyCoord, get_body_barycentric, Angle,\n ConvertError, Longitude, CartesianRepresentation,\n get_body_barycentric_posvel,\n CartesianDifferential, SphericalDifferential)\n# Versions of Astropy that do not have HeliocentricMeanEcliptic have the same frame\n# with the misleading name HeliocentricTrueEcliptic\ntry:\n from astropy.coordinates import HeliocentricMeanEcliptic\nexcept ImportError:\n from astropy.coordinates import HeliocentricTrueEcliptic as HeliocentricMeanEcliptic\n\nfrom astropy.time import Time\n\nfrom sunpy.coordinates import (Helioprojective, HeliographicStonyhurst,\n HeliographicCarrington, Heliocentric,\n HeliocentricEarthEcliptic, GeocentricSolarEcliptic,\n HeliocentricInertial, GeocentricEarthEquatorial,\n get_earth)\nfrom sunpy.coordinates import sun\nfrom sunpy.coordinates.frames import _J2000\nfrom sunpy.time import parse_time\n\n\ndef test_hcc_to_hgs():\n '''\n Check that a coordinate pointing to the observer in Heliocentric\n coordinates maps to the lattitude/longitude of the observer in\n HeliographicStonyhurst coordinates.\n '''\n lat = 10 * u.deg\n lon = 20 * u.deg\n observer = HeliographicStonyhurst(lat=lat, lon=lon)\n hcc_in = Heliocentric(x=0*u.km, y=0*u.km, z=1*u.km, observer=observer)\n hgs_out = hcc_in.transform_to(HeliographicStonyhurst)\n\n assert_quantity_allclose(hgs_out.lat, lat)\n assert_quantity_allclose(hgs_out.lon, lon)\n\n\ndef test_hpc_hpc():\n # Use some unphysical values for solar parameters for testing, to make it\n # easier to calculate expected results.\n rsun = 1*u.m\n D0 = 1*u.km\n L0 = 1*u.deg\n observer_in = HeliographicStonyhurst(lat=0*u.deg, lon=0*u.deg, radius=D0)\n observer_out = HeliographicStonyhurst(lat=0*u.deg, lon=L0, radius=D0)\n\n hpc_in = Helioprojective(0*u.arcsec, 0*u.arcsec, rsun=rsun, observer=observer_in)\n hpc_out = Helioprojective(observer=observer_out, rsun=rsun)\n\n hpc_new = hpc_in.transform_to(hpc_out)\n\n assert hpc_new.observer == hpc_out.observer\n\n # Calculate the distance subtended by an angle of L0 from the centre of the\n # Sun.\n dd = -1 * rsun * np.tan(L0)\n # Calculate the angle corresponding to that distance as seen by the new\n # observer.\n theta = np.arctan2(dd, (D0 - rsun))\n\n assert quantity_allclose(theta, hpc_new.Tx, rtol=1e-3)\n\n\ndef test_hpc_hpc_sc():\n # Use some unphysical values for solar parameters for testing, to make it\n # easier to calculate expected results.\n rsun = 1*u.m\n D0 = 1*u.km\n L0 = 1*u.deg\n observer_in = HeliographicStonyhurst(lat=0*u.deg, lon=0*u.deg, radius=D0)\n observer_out = HeliographicStonyhurst(lat=0*u.deg, lon=L0, radius=D0)\n\n sc_in = SkyCoord(0*u.arcsec, 0*u.arcsec, rsun=rsun, observer=observer_in,\n frame='helioprojective')\n hpc_out = Helioprojective(observer=observer_out, rsun=rsun)\n\n hpc_new = sc_in.transform_to(hpc_out)\n\n assert hpc_new.observer.lat == hpc_out.observer.lat\n assert hpc_new.observer.lon == hpc_out.observer.lon\n assert hpc_new.observer.radius == hpc_out.observer.radius\n\n\ndef test_hpc_hpc_null():\n hpc_in = Helioprojective(0*u.arcsec, 0*u.arcsec)\n hpc_out = Helioprojective()\n\n hpc_new = hpc_in.transform_to(hpc_out)\n\n assert hpc_new is not hpc_in\n assert quantity_allclose(hpc_new.Tx, hpc_in.Tx)\n assert quantity_allclose(hpc_new.Ty, hpc_in.Ty)\n assert hpc_out.observer == hpc_new.observer\n\n\ndef test_hcrs_hgs():\n # Get the current 
Earth location in HCRS\n adate = parse_time('2015/05/01 01:13:00')\n earth_hcrs = SkyCoord(get_body_barycentric('earth', adate), frame='icrs', obstime=adate).hcrs\n\n # Convert from HCRS to HGS\n earth_hgs = earth_hcrs.transform_to(HeliographicStonyhurst)\n\n # The HGS longitude of the Earth should be zero within numerical error\n # Due to an issue with wrapping at +-360, we shift it to pass the test.\n assert quantity_allclose((earth_hgs.lon+1*u.deg) % (360*u.deg), 1*u.deg, atol=1e-12*u.deg)\n\n # The HGS latitude and radius should be within valid ranges\n assert quantity_allclose(earth_hgs.lat, 0*u.deg, atol=7.3*u.deg)\n assert quantity_allclose(earth_hgs.radius, 1*u.AU, atol=0.017*u.AU)\n\n\ndef test_hcrs_hgs_array_obstime():\n # Get the Earth location in HCRS at two times\n times = Time(['2017-01-01', '2017-06-01'])\n earth_hcrs = SkyCoord(get_body_barycentric('earth', times), frame='icrs', obstime=times).hcrs\n\n # Transform each time in separate calls (uses scalar obstime)\n earth_hgs_0 = earth_hcrs[0].transform_to(HeliographicStonyhurst)\n earth_hgs_1 = earth_hcrs[1].transform_to(HeliographicStonyhurst)\n\n # Transform both times in one call (uses array obstime)\n earth_hgs = earth_hcrs.transform_to(HeliographicStonyhurst)\n\n # Confirm that the two approaches produce the same results\n assert quantity_allclose(earth_hgs_0.lon, earth_hgs[0].lon, atol=1e-12*u.deg)\n assert quantity_allclose(earth_hgs_0.lat, earth_hgs[0].lat, rtol=1e-10)\n assert quantity_allclose(earth_hgs_0.radius, earth_hgs[0].radius, rtol=1e-10)\n assert quantity_allclose(earth_hgs_1.lon, earth_hgs[1].lon, atol=1e-12*u.deg)\n assert quantity_allclose(earth_hgs_1.lat, earth_hgs[1].lat, rtol=1e-10)\n assert quantity_allclose(earth_hgs_1.radius, earth_hgs[1].radius, rtol=1e-10)\n\n\ndef test_hgs_hcrs():\n # This test checks the HGS->HCRS transformation by transforming from HGS to\n # HeliocentricMeanEcliptic (HME). 
It will fail if there are errors in Astropy's\n # HCRS->ICRS or ICRS->HME transformations.\n\n # Use published HGS coordinates in the Astronomical Almanac (2013), pages C6-C7\n obstime = Time('2013-01-28')\n earth_hgs = SkyCoord(0*u.deg, -5.73*u.deg, 0.9848139*u.AU, frame=HeliographicStonyhurst,\n obstime=obstime)\n\n # Transform to HME at observation-time equinox\n earth_hme = earth_hgs.transform_to(HeliocentricMeanEcliptic(equinox=obstime))\n\n # Validate against published values from the Astronomical Almanac (2013), page C6 per page E2\n # The dominant source of inaccuracy is the limited precision of the published B0 used above\n assert quantity_allclose(earth_hme.lon, Angle('308d13m30.51s') - 180*u.deg, atol=5*u.arcsec)\n assert quantity_allclose(earth_hme.lat, -Angle('-0.27s'), atol=10*u.arcsec)\n assert quantity_allclose(earth_hme.distance, 0.9848139*u.AU, atol=5e-7*u.AU)\n\n\ndef test_hgs_hgc_roundtrip():\n obstime = \"2011-01-01\"\n\n hgsin = HeliographicStonyhurst(lat=10*u.deg, lon=20*u.deg, obstime=obstime)\n hgcout = hgsin.transform_to(HeliographicCarrington(obstime=obstime))\n\n assert_quantity_allclose(hgsin.lat, hgcout.lat)\n assert_quantity_allclose(hgsin.lon + sun.L0(obstime), hgcout.lon)\n\n hgsout = hgcout.transform_to(HeliographicStonyhurst(obstime=obstime))\n\n assert_quantity_allclose(hgsout.lat, hgsin.lat)\n assert_quantity_allclose(hgsout.lon, hgsin.lon)\n\n\ndef test_hgs_cartesian_rep_to_hpc():\n # This test checks transformation HGS->HPC when the coordinate is in a Cartesian\n # representation and that it is the same as a transformation from an HGS frame with a\n # spherical representation\n\n obstime = \"2011-01-01\"\n hgscoord_cart = SkyCoord(x=1*u.km, y=0.*u.km, z=0.*u.km,\n frame=HeliographicStonyhurst(obstime=obstime),\n representation_type='cartesian')\n hpc_frame = Helioprojective(observer='earth', obstime=obstime)\n hgscoord_sph = hgscoord_cart.copy()\n hgscoord_sph.representation_type = 'spherical'\n hpccoord_cart = hgscoord_cart.transform_to(hpc_frame)\n hpccoord_sph = hgscoord_sph.transform_to(hpc_frame)\n assert_quantity_allclose(hpccoord_cart.Tx, hpccoord_sph.Tx)\n assert_quantity_allclose(hpccoord_cart.Ty, hpccoord_sph.Ty)\n assert_quantity_allclose(hpccoord_cart.distance, hpccoord_sph.distance)\n\n\ndef test_hgs_cartesian_rep_to_hcc():\n # This test checks transformation HGS->HCC when the coordinate is in a Cartesian\n # representation and that it is the same as a transformation from an HGS frame with a\n # spherical representation\n\n obstime = \"2011-01-01\"\n hgscoord_cart = SkyCoord(x=1*u.km, y=0.*u.km, z=0.*u.km,\n frame=HeliographicStonyhurst(obstime=obstime),\n representation_type='cartesian')\n hcc_frame = Heliocentric(observer='earth', obstime=obstime)\n hgscoord_sph = hgscoord_cart.copy()\n hgscoord_sph.representation_type = 'spherical'\n hcccoord_cart = hgscoord_cart.transform_to(hcc_frame)\n hcccoord_sph = hgscoord_sph.transform_to(hcc_frame)\n assert_quantity_allclose(hcccoord_cart.x, hcccoord_sph.x)\n assert_quantity_allclose(hcccoord_cart.y, hcccoord_sph.y)\n assert_quantity_allclose(hcccoord_cart.z, hcccoord_sph.z)\n\n\ndef test_hgs_cartesian_rep_to_hgc():\n # This test checks transformation HGS->HCC when the coordinate is in a Cartesian\n # representation and that it is the same as a transformation from an HGS frame with a\n # spherical representation\n\n obstime = \"2011-01-01\"\n hgscoord_cart = SkyCoord(x=1*u.km, y=0.*u.km, z=0.*u.km,\n frame=HeliographicStonyhurst(obstime=obstime),\n representation_type='cartesian')\n 
hgscoord_sph = hgscoord_cart.copy()\n hgscoord_sph.representation_type = 'spherical'\n # HGC\n hgccoord_cart = hgscoord_cart.transform_to(HeliographicCarrington(obstime=obstime))\n hgccoord_sph = hgscoord_sph.transform_to(HeliographicCarrington(obstime=obstime))\n assert_quantity_allclose(hgccoord_cart.lat, hgccoord_sph.lat)\n assert_quantity_allclose(hgccoord_cart.lon, hgccoord_sph.lon)\n assert_quantity_allclose(hgccoord_cart.radius, hgccoord_sph.radius)\n\n\ndef test_hcc_to_hpc_different_observer():\n # This test checks transformation HCC->HPC in the case where the HCC and HPC frames are\n # defined by different observers.\n\n rsun = 1*u.m\n D0 = 1*u.km\n L0 = 1*u.deg\n observer_1 = HeliographicStonyhurst(lat=0*u.deg, lon=0*u.deg, radius=D0)\n observer_2 = HeliographicStonyhurst(lat=0*u.deg, lon=L0, radius=D0)\n hcc_frame = Heliocentric(observer=observer_1)\n hpc_frame = Helioprojective(observer=observer_2)\n hcccoord = SkyCoord(x=rsun, y=rsun, z=rsun, frame=hcc_frame)\n hpccoord_out = hcccoord.transform_to(hpc_frame)\n hpccoord_expected = hcccoord.transform_to(HeliographicStonyhurst).transform_to(hpc_frame)\n assert_quantity_allclose(hpccoord_out.Tx, hpccoord_expected.Tx)\n assert_quantity_allclose(hpccoord_out.Ty, hpccoord_expected.Ty)\n assert_quantity_allclose(hpccoord_out.distance, hpccoord_expected.distance)\n\n\ndef test_hpc_to_hcc_different_observer():\n # This test checks transformation HPC->HCC in the case where the HCC and HPC frames are\n # defined by different observers.\n\n rsun = 1*u.m\n D0 = 1*u.km\n L0 = 1*u.deg\n observer_1 = HeliographicStonyhurst(lat=0*u.deg, lon=0*u.deg, radius=D0)\n observer_2 = HeliographicStonyhurst(lat=0*u.deg, lon=L0, radius=D0)\n hcc_frame = Heliocentric(observer=observer_1)\n hpc_frame = Helioprojective(observer=observer_2, rsun=rsun)\n hpccoord = SkyCoord(Tx=0*u.arcsec, Ty=0*u.arcsec, frame=hpc_frame)\n hcccoord_out = hpccoord.transform_to(hcc_frame)\n hcccoord_expected = hpccoord.transform_to(HeliographicStonyhurst).transform_to(hcc_frame)\n assert_quantity_allclose(hcccoord_out.x, hcccoord_expected.x)\n assert_quantity_allclose(hcccoord_out.y, hcccoord_expected.y)\n assert_quantity_allclose(hcccoord_out.z, hcccoord_expected.z)\n\n\ndef test_hcc_to_hpc_same_observer():\n # This test checks transformation HCC->HPC in the case of same observer\n\n rsun = 1*u.m\n D0 = 1*u.km\n observer = HeliographicStonyhurst(lat=0*u.deg, lon=0*u.deg, radius=D0)\n hcc_frame = Heliocentric(observer=observer)\n hpc_frame = Helioprojective(observer=observer, rsun=rsun)\n hcccoord = SkyCoord(x=rsun, y=rsun, z=rsun, frame=hcc_frame)\n hpccoord_out = hcccoord.transform_to(hpc_frame)\n hpccoord_expected = hcccoord.transform_to(HeliographicStonyhurst).transform_to(hpc_frame)\n assert_quantity_allclose(hpccoord_out.Tx, hpccoord_expected.Tx)\n assert_quantity_allclose(hpccoord_out.Ty, hpccoord_expected.Ty)\n assert_quantity_allclose(hpccoord_out.distance, hpccoord_expected.distance)\n\n\ndef test_hpc_to_hcc_same_observer():\n # This test checks transformation HPC->HCC in the case of same observer\n\n rsun = 1*u.m\n D0 = 1 * u.km\n observer = HeliographicStonyhurst(lat=0 * u.deg, lon=0 * u.deg, radius=D0)\n hcc_frame = Heliocentric(observer=observer)\n hpc_frame = Helioprojective(observer=observer, rsun=rsun)\n hpccoord = SkyCoord(Tx=0 * u.arcsec, Ty=0 * u.arcsec, frame=hpc_frame)\n hcccoord_out = hpccoord.transform_to(hcc_frame)\n hcccoord_expected = hpccoord.transform_to(HeliographicStonyhurst).transform_to(hcc_frame)\n assert_quantity_allclose(hcccoord_out.x, 
hcccoord_expected.x)\n assert_quantity_allclose(hcccoord_out.y, hcccoord_expected.y)\n assert_quantity_allclose(hcccoord_out.z, hcccoord_expected.z)\n\n\ndef test_hpc_hcc_different_observer_radius():\n # Tests HPC->HCC with a change in observer at different distances from the Sun\n observer1 = HeliographicStonyhurst(0*u.deg, 0*u.deg, 1*u.AU)\n hpc = Helioprojective(0*u.arcsec, 0*u.arcsec, 0.5*u.AU, observer=observer1)\n\n observer2 = HeliographicStonyhurst(90*u.deg, 0*u.deg, 0.75*u.AU)\n hcc = hpc.transform_to(Heliocentric(observer=observer2))\n\n assert_quantity_allclose(hcc.x, -0.5*u.AU)\n assert_quantity_allclose(hcc.y, 0*u.AU, atol=1e-10*u.AU)\n assert_quantity_allclose(hcc.z, 0*u.AU, atol=1e-10*u.AU)\n\n\ndef test_hgs_hgs():\n # Test HGS loopback transformation\n obstime = Time('2001-01-01')\n old = SkyCoord(90*u.deg, 10*u.deg, 1*u.AU, frame=HeliographicStonyhurst(obstime=obstime))\n new = old.transform_to(HeliographicStonyhurst(obstime=obstime + 1*u.day))\n\n assert_quantity_allclose(new.lon, old.lon - 1*u.deg, atol=0.1*u.deg) # due to Earth motion\n assert_quantity_allclose(new.lat, old.lat, atol=1e-3*u.deg)\n assert_quantity_allclose(new.radius, old.radius, atol=1e-5*u.AU)\n\n\ndef test_hgc_hgc():\n # Test HGC loopback transformation\n obstime = Time('2001-01-01')\n old = SkyCoord(90*u.deg, 10*u.deg, 1*u.AU, frame=HeliographicCarrington(obstime=obstime))\n new = old.transform_to(HeliographicCarrington(obstime=obstime + 1*u.day))\n\n assert_quantity_allclose(new.lon, old.lon - 14.1844*u.deg, atol=1e-4*u.deg) # solar rotation\n assert_quantity_allclose(new.lat, old.lat, atol=1e-4*u.deg)\n assert_quantity_allclose(new.radius, old.radius, atol=1e-5*u.AU)\n\n\ndef test_hcc_hcc():\n # Test same observer and changing obstime\n observer = HeliographicStonyhurst(0*u.deg, 0*u.deg, 1*u.AU, obstime='2001-02-01')\n from_hcc = Heliocentric(0.2*u.AU, 0.3*u.AU, 0.4*u.AU, observer=observer, obstime='2001-01-01')\n to_hcc = from_hcc.transform_to(Heliocentric(observer=observer, obstime='2001-03-31'))\n\n # Since the observer is the same, the coordinates should be nearly the same but not exactly\n # equal due to motion of the origin (the Sun)\n assert np.all(from_hcc.cartesian.xyz != to_hcc.cartesian.xyz)\n assert_quantity_allclose(from_hcc.cartesian.xyz, to_hcc.cartesian.xyz, rtol=2e-3)\n\n # Test changing observer and same obstime\n observer1 = HeliographicStonyhurst(0*u.deg, 0*u.deg, 1*u.AU, obstime='2001-01-01')\n observer2 = HeliographicStonyhurst(0*u.deg, 0*u.deg, 1*u.AU, obstime='2001-03-31')\n from_hcc = Heliocentric(0.2*u.AU, 0.3*u.AU, 0.4*u.AU, observer=observer1, obstime='2001-02-01')\n to_hcc = from_hcc.transform_to(Heliocentric(observer=observer2, obstime='2001-02-01'))\n\n # This change in observer is approximately a 90-degree rotation about the Y axis\n assert_quantity_allclose(to_hcc.x, -from_hcc.z, rtol=2e-3)\n assert_quantity_allclose(to_hcc.y, from_hcc.y, rtol=2e-3)\n assert_quantity_allclose(to_hcc.z, from_hcc.x, rtol=2e-3)\n\n\ndef test_hcc_hgs_observer_mismatch():\n # Test whether the transformation gives the same answer regardless of what obstime the observer\n # coordinate is represented in\n observer1 = HeliographicStonyhurst(0*u.deg, 0*u.deg, 1*u.AU, obstime='2001-01-01')\n observer2 = observer1.transform_to(HeliographicStonyhurst(obstime='2001-03-31'))\n\n hcc1 = Heliocentric(0.2*u.AU, 0.3*u.AU, 0.4*u.AU, observer=observer1, obstime=observer1.obstime)\n hgs1 = hcc1.transform_to(HeliographicStonyhurst(obstime=hcc1.obstime))\n\n hcc2 = Heliocentric(0.2*u.AU, 0.3*u.AU, 
0.4*u.AU, observer=observer2, obstime=observer1.obstime)\n hgs2 = hcc2.transform_to(HeliographicStonyhurst(obstime=hcc2.obstime))\n\n assert_quantity_allclose(hgs1.lon, hgs2.lon)\n assert_quantity_allclose(hgs1.lat, hgs2.lat)\n assert_quantity_allclose(hgs1.radius, hgs2.radius)\n\n\ndef test_hgs_hcc_observer_mismatch():\n # Test whether the transformation gives the same answer regardless of what obstime the observer\n # coordinate is represented in\n observer1 = HeliographicStonyhurst(0*u.deg, 0*u.deg, 1*u.AU, obstime='2001-01-01')\n observer2 = observer1.transform_to(HeliographicStonyhurst(obstime='2001-03-31'))\n\n hgs = HeliographicStonyhurst(20*u.deg, 40*u.deg, 0.5*u.AU, obstime=observer1.obstime)\n hcc1 = hgs.transform_to(Heliocentric(observer=observer1, obstime=hgs.obstime))\n hcc2 = hgs.transform_to(Heliocentric(observer=observer2, obstime=hgs.obstime))\n\n assert_quantity_allclose(hcc1.cartesian.xyz, hcc2.cartesian.xyz)\n\n\ndef test_hgs_hcrs_sunspice():\n # Compare our HGS->HCRS transformation against SunSPICE by transforming beyond it\n # \"HEQ\" is another name for HEEQ, which is equivalent to Heliographic Stonyhurst\n # \"HAE\" is equivalent to Astropy's Heliocentric Mean Ecliptic, and defaults to J2000.0\n #\n # IDL> coord = [1.d, 0.d, 10.d]\n # IDL> convert_sunspice_lonlat, '2019-06-01', coord, 'HEQ', 'HAE', /au, /degrees\n # IDL> print, coord\n # 1.0000000 -108.65371 10.642778\n\n old = SkyCoord(0*u.deg, 10*u.deg, 1*u.AU, frame=HeliographicStonyhurst(obstime='2019-06-01'))\n new = old.transform_to(HeliocentricMeanEcliptic)\n\n assert_quantity_allclose(new.lon, Longitude(-108.65371*u.deg), atol=0.1*u.arcsec, rtol=0)\n assert_quantity_allclose(new.lat, 10.642778*u.deg, atol=0.1*u.arcsec, rtol=0)\n assert_quantity_allclose(new.distance, old.radius)\n\n # Transform to HAE precessed to the mean ecliptic of date instead of J2000.0\n # IDL> coord = [1.d, 0.d, 10.d]\n # IDL> convert_sunspice_lonlat, '2019-06-01', coord, 'HEQ', 'HAE', /precess, /au, /degrees\n # IDL> print, coord\n # 1.0000000 -108.38240 10.640314\n\n new = old.transform_to(HeliocentricMeanEcliptic(equinox='2019-06-01'))\n\n assert_quantity_allclose(new.lon, Longitude(-108.38240*u.deg), atol=0.1*u.arcsec, rtol=0)\n assert_quantity_allclose(new.lat, 10.640314*u.deg, atol=0.1*u.arcsec, rtol=0)\n assert_quantity_allclose(new.distance, old.radius)\n\n\ndef test_hgs_hgc_sunspice():\n # Compare our HGS->HGC transformation against SunSPICE\n # \"HEQ\" is another name for HEEQ, which is equivalent to Heliographic Stonyhurst\n # \"Carrington\" is offset by 0.076 degrees in longitude from our Heliographic Carrington (HGC)\n # because \"Carrington\" does not include light travel time to the observer, while our\n # HGC includes the light travel time to Earth (see Seidelmann et al. 
2007).\n #\n # IDL> coord = [1.d, 0.d, 10.d]\n # IDL> convert_sunspice_lonlat, '2019-06-01', coord, 'HEQ', 'Carrington', /au, /degrees\n # IDL> print, coord\n # 1.0000000 16.688242 10.000000\n\n old = SkyCoord(0*u.deg, 10*u.deg, 1*u.AU, frame=HeliographicStonyhurst(obstime='2019-06-01'))\n new = old.heliographic_carrington\n\n assert_quantity_allclose(new.lon, 16.688242*u.deg + 0.076*u.deg, atol=1e-2*u.arcsec, rtol=0)\n assert_quantity_allclose(new.lat, old.lat)\n assert_quantity_allclose(new.radius, old.radius)\n\n\ndef test_hgs_hcc_sunspice():\n # Compare our HGS->HCC transformation against SunSPICE\n # \"HEQ\" is another name for HEEQ, which is equivalent to Heliographic Stonyhurst\n # \"HGRTN\" is equivalent to our Heliocentric, but with the axes permuted\n # SunSPICE, like us, assumes an Earth observer if not explicitly specified\n #\n # IDL> coord = [7d5, 8d5, 9d5]\n # IDL> convert_sunspice_coord, '2019-06-01', coord, 'HEQ', 'HGRTN'\n # Assuming Earth observation\n # IDL> print, coord\n # 688539.32 800000.00 908797.89\n\n old = SkyCoord(CartesianRepresentation([7e5, 8e5, 9e5]*u.km),\n frame=HeliographicStonyhurst(obstime='2019-06-01'))\n new = old.transform_to(Heliocentric(observer='earth'))\n\n assert_quantity_allclose(new.x, 800000.00*u.km, atol=1e-2*u.km)\n assert_quantity_allclose(new.y, 908797.89*u.km, atol=1e-2*u.km)\n assert_quantity_allclose(new.z, 688539.32*u.km, atol=1e-2*u.km)\n\n\ndef test_hpc_hgs_implicit_hcc():\n # An HPC->HGS transformation should give the same answer whether the transformation step\n # through HCC is implicit or explicit\n start = SkyCoord(0*u.arcsec, 0*u.arcsec, 0.5*u.AU,\n frame=Helioprojective(obstime='2019-06-01', observer='earth'))\n frame = HeliographicStonyhurst(obstime='2019-12-01')\n\n implicit = start.transform_to(frame)\n explicit1 = start.transform_to(Heliocentric(obstime=start.obstime, observer='earth')).\\\n transform_to(frame)\n explicit2 = start.transform_to(Heliocentric(obstime=frame.obstime, observer='earth')).\\\n transform_to(frame)\n\n assert_quantity_allclose(implicit.separation_3d(explicit1), 0*u.AU, atol=1e-10*u.AU)\n assert_quantity_allclose(implicit.separation_3d(explicit2), 0*u.AU, atol=1e-10*u.AU)\n\n\[email protected](astropy.__version__ < '3.2.0', reason=\"Not supported by Astropy <3.2\")\ndef test_velocity_hcrs_hgs():\n # Obtain the position/velocity of Earth in ICRS\n obstime = Time(['2019-01-01', '2019-04-01', '2019-07-01', '2019-10-01'])\n pos, vel = get_body_barycentric_posvel('earth', obstime)\n loc = pos.with_differentials(vel.represent_as(CartesianDifferential))\n earth = SkyCoord(loc, frame='icrs', obstime=obstime)\n\n # The velocity of Earth in HGS should be very close to zero. 
The velocity in the HGS Y\n # direction is slightly further away from zero because there is true latitudinal motion.\n new = earth.heliographic_stonyhurst\n assert_quantity_allclose(new.velocity.d_x, 0*u.km/u.s, atol=1e-15*u.km/u.s)\n assert_quantity_allclose(new.velocity.d_y, 0*u.km/u.s, atol=1e-14*u.km/u.s)\n assert_quantity_allclose(new.velocity.d_z, 0*u.km/u.s, atol=1e-15*u.km/u.s)\n\n # Test the loopback to ICRS\n newer = new.icrs\n assert_quantity_allclose(newer.velocity.d_x, vel.x)\n assert_quantity_allclose(newer.velocity.d_y, vel.y)\n assert_quantity_allclose(newer.velocity.d_z, vel.z)\n\n\ndef test_velocity_hgs_hgc():\n # Construct a simple HGS coordinate with zero velocity\n obstime = Time(['2019-01-01', '2019-04-01', '2019-07-01', '2019-10-01'])\n pos = CartesianRepresentation(1, 0, 0)*u.AU\n vel = CartesianDifferential(0, 0, 0)*u.km/u.s\n loc = (pos.with_differentials(vel))._apply('repeat', obstime.size)\n coord = SkyCoord(HeliographicStonyhurst(loc, obstime=obstime))\n\n # The induced velocity in HGC should be entirely longitudinal, and approximately equal to one\n # full rotation every mean synodic period (27.2753 days)\n new = coord.heliographic_carrington\n new_vel = new.data.differentials['s'].represent_as(SphericalDifferential, new.data)\n assert_quantity_allclose(new_vel.d_lon, -360*u.deg / (27.2753*u.day), rtol=1e-2)\n assert_quantity_allclose(new_vel.d_lat, 0*u.deg/u.s)\n assert_quantity_allclose(new_vel.d_distance, 0*u.km/u.s, atol=1e-7*u.km/u.s)\n\n\ndef test_hme_hee_sunspice():\n # Compare our HME->HEE transformation against SunSPICE\n # \"HAE\" is equivalent to Astropy's Heliocentric Mean Ecliptic, and defaults to J2000.0\n #\n # IDL> coord = [1.d, 0.d, 10.d]\n # IDL> convert_sunspice_lonlat, '2019-06-01', coord, 'HAE', 'HEE', /au, /degrees\n # IDL> print, coord\n # 1.0000000 110.01610 10.000300\n\n old = SkyCoord(0*u.deg, 10*u.deg, 1*u.AU, frame=HeliocentricMeanEcliptic(obstime='2019-06-01'))\n new = old.transform_to(HeliocentricEarthEcliptic)\n\n assert_quantity_allclose(new.lon, Longitude(110.01610*u.deg), atol=0.01*u.arcsec, rtol=0)\n assert_quantity_allclose(new.lat, 10.000300*u.deg, atol=0.01*u.arcsec, rtol=0)\n assert_quantity_allclose(new.distance, old.distance)\n\n # Transform from HAE precessed to the mean ecliptic of date instead of J2000.0\n # IDL> coord = [1.d, 0.d, 10.d]\n # IDL> convert_sunspice_lonlat, '2019-06-01', coord, 'HAE', 'HEE', /au, /degrees, /precess\n # IDL> print, coord\n # 1.0000000 109.74535 10.000070\n\n old = SkyCoord(0*u.deg, 10*u.deg, 1*u.AU, frame=HeliocentricMeanEcliptic(obstime='2019-06-01',\n equinox='2019-06-01'))\n new = old.transform_to(HeliocentricEarthEcliptic)\n\n assert_quantity_allclose(new.lon, Longitude(109.74535*u.deg), atol=0.05*u.arcsec, rtol=0)\n assert_quantity_allclose(new.lat, 10.000070*u.deg, atol=0.01*u.arcsec, rtol=0)\n assert_quantity_allclose(new.distance, old.distance)\n\n\ndef test_hee_hee():\n # Test HEE loopback transformation\n obstime = Time('2001-01-01')\n old = SkyCoord(90*u.deg, 10*u.deg, 1*u.AU, frame=HeliocentricEarthEcliptic(obstime=obstime))\n\n new = old.transform_to(HeliocentricEarthEcliptic)\n\n assert_quantity_allclose(new.lon, old.lon)\n assert_quantity_allclose(new.lat, old.lat)\n assert_quantity_allclose(new.distance, old.distance)\n\n new = old.transform_to(HeliocentricEarthEcliptic(obstime=obstime + 1*u.day))\n\n assert_quantity_allclose(new.lon, old.lon - 1*u.deg, atol=0.1*u.deg) # due to Earth motion\n assert_quantity_allclose(new.lat, old.lat, atol=0.5*u.arcsec)\n 
assert_quantity_allclose(new.distance, old.distance, rtol=1e-5)\n\n\ndef test_hee_gse_sunspice():\n # Compare our HEE->GSE transformation against SunSPICE\n #\n # IDL> coord = [0.7d, -20.d, 10.d]\n # IDL> convert_sunspice_coord, '2019-06-01', coord, 'HEE', 'GSE', /au, /degrees\n # IDL> print, coord\n # 0.45215884 32.777377 15.594639\n\n old = SkyCoord(-20*u.deg, 10*u.deg, 0.7*u.AU,\n frame=HeliocentricEarthEcliptic(obstime='2019-06-01'))\n new = old.geocentricsolarecliptic\n\n assert_quantity_allclose(new.lon, 32.777377*u.deg, atol=0.01*u.arcsec, rtol=0)\n assert_quantity_allclose(new.lat, 15.594639*u.deg, atol=0.01*u.arcsec, rtol=0)\n assert_quantity_allclose(new.distance, 0.45215884*u.AU)\n\n\ndef test_gse_gse():\n # Test GSE loopback transformation\n old = SkyCoord(90*u.deg, 10*u.deg, 0.7*u.AU,\n frame=GeocentricSolarEcliptic(obstime='2001-01-01'))\n new = old.transform_to(GeocentricSolarEcliptic)\n\n assert_quantity_allclose(new.lon, old.lon)\n assert_quantity_allclose(new.lat, old.lat)\n assert_quantity_allclose(new.distance, old.distance)\n\n\ndef test_hgs_hci_sunspice():\n # Compare our HGS->HCI transformation against SunSPICE\n # \"HEQ\" is another name for HEEQ, which is equivalent to Heliographic Stonyhurst\n #\n # IDL> coord = [1.d, 120.d, 10.d]\n # IDL> convert_sunspice_lonlat, '2019-06-01', coord, 'HEQ', 'HCI', /au, /degrees\n # IDL> print, coord\n # 1.0000000 -65.736793 10.000000\n\n old = SkyCoord(120*u.deg, 10*u.deg, 1*u.AU, frame=HeliographicStonyhurst(obstime='2019-06-01'))\n new = old.transform_to(HeliocentricInertial)\n\n assert_quantity_allclose(new.lon, -65.736793*u.deg, atol=0.5*u.arcsec, rtol=0)\n assert_quantity_allclose(new.lat, old.lat)\n assert_quantity_allclose(new.distance, old.radius)\n\n\ndef test_hci_hci():\n # Test HCI loopback transformation\n obstime = Time('2001-01-01')\n old = SkyCoord(90*u.deg, 10*u.deg, 0.7*u.AU, frame=HeliocentricInertial(obstime=obstime))\n new = old.transform_to(HeliocentricInertial)\n\n assert_quantity_allclose(new.lon, old.lon)\n assert_quantity_allclose(new.lat, old.lat)\n assert_quantity_allclose(new.distance, old.distance)\n\n new = old.transform_to(HeliocentricInertial(obstime=obstime + 1*u.day))\n\n assert_quantity_allclose(new.lon, old.lon, atol=0.1*u.deg) # due to Earth motion\n assert_quantity_allclose(new.lat, old.lat, atol=1e-3*u.deg)\n assert_quantity_allclose(new.distance, old.distance, atol=1e-5*u.AU)\n\n\ndef test_hme_gei_sunspice():\n # Compare our HME->GEI transformation against SunSPICE\n # \"HAE\" is equivalent to Astropy's Heliocentric Mean Ecliptic, and defaults to J2000.0\n #\n # IDL> coord = [1.d, 120.d, 10.d]\n # IDL> convert_sunspice_lonlat, '2019-06-01', coord, 'HAE', 'GEI', /au, /degrees\n # IDL> print, coord\n # 1.8197210 95.230617 28.830109\n\n old = SkyCoord(120*u.deg, 10*u.deg, 1*u.AU,\n frame=HeliocentricMeanEcliptic(obstime='2019-06-01'))\n new = old.transform_to(GeocentricEarthEquatorial)\n\n assert_quantity_allclose(new.lon, Longitude(95.230617*u.deg), atol=0.01*u.arcsec, rtol=0)\n assert_quantity_allclose(new.lat, 28.830109*u.deg, atol=0.05*u.arcsec, rtol=0)\n assert_quantity_allclose(new.distance, 1.8197210*u.AU)\n\n # Transform from HAE precessed to the mean ecliptic of date instead of J2000.0\n # IDL> coord = [1.d, 120.d, 10.d]\n # IDL> convert_sunspice_lonlat, '2019-06-01', coord, 'HAE', 'GEI', /au, /degrees, /precess\n # IDL> print, coord\n # 1.8217103 95.079030 28.827750\n\n old = SkyCoord(120*u.deg, 10*u.deg, 1*u.AU,\n frame=HeliocentricMeanEcliptic(obstime='2019-06-01', 
equinox='2019-06-01'))\n new = old.transform_to(GeocentricEarthEquatorial(equinox=_J2000))\n\n assert_quantity_allclose(new.lon, Longitude(95.079030*u.deg), atol=0.05*u.arcsec, rtol=0)\n assert_quantity_allclose(new.lat, 28.827750*u.deg, atol=0.05*u.arcsec, rtol=0)\n assert_quantity_allclose(new.distance, 1.8217103*u.AU)\n\n\ndef test_gei_gei():\n # Test GEI loopback transformation using the 2017 revision to Franz & Harper 2002\n t = Time('1996-08-28 16:46:00', scale='tt')\n gei_j2000 = CartesianRepresentation([-5.7840451, -4.1082375, 1.9146822] * (6378.14*u.km))\n gei_d = CartesianRepresentation([-5.7864918, -4.1039136, 1.9165612] * (6378.14*u.km))\n\n old = SkyCoord(gei_j2000, frame=GeocentricEarthEquatorial(obstime=t))\n new = old.transform_to(GeocentricEarthEquatorial(equinox=t, obstime=t)).cartesian\n\n assert_quantity_allclose(new.xyz, gei_d.xyz)\n\n\ndef test_no_observer():\n # Tests transformations to and from observer-based frames with no observer defined\n frames_in = [Heliocentric(0*u.km, 0*u.km, 0*u.km, observer=None),\n Heliocentric(0*u.km, 0*u.km, 0*u.km, observer=None, obstime='2001-01-01'),\n Helioprojective(0*u.deg, 0*u.deg, observer=None),\n Helioprojective(0*u.deg, 0*u.deg, observer=None, obstime='2001-01-01')]\n frames_out = frames_in + [\n HeliographicStonyhurst(0*u.deg, 0*u.deg, obstime=None),\n HeliographicStonyhurst(0*u.deg, 0*u.deg, obstime='2001-01-01'),\n Heliocentric(0*u.km, 0*u.km, 0*u.km, observer=None, obstime='2012-12-12'),\n Heliocentric(0*u.km, 0*u.km, 0*u.km, observer=\"earth\", obstime=None),\n Heliocentric(0*u.km, 0*u.km, 0*u.km, observer=\"earth\", obstime='2001-01-01'),\n Helioprojective(0*u.deg, 0*u.deg, observer=None, obstime='2012-12-12'),\n Helioprojective(0*u.deg, 0*u.deg, observer=\"earth\", obstime=None),\n Helioprojective(0*u.deg, 0*u.deg, observer=\"earth\", obstime='2001-01-01')]\n\n # Self-transformations should succeed\n for f in frames_in:\n f.transform_to(f.replicate_without_data())\n\n # All other transformations should error\n for i, f1 in enumerate(frames_in):\n for f2 in frames_out[i + 1:]:\n with pytest.raises(ConvertError):\n f1.transform_to(f2)\n with pytest.raises(ConvertError):\n f2.transform_to(f1)\n\n\ndef test_array_obstime():\n # Validate that you can transform from an array of obstimes to no obstimes,\n # or different obstimes.\n a = SkyCoord([10]*2, [10]*2, unit=u.deg,\n observer=\"earth\",\n obstime=[\"2019-01-01\", \"2019-01-02\"],\n frame=\"heliographic_carrington\")\n\n t = a.transform_to(Helioprojective)\n assert isinstance(t.frame, Helioprojective)\n\n t2 = a.transform_to(Helioprojective(obstime=[\"2019-01-03\", \"2019-01-04\"]))\n assert isinstance(t2.frame, Helioprojective)\n\n\n_frameset1 = [HeliographicStonyhurst, HeliographicCarrington, HeliocentricInertial]\n_frameset2 = [Heliocentric, Helioprojective]\n\n\[email protected](\"start_class\", _frameset1 + _frameset2)\[email protected](\"end_class\", _frameset1)\ndef test_no_obstime_on_one_end(start_class, end_class):\n start_obstime = Time(\"2001-01-01\")\n\n if hasattr(start_class, 'observer'):\n coord = start_class(CartesianRepresentation(0, 0, 0)*u.km,\n obstime=start_obstime, observer=\"earth\")\n else:\n coord = start_class(CartesianRepresentation(0, 0, 0)*u.km, obstime=start_obstime)\n\n result = coord.transform_to(end_class)\n assert result.obstime == start_obstime\n" ]
[ [ "numpy.arctan2", "numpy.all", "numpy.tan" ] ]
moeyensj/adam_home
[ "7dbe661ed9a04e9621ec4f5c9a0a9682cc37c227" ]
[ "adam/astro_utils.py" ]
[ "import numpy as np\n\nJPL_OBLIQUITY = np.deg2rad(84381.448 / 3600.0)\n\n\ndef icrf_to_jpl_ecliptic(x, y, z, vx, vy, vz):\n return _apply_x_rotation(JPL_OBLIQUITY, x, y, z, vx, vy, vz)\n\n\ndef jpl_ecliptic_to_icrf(x, y, z, vx, vy, vz):\n return _apply_x_rotation(-JPL_OBLIQUITY, x, y, z, vx, vy, vz)\n\n\ndef _apply_x_rotation(phi, x0, y0, z0, vx0, vy0, vz0):\n x = x0\n y = y0 * np.cos(phi) + z0 * np.sin(phi)\n z = -y0 * np.sin(phi) + z0 * np.cos(phi)\n vx = vx0\n vy = vy0 * np.cos(phi) + vz0 * np.sin(phi)\n vz = -vy0 * np.sin(phi) + vz0 * np.cos(phi)\n return [x, y, z, vx, vy, vz]\n" ]
[ [ "numpy.sin", "numpy.cos", "numpy.deg2rad" ] ]
amousist/cartpole
[ "7534d9504b4678a3b09a4e17466f54eaeaf23ccc" ]
[ "venv/lib/python3.6/site-packages/gym/envs/mujoco/humanoid_v3.py" ]
[ "import numpy as np\nfrom gym.envs.mujoco import mujoco_env\nfrom gym import utils\n\n\nDEFAULT_CAMERA_CONFIG = {\n 'trackbodyid': 1,\n 'distance': 4.0,\n 'lookat': np.array((0.0, 0.0, 2.0)),\n 'elevation': -20.0,\n}\n\n\ndef mass_center(model, sim):\n mass = np.expand_dims(model.body_mass, axis=1)\n xpos = sim.data.xipos\n return (np.sum(mass * xpos, axis=0) / np.sum(mass))[0:2].copy()\n\n\nclass HumanoidEnv(mujoco_env.MujocoEnv, utils.EzPickle):\n def __init__(self,\n xml_file='humanoid.xml',\n forward_reward_weight=1.25,\n ctrl_cost_weight=0.1,\n contact_cost_weight=5e-7,\n contact_cost_range=(-np.inf, 10.0),\n healthy_reward=5.0,\n terminate_when_unhealthy=True,\n healthy_z_range=(1.0, 2.0),\n reset_noise_scale=1e-2,\n exclude_current_positions_from_observation=True,\n rgb_rendering_tracking=True):\n utils.EzPickle.__init__(**locals())\n\n self._forward_reward_weight = forward_reward_weight\n self._ctrl_cost_weight = ctrl_cost_weight\n self._contact_cost_weight = contact_cost_weight\n self._contact_cost_range = contact_cost_range\n self._healthy_reward = healthy_reward\n self._terminate_when_unhealthy = terminate_when_unhealthy\n self._healthy_z_range = healthy_z_range\n\n self._reset_noise_scale = reset_noise_scale\n\n self._exclude_current_positions_from_observation = (\n exclude_current_positions_from_observation)\n\n mujoco_env.MujocoEnv.__init__(self, xml_file, 5, rgb_rendering_tracking=rgb_rendering_tracking)\n\n @property\n def healthy_reward(self):\n return float(\n self.is_healthy\n or self._terminate_when_unhealthy\n ) * self._healthy_reward\n\n def control_cost(self, action):\n control_cost = self._ctrl_cost_weight * np.sum(\n np.square(self.sim.data.ctrl))\n return control_cost\n\n @property\n def contact_cost(self):\n contact_forces = self.sim.data.cfrc_ext\n contact_cost = self._contact_cost_weight * np.sum(\n np.square(contact_forces))\n min_cost, max_cost = self._contact_cost_range\n contact_cost = np.clip(contact_cost, min_cost, max_cost)\n return contact_cost\n\n @property\n def is_healthy(self):\n min_z, max_z = self._healthy_z_range\n is_healthy = min_z < self.sim.data.qpos[2] < max_z\n\n return is_healthy\n\n @property\n def done(self):\n done = ((not self.is_healthy)\n if self._terminate_when_unhealthy\n else False)\n return done\n\n def _get_obs(self):\n position = self.sim.data.qpos.flat.copy()\n velocity = self.sim.data.qvel.flat.copy()\n\n com_inertia = self.sim.data.cinert.flat.copy()\n com_velocity = self.sim.data.cvel.flat.copy()\n\n actuator_forces = self.sim.data.qfrc_actuator.flat.copy()\n external_contact_forces = self.sim.data.cfrc_ext.flat.copy()\n\n if self._exclude_current_positions_from_observation:\n position = position[2:]\n\n return np.concatenate((\n position,\n velocity,\n com_inertia,\n com_velocity,\n actuator_forces,\n external_contact_forces,\n ))\n\n def step(self, action):\n xy_position_before = mass_center(self.model, self.sim)\n self.do_simulation(action, self.frame_skip)\n xy_position_after = mass_center(self.model, self.sim)\n\n xy_velocity = (xy_position_after - xy_position_before) / self.dt\n x_velocity, y_velocity = xy_velocity\n\n ctrl_cost = self.control_cost(action)\n contact_cost = self.contact_cost\n\n forward_reward = self._forward_reward_weight * x_velocity\n healthy_reward = self.healthy_reward\n\n rewards = forward_reward + healthy_reward\n costs = ctrl_cost + contact_cost\n\n observation = self._get_obs()\n reward = rewards - costs\n done = self.done\n info = {\n 'reward_linvel': forward_reward,\n 'reward_quadctrl': 
-ctrl_cost,\n 'reward_alive': healthy_reward,\n 'reward_impact': -contact_cost,\n\n 'x_position': xy_position_after[0],\n 'y_position': xy_position_after[1],\n 'distance_from_origin': np.linalg.norm(xy_position_after, ord=2),\n\n 'x_velocity': x_velocity,\n 'y_velocity': y_velocity,\n 'forward_reward': forward_reward,\n }\n\n return observation, reward, done, info\n\n def reset_model(self):\n noise_low = -self._reset_noise_scale\n noise_high = self._reset_noise_scale\n\n qpos = self.init_qpos + self.np_random.uniform(\n low=noise_low, high=noise_high, size=self.model.nq)\n qvel = self.init_qvel + self.np_random.uniform(\n low=noise_low, high=noise_high, size=self.model.nv)\n self.set_state(qpos, qvel)\n\n observation = self._get_obs()\n return observation\n\n def viewer_setup(self):\n for key, value in DEFAULT_CAMERA_CONFIG.items():\n if isinstance(value, np.ndarray):\n getattr(self.viewer.cam, key)[:] = value\n else:\n setattr(self.viewer.cam, key, value)\n" ]
[ [ "numpy.sum", "numpy.expand_dims", "numpy.clip", "numpy.array", "numpy.concatenate", "numpy.square", "numpy.linalg.norm" ] ]
kun-woo-park/MNIST-Alphabet-Superposition-CNN-DACON
[ "e5c50f6f28ae9cded2a65425f977e8f703b6fd89" ]
[ "deep_learning_modules.py" ]
[ "import os\nimport time\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset\n\n\nclass CustomDataset(Dataset): # custom dataset\n def __init__(self, x_dat, y_dat):\n x = x_dat\n y = y_dat\n self.len = x.shape[0]\n y = y.astype('int')\n x = x.astype('float32')\n self.x_data = torch.tensor(x)\n self.y_data = torch.tensor(y)\n\n def __getitem__(self, index):\n return self.x_data[index], self.y_data[index]\n\n def __len__(self):\n return self.len\n\n\nclass Model(torch.nn.Module): # custom model\n def __init__(self, batch_size, num_gpus):\n super(Model, self).__init__()\n self.batch_size = batch_size\n self.num_gpus = num_gpus\n self.layer_1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3,\n stride=1, padding=2)\n self.act_1 = nn.ReLU()\n self.conv2_bn1 = nn.BatchNorm2d(64)\n\n self.layer_2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3,\n stride=1, padding=2)\n self.act_2 = nn.ReLU()\n self.conv2_bn2 = nn.BatchNorm2d(64)\n\n self.layer_3 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3,\n stride=1, padding=2)\n self.act_3 = nn.ReLU()\n self.conv2_bn3 = nn.BatchNorm2d(64)\n\n self.max_1 = nn.MaxPool2d(2, 2)\n\n self.layer_4 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3,\n stride=1, padding=2)\n self.act_4 = nn.ReLU()\n self.conv2_bn4 = nn.BatchNorm2d(128)\n\n self.layer_5 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3,\n stride=1, padding=2)\n self.act_5 = nn.ReLU()\n self.conv2_bn5 = nn.BatchNorm2d(128)\n\n self.layer_6 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3,\n stride=1, padding=2)\n self.act_6 = nn.ReLU()\n self.conv2_bn6 = nn.BatchNorm2d(128)\n self.max_2 = nn.MaxPool2d(2, 2)\n\n self.layer_7 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3,\n stride=1, padding=2)\n self.act_7 = nn.ReLU()\n self.conv2_bn7 = nn.BatchNorm2d(256)\n\n self.layer_8 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3,\n stride=1, padding=2)\n self.act_8 = nn.ReLU()\n self.conv2_bn8 = nn.BatchNorm2d(256)\n\n self.layer_9 = nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3,\n stride=1, padding=2)\n self.act_9 = nn.ReLU()\n self.conv2_bn9 = nn.BatchNorm2d(256)\n\n self.max_3 = nn.MaxPool2d(2, 2)\n\n self.layer_10 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3,\n stride=1, padding=2)\n self.act_10 = nn.ReLU()\n self.conv2_bn10 = nn.BatchNorm2d(512)\n\n self.layer_11 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3,\n stride=1, padding=2)\n self.act_11 = nn.ReLU()\n self.conv2_bn11 = nn.BatchNorm2d(512)\n\n self.layer_12 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3,\n stride=1, padding=2)\n self.act_12 = nn.ReLU()\n self.conv2_bn12 = nn.BatchNorm2d(512)\n\n self.max_4 = nn.MaxPool2d(2, 2)\n\n self.layer_13 = nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=3,\n stride=1, padding=2)\n self.act_13 = nn.ReLU()\n self.conv2_bn13 = nn.BatchNorm2d(1024)\n\n self.layer_14 = nn.Conv2d(in_channels=1024, out_channels=1024, kernel_size=3,\n stride=1, padding=2)\n self.act_14 = nn.ReLU()\n self.conv2_bn14 = nn.BatchNorm2d(1024)\n\n self.max_5 = nn.MaxPool2d(2, 2)\n\n self.layer_15 = nn.Conv2d(in_channels=1024, out_channels=1024, kernel_size=1,\n stride=1)\n self.act_15 = nn.ReLU()\n self.conv2_bn15 = nn.BatchNorm2d(1024)\n\n self.layer_16 = nn.Conv2d(in_channels=1024, out_channels=1024, kernel_size=3,\n stride=1, padding=2)\n self.act_16 = nn.ReLU()\n self.conv2_bn16 = nn.BatchNorm2d(1024)\n\n self.layer_17 = 
nn.Conv2d(in_channels=1024, out_channels=1024, kernel_size=1,\n stride=1)\n self.act_17 = nn.ReLU()\n self.conv2_bn17 = nn.BatchNorm2d(1024)\n\n self.fc_layer_1 = nn.Linear(49*1024, 1000)\n self.act_18 = nn.ReLU()\n\n self.bnm1 = nn.BatchNorm1d(1000)\n\n self.fc_layer_2 = nn.Linear(1000, 1000)\n self.act_19 = nn.ReLU()\n\n self.bnm2 = nn.BatchNorm1d(1000)\n\n self.fc_layer_3 = nn.Linear(1000, 100)\n self.act_20 = nn.ReLU()\n\n self.bnm3 = nn.BatchNorm1d(100)\n\n self.fc_layer_4 = nn.Linear(100, 10)\n self.act_21 = nn.ReLU()\n\n def forward(self, x):\n x = x.view(self.batch_size//self.num_gpus, 1, 28, 28)\n out = self.layer_1(x)\n out = self.act_1(out)\n for module in list(self.modules())[2:-11]:\n out = module(out)\n out = out.view(self.batch_size//self.num_gpus, -1)\n for module in list(self.modules())[-11:]:\n out = module(out)\n return out\n\n\ndef train_model(model, train_loader, val_loader, batch_size, total_epoch, model_char,\n patience, start_early_stop_check, saving_start_epoch):\n if torch.cuda.is_available():\n model.cuda()\n\n # set loss and optimizer\n criterion = nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), weight_decay=0.001)\n model_name = \"\"\n trn_loss_list = []\n val_loss_list = []\n val_loss_min = float('inf') # best validation loss so far; keeps the checkpoint check below well-defined\n\n for epoch in range(total_epoch):\n trn_loss = 0.0\n for i, data in enumerate(train_loader, 0):\n inputs, labels = data\n if torch.cuda.is_available():\n inputs = inputs.cuda()\n labels = labels.cuda()\n # grad init\n optimizer.zero_grad()\n # forward propagation\n output = model(inputs)\n # calculate loss\n loss = criterion(output, labels)\n # back propagation\n loss.backward()\n # weight update\n optimizer.step()\n\n # trn_loss summary\n trn_loss += loss.item()\n # validation\n with torch.no_grad():\n val_loss = 0.0\n cor_match = 0\n for j, val in enumerate(val_loader):\n val_x, val_label = val\n if torch.cuda.is_available():\n val_x = val_x.cuda()\n val_label = val_label.cuda()\n val_output = model(val_x)\n v_loss = criterion(val_output, val_label)\n val_loss += v_loss\n _, predicted = torch.max(val_output, 1)\n cor_match += np.count_nonzero(predicted.cpu().detach()\n == val_label.cpu().detach())\n\n trn_loss_list.append(trn_loss/len(train_loader))\n val_loss_list.append(val_loss/len(val_loader))\n val_acc = cor_match/(len(val_loader)*batch_size)\n now = time.localtime()\n print(\"%04d/%02d/%02d %02d:%02d:%02d\" % (now.tm_year, now.tm_mon,\n now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec))\n\n print(\"epoch: {}/{} | trn loss: {:.4f} | val loss: {:.4f} | val accuracy: {:.4f}% \\n\".format(\n epoch+1, total_epoch, trn_loss /\n len(train_loader), val_loss / len(val_loader), val_acc*100\n ))\n # early stop\n if epoch+1 > 2:\n if val_loss_list[-1] > val_loss_list[-2]:\n start_early_stop_check = 1\n else:\n val_loss_min = val_loss_list[-1]\n\n if start_early_stop_check:\n early_stop_temp = val_loss_list[-patience:]\n if all(early_stop_temp[i] < early_stop_temp[i+1] for i in range(len(early_stop_temp)-1)):\n print(\"Early stop!\")\n break\n # save the minimum loss model\n if epoch+1 > saving_start_epoch:\n if val_loss_list[-1] < val_loss_min:\n if os.path.isfile(model_name):\n os.remove(model_name)\n val_loss_min = val_loss_list[-1]\n model_name = \"Custom_model_\"+model_char + \\\n \"_{:.3f}\".format(val_loss_min)\n torch.save(model, model_name)\n print(\"Model replaced and saved as \", model_name)\n\n torch.save(model, \"Custom_model_fin\")\n print(\"model saved 
complete\")\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.MaxPool2d", "torch.nn.Linear", "torch.nn.BatchNorm1d", "torch.save", "torch.tensor", "torch.no_grad", "torch.nn.CrossEntropyLoss", "torch.nn.KLDivLoss", "torch.cuda.is_available", "torch.nn.Conv2d", "torch.max", "torch.nn.ReLU" ] ]
AnastasiiaNovikova/sentiment-discovery
[ "eaae55921038d674e2f16fbd0bfd2e63194a9545" ]
[ "fp16/fp16.py" ]
[ "import torch\r\nfrom torch import nn\r\nfrom torch.autograd import Variable\r\nfrom torch.nn.parameter import Parameter\r\nfrom torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors\r\n\r\nfrom .loss_scaler import DynamicLossScaler, LossScaler\r\n\r\nFLOAT_TYPES = (torch.FloatTensor, torch.cuda.FloatTensor)\r\nHALF_TYPES = (torch.HalfTensor, torch.cuda.HalfTensor)\r\n\r\ndef conversion_helper(val, conversion):\r\n \"\"\"Apply conversion to val. Recursively apply conversion if `val` is a nested tuple/list structure.\"\"\"\r\n if not isinstance(val, (tuple, list)):\r\n return conversion(val)\r\n rtn = [conversion_helper(v, conversion) for v in val]\r\n if isinstance(val, tuple):\r\n rtn = tuple(rtn)\r\n return rtn\r\n\r\ndef fp32_to_fp16(val):\r\n \"\"\"Convert fp32 `val` to fp16\"\"\"\r\n def half_conversion(val):\r\n val_typecheck = val\r\n if isinstance(val_typecheck, (Parameter, Variable)):\r\n val_typecheck = val.data\r\n if isinstance(val_typecheck, FLOAT_TYPES):\r\n val = val.half()\r\n return val\r\n return conversion_helper(val, half_conversion)\r\n\r\ndef fp16_to_fp32(val):\r\n \"\"\"Convert fp16 `val` to fp32\"\"\"\r\n def float_conversion(val):\r\n val_typecheck = val\r\n if isinstance(val_typecheck, (Parameter, Variable)):\r\n val_typecheck = val.data\r\n if isinstance(val_typecheck, HALF_TYPES):\r\n val = val.float()\r\n return val\r\n return conversion_helper(val, float_conversion)\r\n\r\nclass FP16_Module(nn.Module):\r\n def __init__(self, module):\r\n super(FP16_Module, self).__init__()\r\n self.add_module('module', module.half())\r\n\r\n def forward(self, *inputs, **kwargs):\r\n return fp16_to_fp32(self.module(*(fp32_to_fp16(inputs)), **kwargs))\r\n\r\n def load_state_dict(self, state_dict, strict=True):\r\n self.module.load_state_dict(state_dict, strict=strict)\r\n\r\n def state_dict(self, destination=None, prefix='', keep_vars=False):\r\n return self.module.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)\r\n\r\nclass FP16_Optimizer(object):\r\n \"\"\"\r\n FP16_Optimizer is designed to wrap an existing PyTorch optimizer, \r\n and enable an fp16 model to be trained using a master copy of fp32 weights.\r\n\r\n Args:\r\n optimizer (torch.optim.optimizer): Existing optimizer containing initialized fp16 parameters. Internally, FP16_Optimizer replaces the passed optimizer's fp16 parameters with new fp32 parameters copied from the original ones. FP16_Optimizer also stores references to the original fp16 parameters, and updates these fp16 parameters from the master fp32 copy after each step. \r\n static_loss_scale (float, optional, default=1.0): Loss scale used internally to scale fp16 gradients computed by the model. Scaled gradients will be copied to fp32, then downscaled before being applied to the fp32 master params, so static_loss_scale should not affect learning rate.\r\n dynamic_loss_scale (bool, optional, default=False): Use dynamic loss scaling. 
If True, this will override any static_loss_scale option.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, optimizer, static_loss_scale=1.0, dynamic_loss_scale=False):\r\n if not torch.cuda.is_available():\r\n raise SystemError('Cannot use fp16 without CUDA')\r\n\r\n self.fp16_param_groups = []\r\n self.fp32_param_groups = []\r\n for param_group in optimizer.param_groups:\r\n fp16_params_this_group = [param for param in param_group['params'] if param.requires_grad]\r\n \r\n fp32_params_this_group = _flatten_dense_tensors(\r\n [param.detach().data.clone().float() for param in fp16_params_this_group])\r\n fp32_params_this_group = Variable(fp32_params_this_group, requires_grad = True)\r\n fp32_params_this_group.grad = fp32_params_this_group.new(*fp32_params_this_group.size())\r\n \r\n param_group['params'] = [fp32_params_this_group]\r\n\r\n self.fp16_param_groups.append(fp16_params_this_group)\r\n self.fp32_param_groups.append(fp32_params_this_group)\r\n\r\n self.optimizer = optimizer.__class__(optimizer.param_groups)\r\n\r\n # self.optimizer.load_state_dict(optimizer.state_dict())\r\n\r\n self.param_groups = self.optimizer.param_groups\r\n\r\n if dynamic_loss_scale:\r\n self.dynamic_loss_scale = True\r\n self.loss_scaler = DynamicLossScaler()\r\n else:\r\n self.dynamic_loss_scale = False\r\n self.loss_scaler = LossScaler(static_loss_scale)\r\n\r\n self.overflow = False\r\n self.first_closure_call_this_step = True\r\n\r\n def zero_grad(self):\r\n \"\"\"\r\n Zero fp32 and fp16 parameter grads.\r\n \"\"\"\r\n self.optimizer.zero_grad()\r\n for fp16_group in self.fp16_param_groups:\r\n for param in fp16_group:\r\n if param.grad is not None:\r\n param.grad.detach_() # This does appear in torch.optim.optimizer.zero_grad(), \r\n # but I'm not sure why it's needed.\r\n param.grad.zero_()\r\n\r\n def _check_overflow(self):\r\n fp16_params = [] \r\n for fp16_group in self.fp16_param_groups:\r\n for param in fp16_group:\r\n fp16_params.append(param)\r\n self.overflow = self.loss_scaler.has_overflow(fp16_params)\r\n\r\n def _update_scale(self, has_overflow=False):\r\n self.loss_scaler.update_scale(has_overflow)\r\n\r\n def _copy_grads_fp16_to_fp32(self):\r\n for fp32_group, fp16_group in zip(self.fp32_param_groups, self.fp16_param_groups):\r\n # This might incur one more deep copy than is necessary.\r\n fp32_group.grad.data.copy_(\r\n _flatten_dense_tensors([fp16_param.grad.data for fp16_param in fp16_group]))\r\n\r\n def _downscale_fp32(self):\r\n for param_group in self.optimizer.param_groups:\r\n param_group['params'][0].grad.data.mul_(1./self.loss_scale)\r\n\r\n def clip_fp32_grads(self, clip=-1):\r\n if clip > 0:\r\n torch.nn.utils.clip_grad_norm(self.fp32_param_groups, clip)\r\n\r\n def _copy_params_fp32_to_fp16(self):\r\n for fp16_group, fp32_group in zip(self.fp16_param_groups, self.fp32_param_groups):\r\n for fp16_param, fp32_param in zip(fp16_group, \r\n _unflatten_dense_tensors(fp32_group, fp16_group)):\r\n fp16_param.data.copy_(fp32_param.data)\r\n\r\n def state_dict(self):\r\n \"\"\"\r\n Returns a dict containing the current state of this FP16_Optimizer instance.\r\n This dict contains attributes of FP16_Optimizer, as well as the state_dict\r\n of the contained Pytorch optimizer.\r\n\r\n Untested.\r\n \"\"\"\r\n state_dict = {}\r\n state_dict['loss_scaler'] = self.loss_scaler\r\n state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale\r\n state_dict['overflow'] = self.overflow\r\n state_dict['first_closure_call_this_step'] = self.first_closure_call_this_step\r\n state_dict['optimizer_state_dict'] 
= self.optimizer.state_dict()\r\n return state_dict\r\n\r\n def load_state_dict(self, state_dict):\r\n \"\"\"\r\n Loads a state_dict created by an earlier call to state_dict. \r\n\r\n Untested.\r\n \"\"\"\r\n self.loss_scaler = state_dict['loss_scaler']\r\n self.dynamic_loss_scale = state_dict['dynamic_loss_scale']\r\n self.overflow = state_dict['overflow']\r\n self.first_closure_call_this_step = state_dict['first_closure_call_this_step']\r\n self.optimizer.load_state_dict(state_dict['optimizer_state_dict'])\r\n\r\n def step(self, closure=None): # could add clip option.\r\n \"\"\"\r\n If no closure is supplied, step should be called after fp16_optimizer_obj.backward(loss).\r\n step updates the fp32 master copy of parameters using the optimizer supplied to\r\n FP16_Optimizer's constructor, then copies the updated fp32 params into the fp16 params\r\n originally referenced by Fp16_Optimizer's constructor, so the user may immediately run\r\n another forward pass using their model.\r\n\r\n If a closure is supplied, step may be called without a prior call to self.backward(loss).\r\n However, the user should take care that any loss.backward() call within the closure\r\n has been replaced by fp16_optimizer_obj.backward(loss).\r\n\r\n Args:\r\n closure (optional): Closure that will be supplied to the underlying optimizer originally passed to FP16_Optimizer's constructor. closure should call zero_grad on the FP16_Optimizer object, compute the loss, call .backward(loss), and return the loss.\r\n\r\n Closure example::\r\n\r\n # optimizer is assumed to be an FP16_Optimizer object, previously constructed from an \r\n # existing pytorch optimizer.\r\n for input, target in dataset:\r\n def closure():\r\n optimizer.zero_grad()\r\n output = model(input)\r\n loss = loss_fn(output, target)\r\n optimizer.backward(loss)\r\n return loss\r\n optimizer.step(closure)\r\n\r\n .. note::\r\n The only changes that need to be made compared to \r\n `ordinary optimizer closures`_ are that \"optimizer\" itself should be an instance of \r\n FP16_Optimizer, and that the call to loss.backward should be replaced by \r\n optimizer.backward(loss). \r\n\r\n .. warning::\r\n Currently, calling step with a closure is not compatible with dynamic loss scaling.\r\n\r\n .. _`ordinary optimizer closures`:\r\n http://pytorch.org/docs/master/optim.html#optimizer-step-closure\r\n \"\"\"\r\n if closure is not None and isinstance(self.loss_scaler, DynamicLossScaler):\r\n raise TypeError(\"Using step with a closure is currently not \"\r\n \"compatible with dynamic loss scaling.\")\r\n\r\n scale = self.loss_scaler.loss_scale\r\n self._update_scale(self.overflow)\r\n\r\n if self.overflow:\r\n print(\"OVERFLOW! Skipping step. Attempted loss scale: {}\".format(scale))\r\n return\r\n \r\n if closure is not None:\r\n self._step_with_closure(closure)\r\n else:\r\n self.optimizer.step()\r\n\r\n self._copy_params_fp32_to_fp16()\r\n\r\n return\r\n\r\n def _step_with_closure(self, closure):\r\n def wrapped_closure():\r\n if self.first_closure_call_this_step:\r\n \"\"\"\r\n We expect that the fp16 params are initially fresh on entering self.step(),\r\n so _copy_params_fp32_to_fp16() is unnecessary the first time wrapped_closure()\r\n is called within self.optimizer.step().\r\n \"\"\"\r\n self.first_closure_call_this_step = False\r\n else:\r\n \"\"\" \r\n If self.optimizer.step() internally calls wrapped_closure more than once,\r\n it may update the fp32 params after each call. However, self.optimizer \r\n doesn't know about the fp16 params at all. 
If the fp32 params get updated,\r\n we can't rely on self.optimizer to refresh the fp16 params. We need\r\n to handle that manually:\r\n \"\"\"\r\n self._copy_params_fp32_to_fp16()\r\n \r\n \"\"\"\r\n Our API expects the user to give us ownership of the backward() call by\r\n replacing all calls to loss.backward() with optimizer.backward(loss).\r\n This requirement holds whether or not the call to backward() is made within\r\n a closure.\r\n If the user is properly calling optimizer.backward(loss) within \"closure,\" \r\n calling closure() here will give the fp32 master params fresh gradients\r\n for the optimizer to play with, \r\n so all wrapped_closure needs to do is call closure() and return the loss.\r\n \"\"\"\r\n temp_loss = closure() \r\n return temp_loss\r\n\r\n self.optimizer.step(wrapped_closure)\r\n\r\n self.first_closure_call_this_step = True\r\n\r\n def backward(self, loss, update_fp32_grads=True):\r\n \"\"\" \r\n fp16_optimizer_obj.backward performs the following conceptual operations:\r\n\r\n fp32_loss = loss.float() (see first Note below)\r\n\r\n scaled_loss = fp32_loss*loss_scale\r\n\r\n scaled_loss.backward(), which accumulates scaled gradients into the .grad attributes of the\r\n fp16 model's leaves.\r\n\r\n fp16 grads are then copied to the stored fp32 params' .grad attributes (see second Note).\r\n\r\n Finally, fp32 grads are divided by loss_scale.\r\n\r\n In this way, after fp16_optimizer_obj.backward, the fp32 parameters have fresh gradients,\r\n and fp16_optimizer_obj.step may be called.\r\n\r\n .. note::\r\n Converting the loss to fp32 before applying the loss scale provides some\r\n additional safety against overflow if the user has supplied an fp16 value. \r\n However, for maximum overflow safety, the user should\r\n compute the loss criterion (MSE, cross entropy, etc) in fp32 before supplying it to \r\n fp16_optimizer_obj.backward.\r\n\r\n .. note::\r\n The gradients found in an fp16 model's leaves after a call to \r\n fp16_optimizer_obj.backward should not be regarded as valid in general, \r\n because it's possible \r\n they have been scaled (and in the case of dynamic loss scaling, \r\n the scale factor may silently change over time). \r\n If the user wants to inspect gradients after a call to fp16_optimizer_obj.backward, \r\n he/she should query the .grad attribute of FP16_Optimizer's stored fp32 parameters.\r\n\r\n Args:\r\n loss: The loss output by the user's model. loss may be either float or half (but see first Note above).\r\n update_fp32_grads (bool, optional, default=True): Option to copy fp16 grads to fp32 grads on this call. By setting this to False, the user can delay this copy, which is useful to eliminate redundant fp16->fp32 grad copies if fp16_optimizer_obj.backward is being called on multiple losses in one iteration. 
If set to False, the user becomes responsible for calling fp16_optimizer_obj.update_fp32_grads before calling fp16_optimizer_obj.step.\r\n\r\n Example::\r\n\r\n # Ordinary operation:\r\n optimizer.backward(loss)\r\n\r\n # Naive operation with multiple losses (technically valid, but less efficient):\r\n # fp32 grads will be correct after the second call, but \r\n # the first call incurs an unnecessary fp16->fp32 grad copy.\r\n optimizer.backward(loss1)\r\n optimizer.backward(loss2)\r\n\r\n # More efficient way to handle multiple losses:\r\n # The fp16->fp32 grad copy is delayed until fp16 grads from all \r\n # losses have been accumulated.\r\n optimizer.backward(loss1, update_fp32_grads=False)\r\n optimizer.backward(loss2, update_fp32_grads=False)\r\n optimizer.update_fp32_grads()\r\n \"\"\" \r\n self.loss_scaler.backward(loss.float())\r\n if update_fp32_grads:\r\n self.update_fp32_grads()\r\n\r\n def update_fp32_grads(self):\r\n \"\"\"\r\n Copy the .grad attribute from stored references to fp16 parameters to \r\n the .grad attribute of the master fp32 parameters that are directly \r\n updated by the optimizer. :attr:`update_fp32_grads` only needs to be called if\r\n fp16_optimizer_obj.backward was called with update_fp32_grads=False.\r\n \"\"\"\r\n if self.dynamic_loss_scale:\r\n self._check_overflow()\r\n if self.overflow: return\r\n self._copy_grads_fp16_to_fp32()\r\n self._downscale_fp32()\r\n\r\n @property\r\n def loss_scale(self):\r\n return self.loss_scaler.loss_scale\r\n" ]
[ [ "torch._utils._unflatten_dense_tensors", "torch._utils._flatten_dense_tensors", "torch.autograd.Variable", "torch.nn.utils.clip_grad_norm" ] ]
ZhuofanXie/Copulas
[ "3210fc141c741185b781686cb69e00a96972d960" ]
[ "tests/unit/univariate/test_base.py" ]
[ "from unittest import TestCase\n\nimport numpy as np\n\nfrom copulas.univariate.base import BoundedType, ParametricType, Univariate\nfrom copulas.univariate.beta import BetaUnivariate\nfrom copulas.univariate.gamma import GammaUnivariate\nfrom copulas.univariate.gaussian import GaussianUnivariate\nfrom copulas.univariate.gaussian_kde import GaussianKDE\nfrom copulas.univariate.log_laplace import LogLaplace\nfrom copulas.univariate.student_t import StudentTUnivariate\nfrom copulas.univariate.truncated_gaussian import TruncatedGaussian\nfrom copulas.univariate.uniform import UniformUnivariate\nfrom tests import compare_nested_iterables\n\n\nclass TestUnivariate(TestCase):\n\n def test__select_candidates(self):\n # Run\n candidates = Univariate._select_candidates()\n\n # Assert\n assert set(candidates) == {\n GaussianKDE,\n GaussianUnivariate,\n TruncatedGaussian,\n BetaUnivariate,\n GammaUnivariate,\n StudentTUnivariate,\n UniformUnivariate,\n LogLaplace\n }\n\n def test__select_candidates_parametric(self):\n # Run\n candidates = Univariate._select_candidates(parametric=ParametricType.PARAMETRIC)\n\n # Assert\n assert set(candidates) == {\n GaussianUnivariate,\n TruncatedGaussian,\n BetaUnivariate,\n GammaUnivariate,\n StudentTUnivariate,\n UniformUnivariate,\n LogLaplace\n }\n\n def test__select_candidates_non_parametric(self):\n # Run\n candidates = Univariate._select_candidates(parametric=ParametricType.NON_PARAMETRIC)\n\n # Assert\n assert candidates == [GaussianKDE]\n\n def test__select_candidates_bounded(self):\n # Run\n candidates = Univariate._select_candidates(bounded=BoundedType.BOUNDED)\n\n # Assert\n assert set(candidates) == {\n TruncatedGaussian,\n BetaUnivariate,\n UniformUnivariate\n }\n\n def test__select_candidates_unbounded(self):\n # Run\n candidates = Univariate._select_candidates(bounded=BoundedType.UNBOUNDED)\n\n # Assert\n assert set(candidates) == {\n GaussianKDE,\n GaussianUnivariate,\n StudentTUnivariate\n }\n\n def test__select_candidates_semibounded(self):\n # Run\n candidates = Univariate._select_candidates(bounded=BoundedType.SEMI_BOUNDED)\n\n # Assert\n assert set(candidates) == {\n GammaUnivariate,\n LogLaplace\n }\n\n def test_fit_constant(self):\n \"\"\"if constant values, replace methods.\"\"\"\n # Setup\n distribution = Univariate()\n\n # Run\n distribution.fit(np.array([1, 1, 1, 1, 1]))\n\n # Assert\n assert distribution.fitted\n assert distribution._instance._is_constant()\n\n def test_fit_not_constant(self):\n \"\"\"if constant values, replace methods.\"\"\"\n # Setup\n distribution = Univariate()\n\n # Run\n distribution.fit(np.array([1, 2, 3, 4, 1]))\n\n # Assert\n assert distribution.fitted\n assert not distribution._instance._is_constant()\n\n def test_check_constant_value(self):\n \"\"\"check_constant_value return True if the array is constant.\"\"\"\n # Setup\n X = np.array([1, 1, 1, 1])\n\n # Run\n uni = Univariate()\n constant = uni._check_constant_value(X)\n\n # Check\n assert constant\n\n def test_check_constant_value_non_constant(self):\n \"\"\"_check_constant_value returns False if the array is not constant.\"\"\"\n # Setup\n X = np.array([1, 2, 3, 4])\n\n # Run\n uni = Univariate()\n constant = uni._check_constant_value(X)\n\n # Check\n assert not constant\n\n def test__constant_sample(self):\n \"\"\"_constant_sample returns a constant array of num_samples length.\"\"\"\n # Setup\n instance = Univariate()\n instance._constant_value = 15\n\n expected_result = np.array([15, 15, 15, 15, 15])\n\n # Run\n result = 
instance._constant_sample(5)\n\n # Check\n compare_nested_iterables(result, expected_result)\n\n def test__constant_cumulative_distribution(self):\n \"\"\"constant_cumulative_distribution returns only 0 and 1.\"\"\"\n # Setup\n instance = Univariate()\n instance._constant_value = 3\n\n X = np.array([1, 2, 3, 4, 5])\n expected_result = np.array([0, 0, 1, 1, 1])\n\n # Run\n result = instance._constant_cumulative_distribution(X)\n\n # Check\n compare_nested_iterables(result, expected_result)\n\n def test__constant_probability_density(self):\n \"\"\"constant_probability_density only is 1 in self.constant_value.\"\"\"\n # Setup\n instance = Univariate()\n instance._constant_value = 3\n\n X = np.array([1, 2, 3, 4, 5])\n expected_result = np.array([0, 0, 1, 0, 0])\n\n # Run\n result = instance._constant_probability_density(X)\n\n # Check\n compare_nested_iterables(result, expected_result)\n\n def test__constant_percent_point(self):\n \"\"\"constant_percent_point only is self.constant_value in non-zero probabilities.\"\"\"\n # Setup\n instance = Univariate()\n instance._constant_value = 3\n\n X = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])\n expected_result = np.array([3, 3, 3, 3, 3, 3])\n\n # Run\n result = instance._constant_percent_point(X)\n\n # Check\n compare_nested_iterables(result, expected_result)\n" ]
[ [ "numpy.array" ] ]
Sand3r-/Paddle
[ "1217a521554d63caa1381b8716910d0268dfc22d" ]
[ "python/paddle/fluid/tests/unittests/test_imperative_deepcf.py" ]
[ "# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport numpy as np\nimport random\nimport os\nimport sys\n\nimport paddle\nimport paddle.fluid as fluid\nimport paddle.fluid.core as core\nfrom test_imperative_base import new_program_scope\nfrom paddle.fluid.dygraph.base import to_variable\nfrom paddle.fluid.dygraph import Linear\n\n# Can use Amusic dataset as the DeepCF describes.\nDATA_PATH = os.environ.get('DATA_PATH', '')\n\nBATCH_SIZE = int(os.environ.get('BATCH_SIZE', 128))\nNUM_BATCHES = int(os.environ.get('NUM_BATCHES', 5))\nNUM_EPOCHES = int(os.environ.get('NUM_EPOCHES', 1))\n\n\nclass DMF(fluid.Layer):\n def __init__(self):\n super(DMF, self).__init__()\n self._user_latent = Linear(1000, 256)\n self._item_latent = Linear(100, 256)\n\n self._user_layers = []\n self._item_layers = []\n self._hid_sizes = [128, 64]\n for i in range(len(self._hid_sizes)):\n self._user_layers.append(\n self.add_sublayer(\n 'user_layer_%d' % i,\n Linear(\n 256 if i == 0 else self._hid_sizes[i - 1],\n self._hid_sizes[i],\n act='relu')))\n self._item_layers.append(\n self.add_sublayer(\n 'item_layer_%d' % i,\n Linear(\n 256 if i == 0 else self._hid_sizes[i - 1],\n self._hid_sizes[i],\n act='relu')))\n\n def forward(self, users, items):\n users = self._user_latent(users)\n items = self._item_latent(items)\n\n for ul, il in zip(self._user_layers, self._item_layers):\n users = ul(users)\n items = il(items)\n return fluid.layers.elementwise_mul(users, items)\n\n\nclass MLP(fluid.Layer):\n def __init__(self):\n super(MLP, self).__init__()\n self._user_latent = Linear(1000, 256)\n self._item_latent = Linear(100, 256)\n self._match_layers = []\n self._hid_sizes = [128, 64]\n for i in range(len(self._hid_sizes)):\n self._match_layers.append(\n self.add_sublayer(\n 'match_layer_%d' % i,\n Linear(\n 256 * 2 if i == 0 else self._hid_sizes[i - 1],\n self._hid_sizes[i],\n act='relu')))\n\n def forward(self, users, items):\n users = self._user_latent(users)\n items = self._item_latent(items)\n match_vec = fluid.layers.concat(\n [users, items], axis=len(users.shape) - 1)\n for l in self._match_layers:\n match_vec = l(match_vec)\n return match_vec\n\n\nclass DeepCF(fluid.Layer):\n def __init__(self, num_users, num_items, matrix):\n super(DeepCF, self).__init__()\n self._num_users = num_users\n self._num_items = num_items\n self._rating_matrix = self.create_parameter(\n attr=fluid.ParamAttr(trainable=False),\n shape=matrix.shape,\n dtype=matrix.dtype,\n is_bias=False,\n default_initializer=fluid.initializer.NumpyArrayInitializer(matrix))\n self._rating_matrix.stop_gradient = True\n\n self._mlp = MLP()\n self._dmf = DMF()\n self._match_fc = Linear(128, 1, act='sigmoid')\n\n def forward(self, users, items):\n # users_emb = self._user_emb(users)\n # items_emb = self._item_emb(items)\n users_emb = fluid.layers.gather(self._rating_matrix, users)\n items_emb = fluid.layers.gather(\n fluid.layers.transpose(self._rating_matrix, [1, 0]), items)\n 
users_emb.stop_gradient = True\n items_emb.stop_gradient = True\n\n mlp_predictive = self._mlp(users_emb, items_emb)\n dmf_predictive = self._dmf(users_emb, items_emb)\n predictive = fluid.layers.concat(\n [mlp_predictive, dmf_predictive],\n axis=len(mlp_predictive.shape) - 1)\n prediction = self._match_fc(predictive)\n return prediction\n\n\ndef get_data():\n user_ids = []\n item_ids = []\n labels = []\n NUM_USERS = 100\n NUM_ITEMS = 1000\n matrix = np.zeros([NUM_USERS, NUM_ITEMS], dtype=np.float32)\n\n for uid in range(NUM_USERS):\n for iid in range(NUM_ITEMS):\n label = float(random.randint(1, 6) == 1)\n user_ids.append(uid)\n item_ids.append(iid)\n labels.append(label)\n matrix[uid, iid] = label\n indices = np.arange(len(user_ids))\n np.random.shuffle(indices)\n users_np = np.array(user_ids, dtype=np.int32)[indices]\n items_np = np.array(item_ids, dtype=np.int32)[indices]\n labels_np = np.array(labels, dtype=np.float32)[indices]\n return np.expand_dims(users_np, -1), \\\n np.expand_dims(items_np, -1), \\\n np.expand_dims(labels_np, -1), NUM_USERS, NUM_ITEMS, matrix\n\n\ndef load_data(DATA_PATH):\n sys.stderr.write('loading from %s\\n' % DATA_PATH)\n likes = dict()\n num_users = -1\n num_items = -1\n with open(DATA_PATH, 'r') as f:\n for l in f.readlines():\n uid, iid, rating = [int(v) for v in l.split('\\t')]\n num_users = max(num_users, uid + 1)\n num_items = max(num_items, iid + 1)\n if float(rating) > 0.0:\n likes[(uid, iid)] = 1.0\n\n user_ids = []\n item_ids = []\n labels = []\n matrix = np.zeros([num_users, num_items], dtype=np.float32)\n for uid, iid in likes.keys():\n user_ids.append(uid)\n item_ids.append(iid)\n labels.append(1.0)\n matrix[uid, iid] = 1.0\n\n negative = 0\n while negative < 3:\n nuid = random.randint(0, num_users - 1)\n niid = random.randint(0, num_items - 1)\n if (nuid, niid) not in likes:\n negative += 1\n user_ids.append(nuid)\n item_ids.append(niid)\n labels.append(0.0)\n\n indices = np.arange(len(user_ids))\n np.random.shuffle(indices)\n users_np = np.array(user_ids, dtype=np.int32)[indices]\n items_np = np.array(item_ids, dtype=np.int32)[indices]\n labels_np = np.array(labels, dtype=np.float32)[indices]\n return np.expand_dims(users_np, -1), \\\n np.expand_dims(items_np, -1), \\\n np.expand_dims(labels_np, -1), num_users, num_items, matrix\n\n\nclass TestDygraphDeepCF(unittest.TestCase):\n def test_deefcf(self):\n seed = 90\n if DATA_PATH:\n (users_np, items_np, labels_np, num_users, num_items,\n matrix) = load_data(DATA_PATH)\n else:\n (users_np, items_np, labels_np, num_users, num_items,\n matrix) = get_data()\n\n startup = fluid.Program()\n startup.random_seed = seed\n main = fluid.Program()\n main.random_seed = seed\n\n scope = fluid.core.Scope()\n with new_program_scope(main=main, startup=startup, scope=scope):\n users = fluid.layers.data('users', [1], dtype='int32')\n items = fluid.layers.data('items', [1], dtype='int32')\n labels = fluid.layers.data('labels', [1], dtype='float32')\n\n deepcf = DeepCF(num_users, num_items, matrix)\n prediction = deepcf(users, items)\n loss = fluid.layers.reduce_sum(\n fluid.layers.log_loss(prediction, labels))\n adam = fluid.optimizer.AdamOptimizer(0.01)\n adam.minimize(loss)\n\n exe = fluid.Executor(fluid.CPUPlace(\n ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))\n exe.run(startup)\n for e in range(NUM_EPOCHES):\n sys.stderr.write('epoch %d\\n' % e)\n for slice in range(0, BATCH_SIZE * NUM_BATCHES, BATCH_SIZE):\n if slice + BATCH_SIZE >= users_np.shape[0]:\n break\n static_loss = exe.run(\n main,\n 
feed={\n users.name: users_np[slice:slice + BATCH_SIZE],\n items.name: items_np[slice:slice + BATCH_SIZE],\n labels.name: labels_np[slice:slice + BATCH_SIZE]\n },\n fetch_list=[loss])[0]\n sys.stderr.write('static loss %s\\n' % static_loss)\n\n with fluid.dygraph.guard():\n fluid.default_startup_program().random_seed = seed\n fluid.default_main_program().random_seed = seed\n\n deepcf = DeepCF(num_users, num_items, matrix)\n adam = fluid.optimizer.AdamOptimizer(\n 0.01, parameter_list=deepcf.parameters())\n for e in range(NUM_EPOCHES):\n sys.stderr.write('epoch %d\\n' % e)\n for slice in range(0, BATCH_SIZE * NUM_BATCHES, BATCH_SIZE):\n if slice + BATCH_SIZE >= users_np.shape[0]:\n break\n prediction = deepcf(\n to_variable(users_np[slice:slice + BATCH_SIZE]),\n to_variable(items_np[slice:slice + BATCH_SIZE]))\n loss = fluid.layers.reduce_sum(\n fluid.layers.log_loss(prediction,\n to_variable(labels_np[\n slice:slice + BATCH_SIZE])))\n loss.backward()\n adam.minimize(loss)\n deepcf.clear_gradients()\n dy_loss = loss.numpy()\n sys.stderr.write('dynamic loss: %s %s\\n' % (slice, dy_loss))\n\n with fluid.dygraph.guard():\n fluid.default_startup_program().random_seed = seed\n fluid.default_main_program().random_seed = seed\n\n deepcf2 = DeepCF(num_users, num_items, matrix)\n adam2 = fluid.optimizer.AdamOptimizer(\n 0.01, parameter_list=deepcf2.parameters())\n backward_strategy = fluid.dygraph.BackwardStrategy()\n backward_strategy.sort_sum_gradient = True\n for e in range(NUM_EPOCHES):\n sys.stderr.write('epoch %d\\n' % e)\n for slice in range(0, BATCH_SIZE * NUM_BATCHES, BATCH_SIZE):\n if slice + BATCH_SIZE >= users_np.shape[0]:\n break\n prediction2 = deepcf2(\n to_variable(users_np[slice:slice + BATCH_SIZE]),\n to_variable(items_np[slice:slice + BATCH_SIZE]))\n loss2 = fluid.layers.reduce_sum(\n fluid.layers.log_loss(prediction2,\n to_variable(labels_np[\n slice:slice + BATCH_SIZE])))\n loss2.backward(backward_strategy)\n adam2.minimize(loss2)\n deepcf2.clear_gradients()\n dy_loss2 = loss2.numpy()\n sys.stderr.write('dynamic loss: %s %s\\n' %\n (slice, dy_loss2))\n\n self.assertEqual(static_loss, dy_loss)\n self.assertEqual(static_loss, dy_loss2)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.array", "numpy.random.shuffle", "numpy.expand_dims", "numpy.zeros" ] ]
christian-oreilly/mne-python
[ "33146156f2660f122ecc04fa0d5b3fd3c34b549e" ]
[ "mne/io/brainvision/brainvision.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Conversion tool from Brain Vision EEG to FIF.\"\"\"\n\n# Authors: Teon Brooks <[email protected]>\n# Christian Brodbeck <[email protected]>\n# Eric Larson <[email protected]>\n# Jona Sassenhagen <[email protected]>\n# Phillip Alday <[email protected]>\n# Okba Bekhelifi <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os\nimport re\nimport time\n\nimport numpy as np\n\nfrom ...utils import verbose, logger, warn\nfrom ..constants import FIFF\nfrom ..meas_info import _empty_info\nfrom ..base import BaseRaw, _check_update_montage\nfrom ..utils import (_read_segments_file, _synthesize_stim_channel,\n _mult_cal_one)\n\nfrom ...externals.six import StringIO, string_types\nfrom ...externals.six.moves import configparser\n\n\nclass RawBrainVision(BaseRaw):\n \"\"\"Raw object from Brain Vision EEG file.\n\n Parameters\n ----------\n vhdr_fname : str\n Path to the EEG header file.\n montage : str | None | instance of Montage\n Path or instance of montage containing electrode positions. If None,\n read sensor locations from header file if present, otherwise (0, 0, 0).\n See the documentation of :func:`mne.channels.read_montage` for more\n information.\n eog : list or tuple\n Names of channels or list of indices that should be designated\n EOG channels. Values should correspond to the vhdr file.\n Default is ``('HEOGL', 'HEOGR', 'VEOGb')``.\n misc : list or tuple of str | 'auto'\n Names of channels or list of indices that should be designated\n MISC channels. Values should correspond to the electrodes\n in the vhdr file. If 'auto', units in vhdr file are used for inferring\n misc channels. Default is ``'auto'``.\n scale : float\n The scaling factor for EEG data. Unless specified otherwise by\n header file, units are in microvolts. Default scale factor is 1.\n preload : bool\n If True, all data are loaded at initialization.\n If False, data are not read until save.\n response_trig_shift : int | None\n An integer that will be added to all response triggers when reading\n events (stimulus triggers will be unaffected). If None, response\n triggers will be ignored. Default is 0 for backwards compatibility, but\n typically another value or None will be necessary.\n event_id : dict | None\n The id of special events to consider in addition to those that\n follow the normal Brainvision trigger format ('S###').\n If dict, the keys will be mapped to trigger values on the stimulus\n channel. Example: {'SyncStatus': 1; 'Pulse Artifact': 3}. If None\n or an empty dict (default), only stimulus events are added to the\n stimulus channel. Keys are case sensitive.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n See Also\n --------\n mne.io.Raw : Documentation of attribute and methods.\n \"\"\"\n\n @verbose\n def __init__(self, vhdr_fname, montage=None,\n eog=('HEOGL', 'HEOGR', 'VEOGb'), misc='auto',\n scale=1., preload=False, response_trig_shift=0,\n event_id=None, verbose=None): # noqa: D102\n # Channel info and events\n logger.info('Extracting parameters from %s...' 
% vhdr_fname)\n vhdr_fname = os.path.abspath(vhdr_fname)\n info, data_filename, fmt, order, mrk_fname, montage, n_samples = \\\n _get_vhdr_info(vhdr_fname, eog, misc, scale, montage)\n self._order = order\n self._n_samples = n_samples\n events = _read_vmrk_events(mrk_fname, event_id, response_trig_shift)\n _check_update_montage(info, montage)\n with open(data_filename, 'rb') as f:\n if isinstance(fmt, dict): # ASCII, this will be slow :(\n n_skip = 0\n for ii in range(int(fmt['skiplines'])):\n n_skip += len(f.readline())\n offsets = np.cumsum([n_skip] + [len(line) for line in f])\n n_samples = len(offsets) - 1\n else:\n f.seek(0, os.SEEK_END)\n n_samples = f.tell()\n dtype_bytes = _fmt_byte_dict[fmt]\n offsets = None\n n_samples = n_samples // (dtype_bytes * (info['nchan'] - 1))\n self.preload = False # so the event-setting works\n self._create_event_ch(events, n_samples)\n super(RawBrainVision, self).__init__(\n info, last_samps=[n_samples - 1], filenames=[data_filename],\n orig_format=fmt, preload=preload, verbose=verbose,\n raw_extras=[offsets])\n\n def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):\n \"\"\"Read a chunk of raw data.\"\"\"\n # read data\n if self._order == 'C':\n _read_segments_c(self, data, idx, fi, start, stop, cals, mult)\n elif isinstance(self.orig_format, string_types):\n dtype = _fmt_dtype_dict[self.orig_format]\n n_data_ch = len(self.ch_names) - 1\n _read_segments_file(self, data, idx, fi, start, stop, cals, mult,\n dtype=dtype, n_channels=n_data_ch,\n trigger_ch=self._event_ch)\n else:\n offsets = self._raw_extras[fi]\n with open(self._filenames[fi], 'rb') as fid:\n fid.seek(offsets[start])\n block = np.empty((len(self.ch_names), stop - start))\n for ii in range(stop - start):\n line = fid.readline().decode('ASCII')\n line = line.strip().replace(',', '.').split()\n block[:-1, ii] = list(map(float, line))\n block[-1] = self._event_ch[start:stop]\n _mult_cal_one(data, block, idx, cals, mult)\n\n def _get_brainvision_events(self):\n \"\"\"Retrieve the events associated with the Brain Vision Raw object.\n\n Returns\n -------\n events : array, shape (n_events, 3)\n Events, each row consisting of an (onset, duration, trigger)\n sequence.\n \"\"\"\n return self._events.copy()\n\n def _set_brainvision_events(self, events):\n \"\"\"Set the events and update the synthesized stim channel.\n\n Parameters\n ----------\n events : array, shape (n_events, 3)\n Events, each row consisting of an (onset, duration, trigger)\n sequence.\n \"\"\"\n self._create_event_ch(events)\n\n def _create_event_ch(self, events, n_samp=None):\n \"\"\"Create the event channel.\"\"\"\n if n_samp is None:\n n_samp = self.last_samp - self.first_samp + 1\n events = np.array(events, int)\n if events.ndim != 2 or events.shape[1] != 3:\n raise ValueError(\"[n_events x 3] shaped array required\")\n # update events\n self._event_ch = _synthesize_stim_channel(events, n_samp)\n self._events = events\n if self.preload:\n self._data[-1] = self._event_ch\n\n\ndef _read_segments_c(raw, data, idx, fi, start, stop, cals, mult):\n \"\"\"Read chunk of vectorized raw data.\"\"\"\n n_samples = raw._n_samples\n dtype = _fmt_dtype_dict[raw.orig_format]\n n_bytes = _fmt_byte_dict[raw.orig_format]\n n_channels = len(raw.ch_names)\n trigger_ch = raw._event_ch\n block = np.zeros((n_channels, stop - start))\n with open(raw._filenames[fi], 'rb', buffering=0) as fid:\n for ch_id in np.arange(n_channels)[idx]:\n if ch_id == n_channels - 1: # stim channel\n stim_ch = trigger_ch[start:stop]\n block[ch_id] = 
stim_ch\n continue\n fid.seek(start * n_bytes + ch_id * n_bytes * n_samples)\n block[ch_id] = np.fromfile(fid, dtype, stop - start)\n\n _mult_cal_one(data, block, idx, cals, mult)\n\n\ndef _read_vmrk_events(fname, event_id=None, response_trig_shift=0):\n \"\"\"Read events from a vmrk file.\n\n Parameters\n ----------\n fname : str\n vmrk file to be read.\n event_id : dict | None\n The id of special events to consider in addition to those that\n follow the normal Brainvision trigger format ('S###').\n If dict, the keys will be mapped to trigger values on the stimulus\n channel. Example: {'SyncStatus': 1; 'Pulse Artifact': 3}. If None\n or an empty dict (default), only stimulus events are added to the\n stimulus channel. Keys are case sensitive.\n response_trig_shift : int | None\n Integer to shift response triggers by. None ignores response triggers.\n\n Returns\n -------\n events : array, shape (n_events, 3)\n An array containing the whole recording's events, each row representing\n an event as (onset, duration, trigger) sequence.\n \"\"\"\n if event_id is None:\n event_id = dict()\n # read vmrk file\n with open(fname, 'rb') as fid:\n txt = fid.read()\n\n # we don't actually need to know the coding for the header line.\n # the characters in it all belong to ASCII and are thus the\n # same in Latin-1 and UTF-8\n header = txt.decode('ascii', 'ignore').split('\\n')[0].strip()\n _check_mrk_version(header)\n if (response_trig_shift is not None and\n not isinstance(response_trig_shift, int)):\n raise TypeError(\"response_trig_shift must be an integer or None\")\n\n # although the markers themselves are guaranteed to be ASCII (they\n # consist of numbers and a few reserved words), we should still\n # decode the file properly here because other (currently unused)\n # blocks, such as that the filename are specifying are not\n # guaranteed to be ASCII.\n\n try:\n # if there is an explicit codepage set, use it\n # we pretend like it's ascii when searching for the codepage\n cp_setting = re.search('Codepage=(.+)',\n txt.decode('ascii', 'ignore'),\n re.IGNORECASE & re.MULTILINE)\n codepage = 'utf-8'\n if cp_setting:\n codepage = cp_setting.group(1).strip()\n # BrainAmp Recorder also uses ANSI codepage\n # an ANSI codepage raises a LookupError exception\n # python recognize ANSI decoding as cp1252\n if codepage == 'ANSI':\n codepage = 'cp1252'\n txt = txt.decode(codepage)\n except UnicodeDecodeError:\n # if UTF-8 (new standard) or explicit codepage setting fails,\n # fallback to Latin-1, which is Windows default and implicit\n # standard in older recordings\n txt = txt.decode('latin-1')\n\n # extract Marker Infos block\n m = re.search(r\"\\[Marker Infos\\]\", txt)\n if not m:\n return np.zeros(0)\n mk_txt = txt[m.end():]\n m = re.search(r\"\\[.*\\]\", mk_txt)\n if m:\n mk_txt = mk_txt[:m.start()]\n\n # extract event information\n items = re.findall(r\"^Mk\\d+=(.*)\", mk_txt, re.MULTILINE)\n events, dropped = list(), list()\n for info in items:\n mtype, mdesc, onset, duration = info.split(',')[:4]\n onset = int(onset)\n duration = (int(duration) if duration.isdigit() else 1)\n if mdesc in event_id:\n trigger = event_id[mdesc]\n else:\n try:\n trigger = int(re.findall(r'[A-Za-z]*\\s*?(\\d+)', mdesc)[0])\n except IndexError:\n trigger = None\n if mtype.lower().startswith('response'):\n if response_trig_shift is not None:\n trigger += response_trig_shift\n else:\n trigger = None\n if trigger:\n events.append((onset, duration, trigger))\n else:\n if len(mdesc) > 0:\n dropped.append(mdesc)\n\n if len(dropped) 
> 0:\n dropped = list(set(dropped))\n examples = \", \".join(dropped[:5])\n if len(dropped) > 5:\n examples += \", ...\"\n warn(\"Currently, {0} trigger(s) will be dropped, such as [{1}]. \"\n \"Consider using ``event_id`` to parse triggers that \"\n \"do not follow the 'S###' pattern.\".format(\n len(dropped), examples))\n\n events = np.array(events).reshape(-1, 3)\n return events\n\n\ndef _check_hdr_version(header):\n \"\"\"Check the header version.\"\"\"\n if header == 'Brain Vision Data Exchange Header File Version 1.0':\n return 1\n elif header == 'Brain Vision Data Exchange Header File Version 2.0':\n return 2\n else:\n raise ValueError(\"Currently only support versions 1.0 and 2.0, not %r \"\n \"Contact MNE-Developers for support.\" % header)\n\n\ndef _check_mrk_version(header):\n \"\"\"Check the marker version.\"\"\"\n tags = ['Brain Vision Data Exchange Marker File, Version 1.0',\n 'Brain Vision Data Exchange Marker File, Version 2.0']\n if header not in tags:\n raise ValueError(\"Currently only support %r, not %r\"\n \"Contact MNE-Developers for support.\"\n % (str(tags), header))\n\n\n_orientation_dict = dict(MULTIPLEXED='F', VECTORIZED='C')\n_fmt_dict = dict(INT_16='short', INT_32='int', IEEE_FLOAT_32='single')\n_fmt_byte_dict = dict(short=2, int=4, single=4)\n_fmt_dtype_dict = dict(short='<i2', int='<i4', single='<f4')\n_unit_dict = {'V': 1., # V stands for Volt\n u'µV': 1e-6,\n 'uV': 1e-6,\n 'C': 1, # C stands for celsius\n u'µS': 1e-6, # S stands for Siemens\n u'uS': 1e-6,\n u'ARU': 1, # ARU is the unity for the breathing data\n 'S': 1,\n 'N': 1} # Newton\n\n\ndef _get_vhdr_info(vhdr_fname, eog, misc, scale, montage):\n \"\"\"Extract all the information from the header file.\n\n Parameters\n ----------\n vhdr_fname : str\n Raw EEG header to be read.\n eog : list of str\n Names of channels that should be designated EOG channels. Names should\n correspond to the vhdr file.\n misc : list or tuple of str | 'auto'\n Names of channels or list of indices that should be designated\n MISC channels. Values should correspond to the electrodes\n in the vhdr file. If 'auto', units in vhdr file are used for inferring\n misc channels. Default is ``'auto'``.\n scale : float\n The scaling factor for EEG data. Unless specified otherwise by\n header file, units are in microvolts. Default scale factor is 1.\n montage : str | None | instance of Montage\n Path or instance of montage containing electrode positions. 
If None,\n read sensor locations from header file if present, otherwise (0, 0, 0).\n See the documentation of :func:`mne.channels.read_montage` for more\n information.\n\n Returns\n -------\n info : Info\n The measurement info.\n fmt : str\n The data format in the file.\n edf_info : dict\n A dict containing Brain Vision specific parameters.\n events : array, shape (n_events, 3)\n Events from the corresponding vmrk file.\n \"\"\"\n scale = float(scale)\n ext = os.path.splitext(vhdr_fname)[-1]\n if ext != '.vhdr':\n raise IOError(\"The header file must be given to read the data, \"\n \"not a file with extension '%s'.\" % ext)\n with open(vhdr_fname, 'rb') as f:\n # extract the first section to resemble a cfg\n header = f.readline()\n codepage = 'utf-8'\n # we don't actually need to know the coding for the header line.\n # the characters in it all belong to ASCII and are thus the\n # same in Latin-1 and UTF-8\n header = header.decode('ascii', 'ignore').strip()\n _check_hdr_version(header)\n\n settings = f.read()\n try:\n # if there is an explicit codepage set, use it\n # we pretend like it's ascii when searching for the codepage\n cp_setting = re.search('Codepage=(.+)',\n settings.decode('ascii', 'ignore'),\n re.IGNORECASE & re.MULTILINE)\n if cp_setting:\n codepage = cp_setting.group(1).strip()\n # BrainAmp Recorder also uses ANSI codepage\n # an ANSI codepage raises a LookupError exception\n # python recognize ANSI decoding as cp1252\n if codepage == 'ANSI':\n codepage = 'cp1252'\n settings = settings.decode(codepage)\n except UnicodeDecodeError:\n # if UTF-8 (new standard) or explicit codepage setting fails,\n # fallback to Latin-1, which is Windows default and implicit\n # standard in older recordings\n settings = settings.decode('latin-1')\n\n if settings.find('[Comment]') != -1:\n params, settings = settings.split('[Comment]')\n else:\n params, settings = settings, ''\n cfg = configparser.ConfigParser()\n if hasattr(cfg, 'read_file'): # newer API\n cfg.read_file(StringIO(params))\n else:\n cfg.readfp(StringIO(params))\n\n # get sampling info\n # Sampling interval is given in microsec\n sfreq = 1e6 / cfg.getfloat('Common Infos', 'SamplingInterval')\n info = _empty_info(sfreq)\n\n order = cfg.get('Common Infos', 'DataOrientation')\n if order not in _orientation_dict:\n raise NotImplementedError('Data Orientation %s is not supported'\n % order)\n order = _orientation_dict[order]\n\n data_format = cfg.get('Common Infos', 'DataFormat')\n if data_format == 'BINARY':\n fmt = cfg.get('Binary Infos', 'BinaryFormat')\n if fmt not in _fmt_dict:\n raise NotImplementedError('Datatype %s is not supported' % fmt)\n fmt = _fmt_dict[fmt]\n else:\n fmt = dict((key, cfg.get('ASCII Infos', key))\n for key in cfg.options('ASCII Infos'))\n\n # locate EEG and marker files\n path = os.path.dirname(vhdr_fname)\n data_filename = os.path.join(path, cfg.get('Common Infos', 'DataFile'))\n info['meas_date'] = int(time.time())\n info['buffer_size_sec'] = 1. # reasonable default\n\n # load channel labels\n nchan = cfg.getint('Common Infos', 'NumberOfChannels') + 1\n n_samples = None\n if order == 'C':\n try:\n n_samples = cfg.getint('Common Infos', 'DataPoints')\n except configparser.NoOptionError:\n logger.warning('No info on DataPoints found. 
Inferring number of '\n 'samples from the data file size.')\n with open(data_filename, 'rb') as fid:\n fid.seek(0, 2)\n n_bytes = fid.tell()\n n_samples = n_bytes // _fmt_byte_dict[fmt] // (nchan - 1)\n\n ch_names = [''] * nchan\n cals = np.empty(nchan)\n ranges = np.empty(nchan)\n cals.fill(np.nan)\n ch_dict = dict()\n misc_chs = dict()\n for chan, props in cfg.items('Channel Infos'):\n n = int(re.findall(r'ch(\\d+)', chan)[0]) - 1\n props = props.split(',')\n # default to microvolts because that's what the older brainvision\n # standard explicitly assumed; the unit is only allowed to be\n # something else if explicitly stated (cf. EEGLAB export below)\n if len(props) < 4:\n props += (u'µV',)\n name, _, resolution, unit = props[:4]\n ch_dict[chan] = name\n ch_names[n] = name\n if resolution == \"\":\n if not(unit): # For truncated vhdrs (e.g. EEGLAB export)\n resolution = 0.000001\n else:\n resolution = 1. # for files with units specified, but not res\n unit = unit.replace(u'\\xc2', u'') # Remove unwanted control characters\n cals[n] = float(resolution)\n ranges[n] = _unit_dict.get(unit, 1) * scale\n if unit not in ('V', u'µV', 'uV'):\n misc_chs[name] = (FIFF.FIFF_UNIT_CEL if unit == 'C'\n else FIFF.FIFF_UNIT_NONE)\n misc = list(misc_chs.keys()) if misc == 'auto' else misc\n\n # create montage\n if cfg.has_section('Coordinates') and montage is None:\n from ...transforms import _sph_to_cart\n from ...channels.montage import Montage\n montage_pos = list()\n montage_names = list()\n to_misc = list()\n for ch in cfg.items('Coordinates'):\n ch_name = ch_dict[ch[0]]\n montage_names.append(ch_name)\n radius, theta, phi = map(float, ch[1].split(','))\n # 1: radius, 2: theta, 3: phi\n pol = np.deg2rad(theta)\n az = np.deg2rad(phi)\n pos = _sph_to_cart(np.array([[radius * 85., az, pol]]))[0]\n if (pos == 0).all() and ch_name not in list(eog) + misc:\n to_misc.append(ch_name)\n montage_pos.append(pos)\n montage_sel = np.arange(len(montage_pos))\n montage = Montage(montage_pos, montage_names, 'Brainvision',\n montage_sel)\n if len(to_misc) > 0:\n misc += to_misc\n warn('No coordinate information found for channels {}. '\n 'Setting channel types to misc. To avoid this warning, set '\n 'channel types explicitly.'.format(to_misc))\n\n ch_names[-1] = 'STI 014'\n cals[-1] = 1.\n ranges[-1] = 1.\n if np.isnan(cals).any():\n raise RuntimeError('Missing channel units')\n\n # Attempts to extract filtering info from header. If not found, both are\n # set to zero.\n settings = settings.splitlines()\n idx = None\n\n if 'Channels' in settings:\n idx = settings.index('Channels')\n settings = settings[idx + 1:]\n hp_col, lp_col = 4, 5\n for idx, setting in enumerate(settings):\n if re.match(r'#\\s+Name', setting):\n break\n else:\n idx = None\n\n # If software filters are active, then they override the hardware setup\n # But we still want to be able to double check the channel names\n # for alignment purposes, we keep track of the hardware setting idx\n idx_amp = idx\n\n if 'S o f t w a r e F i l t e r s' in settings:\n idx = settings.index('S o f t w a r e F i l t e r s')\n for idx, setting in enumerate(settings[idx + 1:], idx + 1):\n if re.match(r'#\\s+Low Cutoff', setting):\n hp_col, lp_col = 1, 2\n warn('Online software filter detected. 
Using software '\n 'filter settings and ignoring hardware values')\n break\n else:\n idx = idx_amp\n\n if idx:\n lowpass = []\n highpass = []\n\n # for newer BV files, the unit is specified for every channel\n # separated by a single space, while for older files, the unit is\n # specified in the column headers\n divider = r'\\s+'\n if 'Resolution / Unit' in settings[idx]:\n shift = 1 # shift for unit\n else:\n shift = 0\n\n # extract filter units and convert s to Hz if necessary\n # this cannot be done as post-processing as the inverse t-f\n # relationship means that the min/max comparisons don't make sense\n # unless we know the units\n header = re.split(r'\\s\\s+', settings[idx])\n hp_s = '[s]' in header[hp_col]\n lp_s = '[s]' in header[lp_col]\n\n for i, ch in enumerate(ch_names[:-1], 1):\n line = re.split(divider, settings[idx + i])\n # double check alignment with channel by using the hw settings\n if idx == idx_amp:\n line_amp = line\n else:\n line_amp = re.split(divider, settings[idx_amp + i])\n assert ch in line_amp\n\n highpass.append(line[hp_col + shift])\n lowpass.append(line[lp_col + shift])\n if len(highpass) == 0:\n pass\n elif len(set(highpass)) == 1:\n if highpass[0] in ('NaN', 'Off'):\n pass # Placeholder for future use. Highpass set in _empty_info\n elif highpass[0] == 'DC':\n info['highpass'] = 0.\n else:\n info['highpass'] = float(highpass[0])\n if hp_s:\n info['highpass'] = 1. / info['highpass']\n else:\n heterogeneous_hp_filter = True\n if hp_s:\n # We convert channels with disabled filters to having\n # highpass relaxed / no filters\n highpass = [float(filt) if filt not in ('NaN', 'Off', 'DC')\n else np.Inf for filt in highpass]\n info['highpass'] = np.max(np.array(highpass, dtype=np.float))\n # Coveniently enough 1 / np.Inf = 0.0, so this works for\n # DC / no highpass filter\n info['highpass'] = 1. / info['highpass']\n\n # not exactly the cleanest use of FP, but this makes us\n # more conservative in *not* warning.\n if info['highpass'] == 0.0 and len(set(highpass)) == 1:\n # not actually heterogeneous in effect\n # ... just heterogeneously disabled\n heterogeneous_hp_filter = False\n else:\n highpass = [float(filt) if filt not in ('NaN', 'Off', 'DC')\n else 0.0 for filt in highpass]\n info['highpass'] = np.min(np.array(highpass, dtype=np.float))\n if info['highpass'] == 0.0 and len(set(highpass)) == 1:\n # not actually heterogeneous in effect\n # ... just heterogeneously disabled\n heterogeneous_hp_filter = False\n\n if heterogeneous_hp_filter:\n warn('Channels contain different highpass filters. '\n 'Lowest (weakest) filter setting (%0.2f Hz) '\n 'will be stored.' % info['highpass'])\n\n if len(lowpass) == 0:\n pass\n elif len(set(lowpass)) == 1:\n if lowpass[0] in ('NaN', 'Off'):\n pass # Placeholder for future use. Lowpass set in _empty_info\n else:\n info['lowpass'] = float(lowpass[0])\n if lp_s:\n info['lowpass'] = 1. / info['lowpass']\n else:\n heterogeneous_lp_filter = True\n if lp_s:\n # We convert channels with disabled filters to having\n # infinitely relaxed / no filters\n lowpass = [float(filt) if filt not in ('NaN', 'Off')\n else 0.0 for filt in lowpass]\n info['lowpass'] = np.min(np.array(lowpass, dtype=np.float))\n try:\n info['lowpass'] = 1. / info['lowpass']\n except ZeroDivisionError:\n if len(set(lowpass)) == 1:\n # No lowpass actually set for the weakest setting\n # so we set lowpass to the Nyquist frequency\n info['lowpass'] = info['sfreq'] / 2.\n # not actually heterogeneous in effect\n # ... 
just heterogeneously disabled\n heterogeneous_lp_filter = False\n else:\n # no lowpass filter is the weakest filter,\n # but it wasn't the only filter\n pass\n else:\n # We convert channels with disabled filters to having\n # infinitely relaxed / no filters\n lowpass = [float(filt) if filt not in ('NaN', 'Off')\n else np.Inf for filt in lowpass]\n info['lowpass'] = np.max(np.array(lowpass, dtype=np.float))\n\n if np.isinf(info['lowpass']):\n # No lowpass actually set for the weakest setting\n # so we set lowpass to the Nyquist frequency\n info['lowpass'] = info['sfreq'] / 2.\n if len(set(lowpass)) == 1:\n # not actually heterogeneous in effect\n # ... just heterogeneously disabled\n heterogeneous_lp_filter = False\n\n if heterogeneous_lp_filter:\n # this isn't clean FP, but then again, we only want to provide\n # the Nyquist hint when the lowpass filter was actually\n # calculated from dividing the sampling frequency by 2, so the\n # exact/direct comparison (instead of tolerance) makes sense\n if info['lowpass'] == info['sfreq'] / 2.0:\n nyquist = ', Nyquist limit'\n else:\n nyquist = \"\"\n warn('Channels contain different lowpass filters. '\n 'Highest (weakest) filter setting (%0.2f Hz%s) '\n 'will be stored.' % (info['lowpass'], nyquist))\n\n # Creates a list of dicts of eeg channels for raw.info\n logger.info('Setting channel info structure...')\n info['chs'] = []\n for idx, ch_name in enumerate(ch_names):\n if ch_name in eog or idx in eog or idx - nchan in eog:\n kind = FIFF.FIFFV_EOG_CH\n coil_type = FIFF.FIFFV_COIL_NONE\n unit = FIFF.FIFF_UNIT_V\n elif ch_name in misc or idx in misc or idx - nchan in misc:\n kind = FIFF.FIFFV_MISC_CH\n coil_type = FIFF.FIFFV_COIL_NONE\n if ch_name in misc_chs:\n unit = misc_chs[ch_name]\n else:\n unit = FIFF.FIFF_UNIT_NONE\n elif ch_name == 'STI 014':\n kind = FIFF.FIFFV_STIM_CH\n coil_type = FIFF.FIFFV_COIL_NONE\n unit = FIFF.FIFF_UNIT_NONE\n else:\n kind = FIFF.FIFFV_EEG_CH\n coil_type = FIFF.FIFFV_COIL_EEG\n unit = FIFF.FIFF_UNIT_V\n info['chs'].append(dict(\n ch_name=ch_name, coil_type=coil_type, kind=kind, logno=idx + 1,\n scanno=idx + 1, cal=cals[idx], range=ranges[idx], loc=np.zeros(12),\n unit=unit, unit_mul=0., # always zero- mne manual pg. 273\n coord_frame=FIFF.FIFFV_COORD_HEAD))\n\n # for stim channel\n mrk_fname = os.path.join(path, cfg.get('Common Infos', 'MarkerFile'))\n info._update_redundant()\n info._check_consistency()\n return info, data_filename, fmt, order, mrk_fname, montage, n_samples\n\n\ndef read_raw_brainvision(vhdr_fname, montage=None,\n eog=('HEOGL', 'HEOGR', 'VEOGb'), misc='auto',\n scale=1., preload=False, response_trig_shift=0,\n event_id=None, verbose=None):\n \"\"\"Reader for Brain Vision EEG file.\n\n Parameters\n ----------\n vhdr_fname : str\n Path to the EEG header file.\n montage : str | None | instance of Montage\n Path or instance of montage containing electrode positions.\n If None, sensor locations are (0,0,0). See the documentation of\n :func:`mne.channels.read_montage` for more information.\n eog : list or tuple of str\n Names of channels or list of indices that should be designated\n EOG channels. Values should correspond to the vhdr file\n Default is ``('HEOGL', 'HEOGR', 'VEOGb')``.\n misc : list or tuple of str | 'auto'\n Names of channels or list of indices that should be designated\n MISC channels. Values should correspond to the electrodes\n in the vhdr file. If 'auto', units in vhdr file are used for inferring\n misc channels. 
Default is ``'auto'``.\n scale : float\n The scaling factor for EEG data. Unless specified otherwise by\n header file, units are in microvolts. Default scale factor is 1.\n preload : bool\n If True, all data are loaded at initialization.\n If False, data are not read until save.\n response_trig_shift : int | None\n An integer that will be added to all response triggers when reading\n events (stimulus triggers will be unaffected). If None, response\n triggers will be ignored. Default is 0 for backwards compatibility, but\n typically another value or None will be necessary.\n event_id : dict | None\n The id of special events to consider in addition to those that\n follow the normal Brainvision trigger format ('S###').\n If dict, the keys will be mapped to trigger values on the stimulus\n channel. Example: {'SyncStatus': 1; 'Pulse Artifact': 3}. If None\n or an empty dict (default), only stimulus events are added to the\n stimulus channel. Keys are case sensitive.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n raw : instance of RawBrainVision\n A Raw object containing BrainVision data.\n\n See Also\n --------\n mne.io.Raw : Documentation of attribute and methods.\n \"\"\"\n return RawBrainVision(vhdr_fname=vhdr_fname, montage=montage, eog=eog,\n misc=misc, scale=scale, preload=preload,\n response_trig_shift=response_trig_shift,\n event_id=event_id, verbose=verbose)\n" ]
[ [ "numpy.fromfile", "numpy.empty", "numpy.zeros", "numpy.isinf", "numpy.arange", "numpy.isnan", "numpy.array", "numpy.deg2rad" ] ]
Myunghee13/DSCI560_HW5
[ "1a6104569b95ccca392ba67794e03054dd696c59" ]
[ "app.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom bokeh.plotting import figure, show, output_notebook,ColumnDataSource,curdoc\nfrom bokeh.models import HoverTool, Select, Div\nfrom bokeh.layouts import row, column\nfrom bokeh.transform import dodge\n\ndata1 = pd.read_csv('latimes-state-totals.csv')\n\ndata1['date_time']=pd.to_datetime(data1['date'])\ndata1 = data1[[\"date\", \"date_time\", \"new_confirmed_cases\"]]\ndata1 = data1.set_index(['date_time'])\ndata1.sort_index(inplace=True)\n\ndf1 = data1.loc['2020-08-01':'2020-08-31']\n\ndef make_plot1():\n dates = [str(int(ele[-2:])) for ele in df1.date]\n new_cases = list(df1.new_confirmed_cases)\n\n data = {\n \"dates\": dates,\n \"new_cases\": new_cases\n }\n\n source = ColumnDataSource(data=data)\n\n p = figure(x_range=dates, plot_height=350, title=\"New Coronavirus cases in August in California\",\n toolbar_location=None, y_axis_label = 'New Confirmed Cases',\n x_axis_label = 'August, 2020')\n\n p.vbar(x='dates', top='new_cases', color='#FFA07A', width=0.9, source=source)\n\n\n p.add_tools(HoverTool(\n tooltips=[\n ('date', \"August \"+'@dates'+\", 2020\"),\n (\"new cases\", \"@new_cases\"),\n ]\n ))\n\n p.xgrid.grid_line_color = None\n p.y_range.start = 0\n\n return p\n\nplot1 = make_plot1()\n\n\ndata2 = pd.read_csv('cdph-race-ethnicity.csv')\ndata2['date_time']=pd.to_datetime(data2['date'])\ndata2 = data2[data2.age == \"all\"]\ndata2 = data2[[\"date\",\"date_time\", \"race\",\"confirmed_cases_percent\", \"deaths_percent\",\"population_percent\"]]\ndata2 = data2.set_index(['date_time'])\n\ndata2.fillna(\"no record\", inplace = True)\ndata2.confirmed_cases_percent = [ele*100 for ele in data2.confirmed_cases_percent]\ndata2.deaths_percent = [ele*100 for ele in data2.deaths_percent]\ndata2.population_percent = [ele*100 for ele in data2.population_percent]\n\ndate_list = sorted(set(data2.date), reverse=True)\nsel_date = date_list[0]\nraces = ['asian', 'black', 'cdph-other', 'latino', 'other', 'white']\ndef get_dataset (date):\n df2 = data2.loc[date]\n\n data = {'races' : races,\n 'confirmed' : list(df2['confirmed_cases_percent']),\n 'death' : list(df2['deaths_percent']),\n 'population' : list(df2['population_percent'])\n }\n\n return ColumnDataSource(data=data)\n\ndef make_plot2(source):\n p = figure(x_range=races, y_range=(0, 100), plot_height=250, title=\"Coronavirus cases and deaths % per race in California\",\n toolbar_location=None) #, tools=\"hover\", tooltips=\"$name: @$name\")\n\n p.vbar(x=dodge('races', -0.25, range=p.x_range), top='confirmed', width=0.2, source=source,\n color=\"#c9d9d3\", legend_label=\"confirmed cases %\")\n\n p.vbar(x=dodge('races', 0.0, range=p.x_range), top='death', width=0.2, source=source,\n color=\"#718dbf\", legend_label=\"death %\")\n\n p.vbar(x=dodge('races', 0.25, range=p.x_range), top='population', width=0.2, source=source,\n color=\"#e84d60\", legend_label=\"population %\")\n\n p.add_tools(HoverTool(\n tooltips=[\n (\"race\", \"@races\"),\n (\"confirmed\", \"@confirmed{0,0.00}\"+\"%\"),\n (\"death\", \"@death{0,0.00}\"+\"%\"),\n (\"population\", \"@population{0,0.00}\"+\"%\"),\n\n ]\n ))\n p.x_range.range_padding = 0.1\n p.xgrid.grid_line_color = None\n p.legend.location = \"top_left\"\n p.legend.orientation = \"horizontal\"\n\n return p\n\ndef update_plot(attrname, old, new):\n src = get_dataset(date_select.value)\n source.data.update(src.data)\n\nsource = get_dataset (sel_date)\nplot2 = make_plot2(source)\n\ndate_select = Select(value=sel_date, title='Select Date', 
options=date_list)\ndate_select.on_change('value', update_plot)\n\n\ndiv1 = Div(text=\"\"\"\n<p><strong>Name: </strong>Myunghee Lee</p>\n<h1>1. Source for the data</h1>\n<p><strong>Source Link: </strong><a target=\"_blank\" href=\"https://github.com/datadesk/california-coronavirus-data\">The Los Angeles Times' independent tally of coronavirus cases in California.</a></p>\n<p><strong>Used files from the data source</strong></p>\n <ol>\n <li><strong>latimes-state-totals.csv</strong></li>\n <p>The statewide total of cases and deaths logged by local public health agencies each day.</p>\n <p><strong>new_confirmed_cases: </strong>the net change in confirmed cases over the previous date.</p>\n \n <li><strong>cdph-race-ethnicity.csv: </strong></li>\n <p>Statewide demographic data tallying race totals by age for both cases and deaths.</p>\n <p>Provided by the <a target=\"_blank\" href=\"https://www.cdph.ca.gov/Programs/CID/DCDC/Pages/COVID-19/Race-Ethnicity.aspx\">California Department of Public Health.</a></p>\n <p><strong>race: </strong>The race being tallied.</p>\n <p><strong>age: </strong>The age bracket being tallied: 0-17, 18+, 18-34, 35-49, 50-64, 65-79, 80+, <strong>all</strong>. I selected \"all\" for the graph.</p> \n <p><strong>confirmed_cases_percent: </strong>The case totals percentage of the total in this age bracket.</p>\n <p><strong>deaths_percent: </strong>The death totals percentage of the total in this age bracket.</p>\n <p><strong>population_percent: </strong>The race's percentage of the overall state population in this age bracket.</p>\n </ol>\n<h1>2. Date of last update</h1>\n<p>I downloaded the data from the source on <strong>November 5, 2020</strong>.</p>\n<h1>3. New coronavirus cases in August in California</h1>\n<p>You can see the number when you mouse over the bar.</p>\n\"\"\")\n#, width=1500, height=500)\ndiv2 = Div(text=\"\"\"\n<h1>4. Cases and deaths % by race to their population %</h1>\n<p>You can see the number when you mouse over the bars.</p>\n<p>You can select the date with the \"Select Date\" button.</p>\n<p><strong>No record</strong> is shown when a value is missing for the selected date.</p>\n\"\"\")\n\ncurdoc().add_root(column(div1, plot1, div2, row(plot2, date_select)))\ncurdoc().title = \"California Coronavirus Dashboard\"\n# bokeh serve --show app.py\n" ]
[ [ "pandas.read_csv", "pandas.to_datetime" ] ]
Tridentflayer/structure_tester_project
[ "0c67e450f3c1cd29dd9385ce407cc1407d9b9251" ]
[ "mcculw-master/structure-tester/tests_and_examples/embeded_gui_example.py" ]
[ "###################################################################\n# #\n# PLOT A LIVE GRAPH (PyQt5) #\n# ----------------------------- #\n# EMBED A MATPLOTLIB ANIMATION INSIDE YOUR #\n# OWN GUI! #\n# #\n###################################################################\n\nimport sys\nimport os\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nimport functools\nimport numpy as np\nimport random as rd\nimport matplotlib\nmatplotlib.use(\"Qt5Agg\")\nfrom matplotlib.figure import Figure\nfrom matplotlib.animation import TimedAnimation\nfrom matplotlib.lines import Line2D\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nimport time\nimport threading\n\nclass CustomMainWindow(QMainWindow):\n def __init__(self):\n super(CustomMainWindow, self).__init__()\n # Define the geometry of the main window\n self.setGeometry(300, 300, 800, 400)\n self.setWindowTitle(\"my first window\")\n # Create FRAME_A\n self.FRAME_A = QFrame(self)\n self.FRAME_A.setStyleSheet(\"QWidget { background-color: %s }\" % QColor(210,210,235,255).name())\n self.LAYOUT_A = QGridLayout()\n self.FRAME_A.setLayout(self.LAYOUT_A)\n self.setCentralWidget(self.FRAME_A)\n # Place the zoom button\n self.zoomBtn = QPushButton(text = 'zoom')\n self.zoomBtn.setFixedSize(100, 50)\n self.zoomBtn.clicked.connect(self.zoomBtnAction)\n self.LAYOUT_A.addWidget(self.zoomBtn, *(0,0))\n # Place the matplotlib figure\n self.myFig = CustomFigCanvas()\n self.LAYOUT_A.addWidget(self.myFig, *(0,1))\n # Add the callbackfunc to ..\n myDataLoop = threading.Thread(name = 'myDataLoop', target = dataSendLoop, daemon = True, args = (self.addData_callbackFunc,))\n myDataLoop.start()\n self.show()\n return\n\n def zoomBtnAction(self):\n print(\"zoom in\")\n self.myFig.zoomIn(5)\n return\n\n def addData_callbackFunc(self, value):\n # print(\"Add data: \" + str(value))\n self.myFig.addData(value)\n return\n\n''' End Class '''\n\n\nclass CustomFigCanvas(FigureCanvas, TimedAnimation):\n def __init__(self):\n self.addedData = []\n print(matplotlib.__version__)\n # The data\n self.xlim = 200\n self.n = np.linspace(0, self.xlim - 1, self.xlim)\n a = []\n b = []\n a.append(2.0)\n a.append(4.0)\n a.append(2.0)\n b.append(4.0)\n b.append(3.0)\n b.append(4.0)\n self.y = (self.n * 0.0) + 50\n # The window\n self.fig = Figure(figsize=(5,5), dpi=100)\n self.ax1 = self.fig.add_subplot(111)\n # self.ax1 settings\n self.ax1.set_xlabel('time')\n self.ax1.set_ylabel('raw data')\n self.line1 = Line2D([], [], color='blue')\n self.line1_tail = Line2D([], [], color='red', linewidth=2)\n self.line1_head = Line2D([], [], color='red', marker='o', markeredgecolor='r')\n self.ax1.add_line(self.line1)\n self.ax1.add_line(self.line1_tail)\n self.ax1.add_line(self.line1_head)\n self.ax1.set_xlim(0, self.xlim - 1)\n self.ax1.set_ylim(0, 100)\n FigureCanvas.__init__(self, self.fig)\n TimedAnimation.__init__(self, self.fig, interval = 50, blit = True)\n return\n\n def new_frame_seq(self):\n return iter(range(self.n.size))\n\n def _init_draw(self):\n lines = [self.line1, self.line1_tail, self.line1_head]\n for l in lines:\n l.set_data([], [])\n return\n\n def addData(self, value):\n self.addedData.append(value)\n return\n\n def zoomIn(self, value):\n bottom = self.ax1.get_ylim()[0]\n top = self.ax1.get_ylim()[1]\n bottom += value\n top -= value\n self.ax1.set_ylim(bottom,top)\n self.draw()\n return\n\n def _step(self, *args):\n # Extends the _step() method for the TimedAnimation class.\n try:\n TimedAnimation._step(self, 
*args)\n        except Exception as e:\n            self.abc = getattr(self, 'abc', 0) + 1  # running count of caught draw errors\n            print(str(self.abc))\n            TimedAnimation._stop(self)\n            pass\n        return\n\n    def _draw_frame(self, framedata):\n        margin = 2\n        while(len(self.addedData) > 0):\n            self.y = np.roll(self.y, -1)\n            self.y[-1] = self.addedData[0]\n            del(self.addedData[0])\n\n        self.line1.set_data(self.n[ 0 : self.n.size - margin ], self.y[ 0 : self.n.size - margin ])\n        self.line1_tail.set_data(np.append(self.n[-10:-1 - margin], self.n[-1 - margin]), np.append(self.y[-10:-1 - margin], self.y[-1 - margin]))\n        self.line1_head.set_data(self.n[-1 - margin], self.y[-1 - margin])\n        self._drawn_artists = [self.line1, self.line1_tail, self.line1_head]\n        return\n\n''' End Class '''\n\n\n# You need to setup a signal slot mechanism, to\n# send data to your GUI in a thread-safe way.\n# Believe me, if you don't do this right, things\n# go very very wrong..\nclass Communicate(QObject):\n    data_signal = pyqtSignal(float)\n\n''' End Class '''\n\n\n\ndef dataSendLoop(addData_callbackFunc):\n    # Setup the signal-slot mechanism.\n    mySrc = Communicate()\n    mySrc.data_signal.connect(addData_callbackFunc)\n\n    # Simulate some data\n    n = np.linspace(0, 499, 500)\n    y = 50 + 25*(np.sin(n / 8.3)) + 10*(np.sin(n / 7.5)) - 5*(np.sin(n / 1.5))\n    i = 0\n\n    while(True):\n        if(i > 499):\n            i = 0\n        time.sleep(0.1)\n        mySrc.data_signal.emit(y[i]) # <- Here you emit a signal!\n        i += 1\n    ###\n###\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    QApplication.setStyle(QStyleFactory.create('Plastique'))\n    myGUI = CustomMainWindow()\n    sys.exit(app.exec_())" ]
[ [ "matplotlib.lines.Line2D", "numpy.roll", "numpy.append", "matplotlib.animation.TimedAnimation._stop", "matplotlib.figure.Figure", "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.__init__", "matplotlib.animation.TimedAnimation._step", "matplotlib.use", "numpy.sin", "numpy.linspace", "matplotlib.animation.TimedAnimation.__init__" ] ]
alvarofpp/ufrn-imd1130-nosql
[ "6abf0553befa8fd914e1fd19446f6ccf5c35ba05" ]
[ "trabalho_final/postgres/geo_postgres.py" ]
[ "import psycopg2;\nimport time;\nimport numpy as np\n\ncon = psycopg2.connect(\n host = \"localhost\",\n database = \"mydb\",\n user = \"brunnom\",\n password = \"postgres\"\n)\n\ncur = con.cursor();\n\ntime1km = []\nqtd1km = 0;\ntime15km = []\nqtd15km = 0;\ntime2km = []\nqtd2km = 0;\ntime25km = []\nqtd25km = 0;\ntime3km = []\nqtd3km = 0;\n\n#1KM\nfor i in range (1000):\n start = time.time() \n cur.execute(\"SELECT * FROM lugares WHERE ST_DistanceSphere(geo, ST_MakePoint(-35.2084091, -5.8117310)) <= 1000\");\n rows = cur.fetchall();\n end = time.time()\n time1km.append(end - start)\n\nqtd1km = len(rows);\ntime1km = np.average(time1km);\nprint(\"1KM - Quantidade de pontos: \", qtd1km, \" - Média de tempo: \", time1km);\n\n\n#1.5KM\nfor i in range (1000):\n start = time.time() \n cur.execute(\"SELECT * FROM lugares WHERE ST_DistanceSphere(geo, ST_MakePoint(-35.2084091, -5.8117310)) <= 1500\");\n rows = cur.fetchall();\n end = time.time()\n time15km.append(end - start)\n\nqtd15km = len(rows);\ntime15km = np.average(time15km);\nprint(\"1.5KM - Quantidade de pontos: \", qtd15km, \" - Média de tempo: \", time15km);\n\n\n#2KM\nfor i in range (1000):\n start = time.time() \n cur.execute(\"SELECT * FROM lugares WHERE ST_DistanceSphere(geo, ST_MakePoint(-35.2084091, -5.8117310)) <= 2000\");\n rows = cur.fetchall();\n end = time.time()\n time2km.append(end - start)\n\nqtd2km = len(rows);\ntime2km = np.average(time2km);\nprint(\"2KM - Quantidade de pontos: \", qtd2km, \" - Média de tempo: \", time2km);\n\n\n#2.5KM\nfor i in range (1000):\n start = time.time() \n cur.execute(\"SELECT * FROM lugares WHERE ST_DistanceSphere(geo, ST_MakePoint(-35.2084091, -5.8117310)) <= 2500\");\n rows = cur.fetchall();\n end = time.time()\n time25km.append(end - start)\n\nqtd25km = len(rows);\ntime25km = np.average(time25km);\nprint(\"2.5KM - Quantidade de pontos: \", qtd25km, \" - Média de tempo: \", time25km);\n\n\n#3KM\nfor i in range (1000):\n start = time.time() \n cur.execute(\"SELECT * FROM lugares WHERE ST_DistanceSphere(geo, ST_MakePoint(-35.2084091, -5.8117310)) <= 3000\");\n rows = cur.fetchall();\n end = time.time()\n time3km.append(end - start)\n\nqtd3km = len(rows);\ntime3km = np.average(time3km);\nprint(\"3KM - Quantidade de pontos: \", qtd3km, \" - Média de tempo: \", time3km);\n\ncur.close();\ncon.close();" ]
[ [ "numpy.average" ] ]
PolymerGuy/recon
[ "05b14f0834fa675579eabdf43fac046259df19bb" ]
[ "recolo/data_structures/read_abaqus_rpts.py" ]
[ "import os\nimport numpy as np\nfrom collections import namedtuple\nimport logging\nfrom natsort import natsorted\n\n\ndef list_files_in_folder(path, file_type=\".rpt\",abs_path=False):\n \"\"\" List all files with a given extension for a given path. The output is sorted\n Parameters\n ----------\n path : str\n Path to the folder containing the files\n file_type : str\n The file extension ex. \".rpt\"\n Returns\n -------\n list\n A list of sorted file names\n \"\"\"\n if abs_path:\n return natsorted([os.path.join(path,file) for file in os.listdir(path) if file.endswith(file_type)])\n else:\n return natsorted([file for file in os.listdir(path) if file.endswith(file_type)])\n\n\n\n\nAbaqusData = namedtuple(\"AbaqusSimulation\",\n [\"disp_fields\", \"accel_fields\", \"slope_x_fields\", \"slope_y_fields\", \"times\", \"plate_len_x\",\n \"plate_len_y\", \"npts_x\", \"npts_y\", \"pixel_size_x\", \"pixel_size_y\", \"sampling_rate\"])\n\n\ndef load_abaqus_rpts(path_to_rpts, use_only_img_ids=None):\n \"\"\"\n Load Abaqus RPT files into a AbaqusData object containing all relevant fields\n Parameters\n ----------\n path_to_rpts : str\n Path to the folder containing the files\n use_only_img_ids : list\n A list of file ids which should be included in the AbaqusData object\n Returns\n -------\n abaqusData : AbaqusData\n The fields loaded from Abaqus\n \"\"\"\n logger = logging.getLogger(__name__)\n\n rpt_file_paths = list_files_in_folder(path_to_rpts, file_type=\".rpt\",abs_path=True)\n logger.info(\"Reading %i Abaqus .rpt files\" % len(rpt_file_paths))\n\n disp_fields = []\n slope_x_fields = []\n slope_y_fields = []\n accel_fields = []\n times = []\n\n if use_only_img_ids is not None:\n rpt_file_paths = [path for i, path in enumerate(rpt_file_paths) if i in use_only_img_ids]\n\n for file_name in rpt_file_paths:\n logger.info(\"Reading: %s \" % file_name)\n path_to_rpt = os.path.join(path_to_rpts, file_name)\n field_data = np.genfromtxt(path_to_rpt, dtype=float,\n skip_header=19)\n\n time = np.genfromtxt(path_to_rpt, dtype=str, skip_header=8, max_rows=1)[-1]\n\n node_label = field_data[:, 0]\n node_coord_x = field_data[:, 1]\n node_coord_y = field_data[:, 2]\n node_disp_z = field_data[:, 3]\n node_acceleration_z = field_data[:, 4]\n node_slope_x = field_data[:, 5]\n node_slope_y = field_data[:, 6]\n\n # All data is assumed to be sampled on a square grid\n seed = int(node_disp_z.size ** 0.5)\n\n plate_len_x = (node_coord_x.max() - node_coord_x.min()) * 1e-3\n plate_len_y = (node_coord_y.max() - node_coord_y.min()) * 1e-3\n\n disp_field = -node_disp_z.reshape((seed, seed)) * 1e-3\n accel_field = -node_acceleration_z.reshape((seed, seed)) * 1e-3\n slope_x_field = -node_slope_x.reshape((seed, seed)) * 1e-3\n slope_y_field = -node_slope_y.reshape((seed, seed)) * 1e-3\n\n disp_fields.append(disp_field)\n accel_fields.append(accel_field)\n times.append(float(time))\n slope_x_fields.append(slope_x_field)\n slope_y_fields.append(slope_y_field)\n npts_x = np.shape(disp_fields)[1]\n npts_y = np.shape(disp_fields)[2]\n pixel_size_x = plate_len_x / float(npts_x)\n pixel_size_y = plate_len_y / float(npts_y)\n sampling_rate = 1. / (times[1] - times[0])\n\n return AbaqusData(np.array(disp_fields), np.array(accel_fields), np.array(slope_x_fields), np.array(slope_y_fields),\n np.array(times), plate_len_x, plate_len_y, npts_x, npts_y, pixel_size_x, pixel_size_y,\n sampling_rate)\n" ]
[ [ "numpy.array", "numpy.genfromtxt", "numpy.shape" ] ]
Jaskaran197/Red-blood-cell-detection-SSD
[ "a33b330ad17454a7425aa7f57818c0a41b4e0ff9" ]
[ "utils/training_utils/ssd_vgg16.py" ]
[ "import os\nfrom losses import SSD_LOSS\nfrom utils import data_utils\nfrom networks import SSD_VGG16\nimport tensorflow as tf\nfrom tensorflow.keras.optimizers import SGD, Adam\nfrom data_generators import SSD_DATA_GENERATOR\nfrom tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger, TerminateOnNaN, LearningRateScheduler\nfrom tensorflow.keras.applications.vgg16 import preprocess_input\n\n\ndef ssd_vgg16(config, args, callbacks):\n training_config = config[\"training\"]\n with open(args.label_maps, \"r\") as label_map_file:\n label_maps = [i.strip(\"\\n\") for i in label_map_file.readlines()]\n\n training_samples = data_utils.get_samples_from_split(\n split_file=args.training_split,\n images_dir=args.images_dir,\n labels_dir=args.labels_dir\n )\n\n if args.validation_split is not None:\n validation_samples = data_utils.get_samples_from_split(\n split_file=args.validation_split,\n images_dir=args.images_dir,\n labels_dir=args.labels_dir\n )\n\n training_data_generator = SSD_DATA_GENERATOR(\n samples=training_samples,\n config=config,\n label_maps=label_maps,\n shuffle=args.shuffle,\n batch_size=args.batch_size,\n augment=args.augment,\n process_input_fn=preprocess_input\n )\n\n if args.validation_split is not None:\n print(\"-- validation split specified\")\n validation_data_generator = SSD_DATA_GENERATOR(\n samples=validation_samples,\n config=config,\n label_maps=label_maps,\n shuffle=args.shuffle,\n batch_size=args.batch_size,\n augment=False,\n process_input_fn=preprocess_input\n )\n\n loss = SSD_LOSS(\n alpha=training_config[\"alpha\"],\n min_negative_boxes=training_config[\"min_negative_boxes\"],\n negative_boxes_ratio=training_config[\"negative_boxes_ratio\"]\n )\n\n if training_config[\"optimizer\"][\"name\"] == \"adam\":\n optimizer = Adam(\n learning_rate=args.learning_rate,\n beta_1=training_config[\"optimizer\"][\"beta_1\"],\n beta_2=training_config[\"optimizer\"][\"beta_2\"],\n epsilon=training_config[\"optimizer\"][\"epsilon\"],\n decay=training_config[\"optimizer\"][\"decay\"]\n )\n elif training_config[\"optimizer\"][\"name\"] == \"sgd\":\n optimizer = SGD(\n learning_rate=args.learning_rate,\n momentum=training_config[\"optimizer\"][\"momentum\"],\n decay=training_config[\"optimizer\"][\"decay\"],\n nesterov=training_config[\"optimizer\"][\"nesterov\"]\n )\n else:\n optimizer = Adam(\n learning_rate=args.learning_rate,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-08,\n decay=0.0\n )\n\n model = SSD_VGG16(\n config=config,\n label_maps=label_maps,\n is_training=True\n )\n\n if args.show_network_structure:\n model.summary()\n\n model.compile(\n optimizer=optimizer,\n loss=loss.compute\n )\n\n if args.checkpoint is not None:\n assert os.path.exists(args.checkpoint), \"checkpoint does not exist\"\n model.load_weights(args.checkpoint, by_name=True)\n\n model.fit(\n x=training_data_generator,\n validation_data=validation_data_generator if args.validation_split is not None else None,\n batch_size=args.batch_size,\n validation_batch_size=args.batch_size,\n epochs=args.epochs,\n initial_epoch=args.initial_epoch,\n callbacks=callbacks,\n )\n\n model.save_weights(os.path.join(args.output_dir, \"model.h5\"))\n" ]
[ [ "tensorflow.keras.optimizers.Adam", "tensorflow.keras.optimizers.SGD" ] ]
qipeng/cudamat
[ "a346369447e9b2dbb730e4218a4c0eaa153840ef" ]
[ "test_learn.py" ]
[ "import pdb\nimport numpy as np\nimport nose\nimport cudamat as cm\nimport learn as cl\n\ndef setup():\n cm.cublas_init()\n\ndef teardown():\n cm.cublas_shutdown()\n\ndef test_mult_by_sigmoid_deriv():\n m = 256\n n = 128\n c_targets = np.array(np.random.randn(m, n)*10, dtype=np.float32, order='F')\n c_acts = np.array(np.random.rand(m, n), dtype=np.float32, order='F')\n\n g_targets = cm.CUDAMatrix(c_targets)\n g_acts = cm.CUDAMatrix(c_acts)\n\n c_targets = c_targets * c_acts * (1. - c_acts)\n cl.mult_by_sigmoid_deriv(g_targets, g_acts)\n\n assert np.max(np.abs(c_acts - g_acts.asarray())) < 10**-2, \"Error in cudamat.learn.mult_by_sigmoid_deriv exceeded threshold\"\n\nif __name__ == '__main__':\n nose.runmodule()\n" ]
[ [ "numpy.random.randn", "numpy.random.rand" ] ]
GeniusDog/Intelligent-Projects-Using-Python
[ "ca4650abb0c477b28a5698032835ea993cb08bd4" ]
[ "Chapter04/cycledGAN_edges_to_bags.py" ]
[ "from __future__ import print_function, division\n#import scipy\nimport tensorflow as tf\nimport datetime\nimport matplotlib.pyplot as plt\n#import sys\n#from data_loader import DataLoader\nimport numpy as np\nimport os\nimport time \nimport glob\nfrom scipy.misc import imread,imresize,imsave\nimport copy\nimport fire\nfrom elapsedtimer import ElapsedTimer\n\n\ndef load_train_data(image_path, load_size=64,fine_size=64, is_testing=False):\n img_A = imread(image_path[0])\n img_B = imread(image_path[1])\n \n if not is_testing:\n img_A = imresize(img_A, [load_size, load_size])\n img_B = imresize(img_B, [load_size, load_size])\n # h1 = int(np.ceil(np.random.uniform(1e-2, load_size-fine_size)))\n # w1 = int(np.ceil(np.random.uniform(1e-2, load_size-fine_size)))\n # img_A = img_A[h1:h1+fine_size, w1:w1+fine_size]\n # img_B = img_B[h1:h1+fine_size, w1:w1+fine_size]\n\n if np.random.random() > 0.5:\n img_A = np.fliplr(img_A)\n img_B = np.fliplr(img_B)\n else:\n img_A = imresize(img_A, [fine_size, fine_size])\n img_B = imresize(img_B, [fine_size, fine_size])\n\n img_A = img_A/127.5 - 1 \n img_B = img_B/127.5 - 1 \n\n img_AB = np.concatenate((img_A, img_B), axis=2)\n \n return img_AB\n\ndef merge(images, size):\n h, w = images.shape[1], images.shape[2]\n img = np.zeros((h * size[0], w * size[1], 3))\n for idx, image in enumerate(images):\n i = idx % size[1]\n j = idx // size[1]\n img[j*h:j*h+h, i*w:i*w+w, :] = image\n\n return img\n\ndef image_save(images, size, path):\n return imsave(path, merge(images, size))\n\ndef save_images(images, size, image_path):\n return image_save(inverse_transform(images),size, image_path)\n\ndef inverse_transform(images):\n return (images + 1)*127.5\n\n\nclass ImagePool(object):\n def __init__(self, maxsize=50):\n self.maxsize = maxsize\n self.num_img = 0\n self.images = []\n\n def __call__(self, image):\n if self.maxsize <= 0:\n return image\n if self.num_img < self.maxsize:\n self.images.append(image)\n self.num_img += 1\n return image\n if np.random.rand() > 0.5:\n idx = int(np.random.rand()*self.maxsize)\n tmp1 = copy.copy(self.images[idx])[0]\n self.images[idx][0] = image[0]\n idx = int(np.random.rand()*self.maxsize)\n tmp2 = copy.copy(self.images[idx])[1]\n self.images[idx][1] = image[1]\n return [tmp1, tmp2]\n else:\n\t return image\n\n\nclass DiscoGAN():\n \n def __init__(self,dataset_dir,epochs=200):\n # Input shape\n self.dataset_dir = dataset_dir\n self.lambda_l2 = 1.0\n self.image_size = 64\n self.input_dim = 3\n self.output_dim = 3\n self.batch_size = 64 \n self.df = 64\n self.gf = 64\n self.channels = 3\n self.output_c_dim = 3\n self.l_r = 2e-4\n self.beta1 = 0.5\n self.beta2 = 0.99\n self.weight_decay = 0.00001\n self.epoch = epochs\n self.train_size = 10000\n self.epoch_step = 10\n self.load_size = 64\n self.fine_size = 64 \n self.checkpoint_dir = 'checkpoint'\n self.sample_dir = 'sample'\n self.print_freq = 5\n self.save_freq = 10 \n self.pool = ImagePool()\n \n return None\n \n\n def build_generator(self,image,reuse=False,name='generator'):\n \n with tf.variable_scope(name):\n if reuse:\n tf.get_variable_scope().reuse_variables()\n else:\n assert tf.get_variable_scope().reuse is False\n \n \"\"\"U-Net Generator\"\"\"\n def lrelu(x, alpha,name='lrelu'):\n with tf.variable_scope(name):\n return tf.nn.relu(x) - alpha * tf.nn.relu(-x)\n \n def instance_norm(x,name='instance_norm'):\n\n with tf.variable_scope(name):\n \n if reuse:\n tf.get_variable_scope().reuse_variables()\n else:\n assert tf.get_variable_scope().reuse is False\n \n epsilon = 1e-5\n 
mean, var = tf.nn.moments(x, [1, 2], keep_dims=True)\n scale = tf.get_variable('scale',[x.get_shape()[-1]], \n initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02))\n offset = tf.get_variable('offset',[x.get_shape()[-1]],initializer=tf.constant_initializer(0.0))\n out = scale*tf.div(x-mean, tf.sqrt(var+epsilon)) + offset\n return out\n \n \n def common_conv2d(layer_input,filters,f_size=4,stride=2,padding='SAME',norm=True,name='common_conv2d'):\n \n \"\"\"Layers used during downsampling\"\"\"\n with tf.variable_scope(name):\n if reuse:\n tf.get_variable_scope().reuse_variables()\n else:\n assert tf.get_variable_scope().reuse is False\n \n d = tf.contrib.layers.conv2d(layer_input,filters,kernel_size=f_size,stride=stride,padding=padding)\n \n if norm:\n d = tf.contrib.layers.batch_norm(d)\n d = lrelu(d,alpha=0.2)\n return d\n \n #def common_deconv2d(layer_input,skip_input, filters,f_size=4,stride=2,dropout_rate=0,name='common_deconv2d'):\n def common_deconv2d(layer_input,filters,f_size=4,stride=2,padding='SAME',dropout_rate=0,name='common_deconv2d'):\n \"\"\"Layers used during upsampling\"\"\"\n with tf.variable_scope(name):\n if reuse:\n tf.get_variable_scope().reuse_variables()\n else:\n assert tf.get_variable_scope().reuse is False\n\n\n u = tf.contrib.layers.conv2d_transpose(layer_input,filters,f_size,stride=stride,padding=padding)\n \n if dropout_rate:\n u = tf.contrib.layers.dropout(u,keep_prob=dropout_rate)\n u = tf.contrib.layers.batch_norm(u)\n u = tf.nn.relu(u)\n # u = tf.contrib.keras.layers.concatenate([skip_input,u])\n return u \n \n \n \n # Downsampling\n dwn1 = common_conv2d(image,self.gf,stride=2,norm=False,name='dwn1') # 64x64 -> 32x32\n\t #print('dwn1',np.shape(dwn1))\n dwn2 = common_conv2d(dwn1,self.gf*2,stride=2,name='dwn2') # 32x32 -> 16x16\n\t #print('dwn2',np.shape(dwn2))\n dwn3 = common_conv2d(dwn2,self.gf*4,stride=2,name='dwn3') # 16x16 -> 8x8\n\t # print('dwn3',np.shape(dwn3))\n dwn4 = common_conv2d(dwn3,self.gf*8,stride=2,name='dwn4') # 8x8 -> 4x4 \n\t # print('dwn4',np.shape(dwn4))\n dwn5 = common_conv2d(dwn4,100,stride=1,padding='valid',name='dwn5') # 4x4 -> 1x1 \n # print('dwn5',np.shape(dwn5))\n \n # Upsampling\n up1 = common_deconv2d(dwn5,self.gf*8,stride=1,padding='valid',name='up1') # 16x16 -> 16x16 \n #print(np.shape(up1))\n up2 = common_deconv2d(up1,self.gf*4,name='up2') # 16x16 -> 32x32\n up3 = common_deconv2d(up2,self.gf*2,name='up3') # 32x32 -> 64x64\n up4 = common_deconv2d(up3,self.gf,name='up4') # 64x64 -> 128x128 \n\t\t\t\n out_img = tf.contrib.layers.conv2d_transpose(up4,self.channels,kernel_size=4,stride=2,padding='SAME',activation_fn=tf.nn.tanh) # 128x128 -> 256x256\n #print('out_img',(np.shape(out_img))) \n \n return out_img\n\n def build_discriminator(self,image,reuse=False,name='discriminator'):\n \n \n with tf.variable_scope(name):\n if reuse:\n tf.get_variable_scope().reuse_variables()\n else:\n assert tf.get_variable_scope().reuse is False\n \n def lrelu(x, alpha,name='lrelu'):\n\t\t\t\n with tf.variable_scope(name):\n if reuse:\n tf.get_variable_scope().reuse_variables()\n else:\n assert tf.get_variable_scope().reuse is False\n\n return tf.nn.relu(x) - alpha * tf.nn.relu(-x)\n \n def instance_norm(x,name='instance_norm'):\n\t\t\t\n with tf.variable_scope(name):\n if reuse:\n tf.get_variable_scope().reuse_variables()\n else:\n assert tf.get_variable_scope().reuse is False\n\n \n epsilon = 1e-5\n mean, var = tf.nn.moments(x, [1, 2], keep_dims=True)\n scale = tf.get_variable('scale',[x.get_shape()[-1]], \n 
initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02))\n offset = tf.get_variable('offset',[x.get_shape()[-1]],initializer=tf.constant_initializer(0.0))\n out = scale*tf.div(x-mean, tf.sqrt(var+epsilon)) + offset\n return out\n \n \n def d_layer(layer_input,filters,f_size=4,stride=2,norm=True,name='d_layer'):\n \"\"\"Discriminator layer\"\"\"\n with tf.variable_scope(name):\n if reuse:\n tf.get_variable_scope().reuse_variables()\n else:\n assert tf.get_variable_scope().reuse is False\n\n d = tf.contrib.layers.conv2d(layer_input,filters,kernel_size=f_size,stride=2, padding='SAME')\n if norm:\n d = tf.contrib.layers.batch_norm(d)\n d = lrelu(d,alpha=0.2)\n return d\n \n \n down1 = d_layer(image,self.df, norm=False,name='down1') #256x256 -> 128x128\n #rint('down1',np.shape(down1))\n down2 = d_layer(down1,self.df*2,name='down2') #128x128 -> 64x64\n #rint('down2',np.shape(down2))\n down3 = d_layer(down2,self.df*4,name='down3') #64x64 -> 32x32 \n #rint('down3',np.shape(down3))\n down4 = d_layer(down3,self.df*8,name='down4') # 32x32 -> 16x16\n #rint('down4',np.shape(down4))\n \n down5 = tf.contrib.layers.conv2d(down4,1,kernel_size=4,stride=1,padding='valid')\n #rint('down5',np.shape(down5)) \n #rint(np.shape(down5))\n \n #logits = tf.reduce_mean(down5, [1,2,3])\n \n return down5\n \n def build_network(self):\n \n def squared_loss(y_pred,labels):\n return tf.reduce_mean((y_pred - labels)**2)\n \n def abs_loss(y_pred,labels):\n return tf.reduce_mean(tf.abs(y_pred - labels)) \n\n\n def binary_cross_entropy_loss(logits,labels):\n return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,logits=logits))\n \n self.images_real = tf.placeholder(tf.float32,[None,self.image_size,self.image_size,self.input_dim + self.output_dim])\n \n self.image_real_A = self.images_real[:,:,:,:self.input_dim]\n self.image_real_B = self.images_real[:,:,:,self.input_dim:self.input_dim + self.output_dim]\n self.images_fake_B = self.build_generator(self.image_real_A,reuse=False,name='generator_AB')\n self.images_fake_A = self.build_generator(self.images_fake_B,reuse=False,name='generator_BA')\n self.images_fake_A_ = self.build_generator(self.image_real_B,reuse=True,name='generator_BA')\n self.images_fake_B_ = self.build_generator(self.images_fake_A_,reuse=True,name='generator_AB')\n \n self.D_B_fake = self.build_discriminator(self.images_fake_B ,reuse=False, name=\"discriminatorB\")\n self.D_A_fake = self.build_discriminator(self.images_fake_A_,reuse=False, name=\"discriminatorA\") \n\n self.D_B_real = self.build_discriminator(self.image_real_B,reuse=True, name=\"discriminatorB\")\n self.D_A_real = self.build_discriminator(self.image_real_A,reuse=True, name=\"discriminatorA\")\n\n \n \n self.loss_GABA = self.lambda_l2*squared_loss(self.images_fake_A,self.image_real_A) + binary_cross_entropy_loss(labels=tf.ones_like(self.D_B_fake),logits=self.D_B_fake)\n self.loss_GBAB = self.lambda_l2*squared_loss(self.images_fake_B_,self.image_real_B) + binary_cross_entropy_loss(labels=tf.ones_like(self.D_A_fake),logits=self.D_A_fake)\n self.generator_loss = self.loss_GABA + self.loss_GBAB\n \n \n self.D_B_loss_real = binary_cross_entropy_loss(tf.ones_like(self.D_B_real),self.D_B_real)\n self.D_B_loss_fake = binary_cross_entropy_loss(tf.zeros_like(self.D_B_fake),self.D_B_fake)\n self.D_B_loss = (self.D_B_loss_real + self.D_B_loss_fake) / 2.0\n \n \n self.D_A_loss_real = binary_cross_entropy_loss(tf.ones_like(self.D_A_real),self.D_A_real)\n self.D_A_loss_fake = 
binary_cross_entropy_loss(tf.zeros_like(self.D_A_fake),self.D_A_fake)\n self.D_A_loss = (self.D_A_loss_real + self.D_A_loss_fake) / 2.0\n \n self.discriminator_loss = self.D_B_loss + self.D_A_loss\n \n self.loss_GABA_sum = tf.summary.scalar(\"g_loss_a2b\", self.loss_GABA)\n self.loss_GBAB_sum = tf.summary.scalar(\"g_loss_b2a\", self.loss_GBAB)\n self.g_total_loss_sum = tf.summary.scalar(\"g_loss\", self.generator_loss)\n self.g_sum = tf.summary.merge([self.loss_GABA_sum,self.loss_GBAB_sum,self.g_total_loss_sum])\n \n self.loss_db_sum = tf.summary.scalar(\"db_loss\", self.D_B_loss)\n self.loss_da_sum = tf.summary.scalar(\"da_loss\", self.D_A_loss)\n self.loss_d_sum = tf.summary.scalar(\"d_loss\",self.discriminator_loss)\n \n self.db_loss_real_sum = tf.summary.scalar(\"db_loss_real\", self.D_B_loss_real)\n self.db_loss_fake_sum = tf.summary.scalar(\"db_loss_fake\", self.D_B_loss_fake)\n self.da_loss_real_sum = tf.summary.scalar(\"da_loss_real\", self.D_A_loss_real)\n self.da_loss_fake_sum = tf.summary.scalar(\"da_loss_fake\", self.D_A_loss_fake)\n self.d_sum = tf.summary.merge(\n [self.loss_da_sum, self.da_loss_real_sum, self.da_loss_fake_sum,\n self.loss_db_sum, self.db_loss_real_sum, self.db_loss_fake_sum,\n self.loss_d_sum]\n )\n\n \n trainable_variables = tf.trainable_variables()\n \n self.d_variables = [var for var in trainable_variables if 'discriminator' in var.name]\n self.g_variables = [var for var in trainable_variables if 'generator' in var.name]\n \n print ('Variable printing start :' )\n for var in self.d_variables: \n print(var.name)\n \n self.test_image_A = tf.placeholder(tf.float32,[None, self.image_size,self.image_size,self.input_dim], name='test_A')\n self.test_image_B = tf.placeholder(tf.float32,[None, self.image_size, self.image_size,self.output_c_dim], name='test_B')\n self.saver = tf.train.Saver()\n \n \n def train_network(self):\n \n self.learning_rate = tf.placeholder(tf.float32)\n self.d_optimizer = tf.train.AdamOptimizer(self.learning_rate,beta1=self.beta1,beta2=self.beta2).minimize(self.discriminator_loss,var_list=self.d_variables)\n self.g_optimizer = tf.train.AdamOptimizer(self.learning_rate,beta1=self.beta1,beta2=self.beta2).minimize(self.generator_loss,var_list=self.g_variables) \n \n self.init_op = tf.global_variables_initializer()\n self.sess = tf.Session()\n self.sess.run(self.init_op)\n #self.dataset_dir = '/home/santanu/Downloads/DiscoGAN/edges2handbags/train/'\n self.writer = tf.summary.FileWriter(\"./logs\", self.sess.graph)\n count = 1\n start_time = time.time()\n \n for epoch in range(self.epoch):\n data_A = os.listdir(self.dataset_dir + 'trainA/')\n data_B = os.listdir(self.dataset_dir + 'trainB/')\n data_A = [ (self.dataset_dir + 'trainA/' + str(file_name)) for file_name in data_A ] \n\n data_B = [ (self.dataset_dir + 'trainB/' + str(file_name)) for file_name in data_B ] \n np.random.shuffle(data_A)\n np.random.shuffle(data_B)\n batch_ids = min(min(len(data_A), len(data_B)), self.train_size) // self.batch_size\n# lr = self.l_r if epoch < self.epoch_step else self.l_r*(self.epoch-epoch)/(self.epoch-self.epoch_step)\n lr = self.l_r if epoch < self.epoch_step else self.l_r*(self.epoch-epoch)/(self.epoch-self.epoch_step)\n \n for id_ in range(0, batch_ids):\n batch_files = list(zip(data_A[id_ * self.batch_size:(id_ + 1) * self.batch_size],\n data_B[id_ * self.batch_size:(id_ + 1) * self.batch_size]))\n batch_images = [load_train_data(batch_file, self.load_size, self.fine_size) for batch_file in batch_files]\n batch_images = 
np.array(batch_images).astype(np.float32)\n \n # Update G network and record fake outputs\n fake_A, fake_B, _, summary_str = self.sess.run(\n [self.images_fake_A_,self.images_fake_B,self.g_optimizer,self.g_sum],\n feed_dict={self.images_real: batch_images, self.learning_rate:lr})\n self.writer.add_summary(summary_str, count)\n [fake_A,fake_B] = self.pool([fake_A, fake_B])\n \n # Update D network\n _, summary_str = self.sess.run(\n [self.d_optimizer,self.d_sum],\n feed_dict={self.images_real: batch_images,\n # self.fake_A_sample: fake_A,\n # self.fake_B_sample: fake_B,\n self.learning_rate: lr})\n self.writer.add_summary(summary_str, count)\n \n count += 1\n print((\"Epoch: [%2d] [%4d/%4d] time: %4.4f\" % (\n epoch, id_, batch_ids, time.time() - start_time)))\n \n if count % self.print_freq == 1:\n self.sample_model(self.sample_dir, epoch, id_)\n \n if count % self.save_freq == 2:\n self.save_model(self.checkpoint_dir, count)\n\n \n \n def save_model(self,checkpoint_dir,step):\n model_name = \"cyclegan.model\"\n model_dir = \"%s_%s\" % (self.dataset_dir, self.image_size)\n checkpoint_dir = os.path.join(checkpoint_dir, model_dir)\n \n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n \n self.saver.save(self.sess,\n os.path.join(checkpoint_dir, model_name),\n global_step=step)\n\n def load_model(self,checkpoint_dir):\n \n print(\" [*] Reading checkpoint...\")\n \n model_dir = \"%s_%s\" % (self.dataset_dir, self.image_size)\n checkpoint_dir = os.path.join(checkpoint_dir, model_dir)\n \n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))\n return True\n else:\n return False\n \n \n \n def sample_model(self, sample_dir, epoch, id_):\n if not os.path.exists(sample_dir):\n os.makedirs(sample_dir)\n\n \n data_A = os.listdir(self.dataset_dir + 'trainA/')\n data_B = os.listdir(self.dataset_dir + 'trainB/') \n data_A = [ (self.dataset_dir + 'trainA/' + str(file_name)) for file_name in data_A ]\n data_B = [ (self.dataset_dir + 'trainB/' + str(file_name)) for file_name in data_B ]\n \n\n np.random.shuffle(data_A)\n np.random.shuffle(data_B)\n batch_files = list(zip(data_A[:self.batch_size], data_B[:self.batch_size]))\n sample_images = [load_train_data(batch_file, is_testing=True) for batch_file in batch_files]\n sample_images = np.array(sample_images).astype(np.float32)\n\n fake_A, fake_B = self.sess.run(\n [self.images_fake_A_,self.images_fake_B],\n feed_dict={self.images_real: sample_images}\n )\n save_images(fake_A, [self.batch_size, 1],\n './{}/A_{:02d}_{:04d}.jpg'.format(sample_dir, epoch, id_))\n save_images(fake_B, [self.batch_size, 1],\n './{}/B_{:02d}_{:04d}.jpg'.format(sample_dir, epoch, id_))\n\n def process_main(self):\n self.build_network()\n self.train_network()\n\n\nif __name__ == '__main__':\n with ElapsedTimer('DiscoGAN'):\n fire.Fire(DiscoGAN)\n \n \n" ]
[ [ "tensorflow.summary.scalar", "tensorflow.contrib.layers.batch_norm", "tensorflow.variable_scope", "tensorflow.abs", "tensorflow.get_variable_scope", "tensorflow.summary.merge", "tensorflow.summary.FileWriter", "tensorflow.global_variables_initializer", "tensorflow.truncated_normal_initializer", "numpy.fliplr", "scipy.misc.imresize", "tensorflow.contrib.layers.conv2d", "numpy.random.rand", "tensorflow.contrib.layers.conv2d_transpose", "scipy.misc.imread", "tensorflow.nn.relu", "tensorflow.nn.sigmoid_cross_entropy_with_logits", "tensorflow.constant_initializer", "numpy.zeros", "tensorflow.ones_like", "tensorflow.contrib.layers.dropout", "tensorflow.zeros_like", "tensorflow.train.Saver", "tensorflow.Session", "tensorflow.nn.moments", "tensorflow.placeholder", "numpy.random.shuffle", "tensorflow.train.get_checkpoint_state", "tensorflow.train.AdamOptimizer", "tensorflow.reduce_mean", "tensorflow.sqrt", "tensorflow.trainable_variables", "numpy.random.random", "numpy.array", "numpy.concatenate" ] ]
mguo123/pan_omics
[ "e1cacd543635b398fb08c0b31d08fa6b7c389658" ]
[ "src/rgt/motifanalysis/Motif.py" ]
[ "###################################################################################################\n# Libraries\n###################################################################################################\n\nfrom __future__ import division\n# Python 3 compatibility\nfrom __future__ import print_function\n\n# Python\nfrom os.path import basename\n\n# External\nfrom MOODS import tools, parsers\nfrom numpy import argmax\n\n\n# Internal\n\n\n###################################################################################################\n# Classes\n###################################################################################################\n\n\nclass Motif:\n \"\"\"\n Represent a DNA binding affinity motif.\n \"\"\"\n\n def __init__(self, input_file_name, pseudocounts, threshold):\n \"\"\" \n Initializes Motif.\n\n Fields:\n pfm -- Position Frequency Matrix.\n bg -- Background frequencies.\n pssm -- Position Specific Scoring Matrix.\n alphabet -- A list of letters, eg [\"Aa\", \"Cc\", \"Gg\", \"Tt\"]\n threshold -- Motif matching threshold.\n len -- Length of the motif.\n max -- Maximum PSSM score possible.\n is_palindrome -- True if consensus is biologically palindromic.\n \"\"\"\n\n # Initializing name\n self.name = \".\".join(basename(input_file_name).split(\".\")[:-1])\n\n # Creating PFM & PSSM\n self.pfm = parsers.pfm(str(input_file_name))\n self.bg = tools.flat_bg(len(self.pfm)) # total number of \"points\" to add, not per-row\n self.pssm = tools.log_odds(self.pfm, self.bg, pseudocounts, 2)\n self.pssm_rc = tools.reverse_complement(self.pssm)\n\n # how many bases this motif has\n self.len = len(self.pfm[0])\n\n # maximum value found in the whole PSSM\n self.max = max([max(e) for e in self.pssm])\n\n # we only support pure DNA or methylated DNA, for now.\n self.alphabet = [\"Aa\", \"Cc\", \"Gg\", \"Tt\"]\n if len(self.pfm) == 6:\n self.alphabet += [\"m\", \"1\"]\n\n self.threshold = threshold\n\n self.consensus = \"\".join([self.alphabet[i][0] for i in argmax(self.pssm, axis=0)])\n self.consensus_rc = \"\".join([self.alphabet[i][0] for i in argmax(self.pssm_rc, axis=0)])\n\n # Evaluating if motif is palindromic\n self.is_palindrome = self.consensus == self.consensus_rc\n" ]
[ [ "numpy.argmax" ] ]
james94/driverlessai-recipes
[ "87c35460db59ffda8dc18ad82cb3a9b8291410e4" ]
[ "transformers/executables/pe_imports_features.py" ]
[ "\"\"\"Extract LIEF features from PE files\"\"\"\nfrom h2oaicore.transformer_utils import CustomTransformer\nimport datatable as dt\nimport numpy as np\n\n\nclass PEImportsFeatures(CustomTransformer):\n _modules_needed_by_name = ['lief==0.9.0']\n _regression = True\n _binary = True\n _multiclass = True\n _is_reproducible = True\n _parallel_task = True # if enabled, params_base['n_jobs'] will be >= 1 (adaptive to system), otherwise 1\n _can_use_gpu = True # if enabled, will use special job scheduler for GPUs\n _can_use_multi_gpu = True # if enabled, can get access to multiple GPUs for single transformer (experimental)\n _numeric_output = True\n\n @staticmethod\n def get_default_properties():\n return dict(col_type=\"text\", min_cols=1, max_cols=1, relative_importance=1)\n\n @staticmethod\n def do_acceptance_test():\n return False\n\n def fit_transform(self, X: dt.Frame, y: np.array = None):\n return self.transform(X)\n\n def load_pe(self, file_path):\n with open(file_path, 'rb') as f:\n bytez = bytearray(f.read())\n return (bytez)\n\n def imports_features(self, lief_binary):\n from sklearn.feature_extraction import FeatureHasher\n\n imports = lief_binary.imports\n features = {}\n for lib in imports:\n if lib.name not in features:\n features[lib.name] = []\n for entry in lib.entries:\n if entry.is_ordinal:\n features[lib.name].append(\"ordinal\" + str(entry.ordinal))\n else:\n features[lib.name].append(entry.name[:10000])\n\n features_hashed = {}\n libraries = sorted(list(set([l.lower() for l in features.keys()])))\n for i, x in enumerate(FeatureHasher(256, input_type='string').transform([libraries]).toarray()[0]):\n features_hashed.update({f'Imports_libraries_hash_{i}': x})\n entries = sorted([lib.lower() + ':' + e for lib, elist in features.items() for e in elist])\n for i, x in enumerate(FeatureHasher(1024, input_type='string').transform([entries]).toarray()[0]):\n features_hashed.update({f'Imports_entries_hash_{i}': x})\n return features_hashed\n\n def get_imports_features(self, file_path):\n import lief\n try:\n pe_bytez = self.load_pe(file_path)\n lief_binary = lief.PE.parse(list(pe_bytez))\n X = self.imports_features(lief_binary)\n\n return X\n\n except:\n X = {f'Imports_libraries_hash_{i}': 0 for i in range(256)}\n X.update({f'Imports_entries_hash_{i}': 0 for i in range(1024)})\n return X\n\n def transform(self, X: dt.Frame):\n import pandas as pd\n\n ret_df = pd.DataFrame(\n [\n self.get_imports_features(x)\n for x in X.to_pandas().values[:, 0]\n ]\n )\n\n self._output_feature_names = ret_df.columns.to_list()\n self._feature_desc = self._output_feature_names\n\n return ret_df\n" ]
[ [ "sklearn.feature_extraction.FeatureHasher" ] ]
HanMeh/ABMT
[ "e2767bd29ad9e2da767948b5047cf7f287094c6b" ]
[ "demo.py" ]
[ "\"\"\"Main function of ABMT for the paper: Adversarial Brain Multiplex Prediction From a Single Brain Network with Application to Gender Fingerprinting\r\nView Network Normalization\r\n Details can be found in:\r\n (1) the original paper\r\n Ahmed Nebli, and Islem Rekik.\r\n ---------------------------------------------------------------------\r\n This file contains the implementation of three key steps of our netNorm framework:\r\n\r\n Inputs:\r\n sourceGraph: (N × m x m) matrix stacking the source graphs of all subjects\r\n (N × m x m) matrix stacking the target graphs of all subjects\r\n N the total number of views\r\n m the number of regions\r\n\r\n Output:\r\n Target graph: (N x m x m) matrix stacking predicted target graphs of all subjects\r\n (2) Dependencies: please install the following libraries:\r\n - TensorFlow\r\n - numpy\r\n - scikitlearn\r\n ---------------------------------------------------------------------\r\n Copyright 2020 Ahmed Nebli, Sousse University.\r\n Please cite the above paper if you use this code.\r\n All rights reserved.\r\n \"\"\"\r\nimport argparse\r\nimport os\r\nimport scipy.misc\r\nimport numpy as np\r\nfrom model_demo import graph2graph\r\nimport tensorflow as tf\r\nimport datetime\r\n\r\nparser = argparse.ArgumentParser(description='')\r\nparser.add_argument('--epoch', dest='epoch', type=int, default=1, help='# of epoch')\r\nparser.add_argument('--batch_size', dest='batch_size', type=int, default=40, help='# graphs in batch')\r\nparser.add_argument('--train_size', dest='train_size', type=int, default=1e8, help='# graphs used to train')\r\nparser.add_argument('--ngf', dest='ngf', type=int, default=200, help='# of gen filters in first conv layer')\r\nparser.add_argument('--ndf', dest='ndf', type=int, default=200, help='# of discri filters in first conv layer')\r\nparser.add_argument('--input_nc', dest='input_nc', type=int, default=40, help='# of input channels')\r\nparser.add_argument('--output_nc', dest='output_nc', type=int, default=40, help='# of output channels')\r\nparser.add_argument('--niter', dest='niter', type=int, default=1, help='# of iter at starting learning rate')\r\nparser.add_argument('--lr_d', dest='lr_d', type=float, default=0.0001, help='initial learning rate for adam')\r\nparser.add_argument('--lr_g', dest='lr_g', type=float, default=0.00005, help='initial learning rate for adam')\r\nparser.add_argument('--lr_c', dest='lr_c', type=float, default=0.001, help='intial learning rate for adam')\r\nparser.add_argument('--beta1', dest='beta1', type=float, default=0.5, help='m omentum term of adam')\r\nparser.add_argument('--flip', dest='flip', type=bool, default=True, help='if flip the graphs for data argumentation')\r\nparser.add_argument('--save_epoch_freq', dest='save_epoch_freq', type=int, default=2,\r\n help='save a model every save_epoch_freq epochs (does not overwrite previously saved models)')\r\nparser.add_argument('--save_latest_freq', dest='save_latest_freq', type=int, default=5,\r\n help='save the latest model every latest_freq sgd iterations (overwrites the previous latest model)')\r\nparser.add_argument('--print_freq', dest='print_freq', type=int, default=50, help='print the debug information every print_freq iterations')\r\nparser.add_argument('--continue_train', dest='continue_train', type=bool, default=False,\r\n help='if continue training, load the latest model: 1: true, 0: false')\r\nparser.add_argument('--serial_batches', dest='serial_batches', type=bool, default=False,\r\n help='f 1, takes graphsin order to make 
batches, otherwise takes them randomly')\r\nparser.add_argument('--serial_batch_iter', dest='serial_batch_iter', type=bool, default=True, help='iter into serial graph list')\r\nparser.add_argument('--checkpoint_dir', dest='checkpoint_dir', default='./checkpoint_auth_50',\r\n help='models are saved here, need to be distinguishable for different datasets')\r\nparser.add_argument('--sample_dir', dest='sample_dir', default='./sample', help='samples are saved here')\r\nparser.add_argument('--test_dir', dest='test_dir', default='./validation_data_auth_50/',\r\n help='test samples are saved here, need to be distinguishable for different datasets')\r\nparser.add_argument('--L1_lambda', dest='L1_lambda', type=int, default=10000, help='weight on L1 term in objective')\r\nparser.add_argument('--train_dir', dest='train_dir', default='./', help='train samples are saved here')\r\nparser.add_argument('--graph_size', dest='graph_size', default=[35, 35], help='size of graph')\r\nparser.add_argument('--output_size', dest='output_size', default=[35, 35], help='size of graph')\r\nparser.add_argument('--dataset', dest='dataset', default='authentication', help='choose from authentication, scale-free and poisson-random')\r\nargs = parser.parse_args()\r\n\r\n\r\ndef main():\r\n start = datetime.datetime.now()\r\n if not os.path.exists(args.checkpoint_dir):\r\n os.makedirs(args.checkpoint_dir)\r\n if not os.path.exists(args.sample_dir):\r\n os.makedirs(args.sample_dir)\r\n if not os.path.exists(args.test_dir):\r\n os.makedirs(args.test_dir)\r\n if not os.path.exists(args.train_dir):\r\n os.makedirs(args.train_dir)\r\n tf.reset_default_graph()\r\n with tf.Session() as sess:\r\n model = graph2graph(sess, batch_size=args.batch_size,\r\n checkpoint_dir=args.checkpoint_dir, sample_dir=args.sample_dir, test_dir=args.test_dir, train_dir=args.train_dir,\r\n graph_size=args.graph_size, output_size=args.output_size, dataset=args.dataset)\r\n\r\n model.demo(args)\r\n end = datetime.datetime.now()\r\n print(end-start)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" ]
[ [ "tensorflow.reset_default_graph", "tensorflow.Session" ] ]
lkhphuc/pytorch-lightning
[ "6ebe0d7266fe29104f4c68dd9143326132885a30" ]
[ "pytorch_lightning/trainer/distrib_parts.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nRoot module for all distributed operations in Lightning.\nCurrently supports training on CPU, GPU (dp, ddp, ddp2, horovod) and TPU.\n\n\"\"\"\n\nfrom contextlib import ExitStack\nimport os\nfrom abc import ABC, abstractmethod\nimport time\nimport random\nimport torch\nfrom torch.optim.lr_scheduler import _LRScheduler\nfrom typing import Union, Callable, Any, List, Optional, Tuple, MutableSequence\n\nfrom pytorch_lightning.core.lightning import LightningModule\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.overrides.data_parallel import (\n LightningDistributedDataParallel,\n LightningDataParallel,\n)\nfrom pytorch_lightning.utilities import move_data_to_device, AMPType\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.distributed import rank_zero_only\n\ntry:\n from apex import amp\nexcept ImportError:\n amp = None\n\ntry:\n import torch_xla.core.xla_model as xm\nexcept ImportError:\n XLA_AVAILABLE = False\nelse:\n XLA_AVAILABLE = True\n\ntry:\n import horovod.torch as hvd\nexcept (ModuleNotFoundError, ImportError):\n HOROVOD_AVAILABLE = False\nelse:\n HOROVOD_AVAILABLE = True\n\n\nclass TrainerDPMixin(ABC):\n\n # this is just a summary on variables used in this abstract class,\n # the proper values/initialisation should be done in child class\n on_gpu: bool\n use_dp: bool\n use_ddp2: bool\n use_ddp: bool\n testing: bool\n use_single_gpu: bool\n root_gpu: ...\n amp_level: str\n precision: ...\n global_rank: int\n tpu_local_core_rank: int\n tpu_global_core_rank: int\n use_tpu: bool\n data_parallel_device_ids: ...\n progress_bar_callback: ...\n on_colab_kaggle: str\n save_spawn_weights: Callable\n logger: ...\n amp_type: AMPType\n\n @abstractmethod\n def call_setup_hook(self, *args):\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n @abstractmethod\n def run_pretrain_routine(self, *args):\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n @abstractmethod\n def init_optimizers(self, *args) -> Tuple[List, List, List]:\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n @abstractmethod\n def get_model(self) -> LightningModule:\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n @abstractmethod\n def reinit_scheduler_properties(self, *args):\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n @abstractmethod\n def setup(self, *args) -> None:\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n @abstractmethod\n def is_function_implemented(self, *args) -> bool:\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n def copy_trainer_model_properties(self, model):\n if isinstance(model, LightningDataParallel):\n ref_model = model.module\n elif isinstance(model, 
LightningDistributedDataParallel):\n ref_model = model.module\n else:\n ref_model = model\n\n for m in [model, ref_model]:\n m.trainer = self\n m.logger = self.logger\n m.use_dp = self.use_dp\n m.use_ddp2 = self.use_ddp2\n m.use_ddp = self.use_ddp\n m.use_amp = self.amp_type is not None\n m.testing = self.testing\n m.use_single_gpu = self.use_single_gpu\n m.use_tpu = self.use_tpu\n m.tpu_local_core_rank = self.tpu_local_core_rank\n m.tpu_global_core_rank = self.tpu_global_core_rank\n\n def transfer_batch_to_tpu(self, batch: Any, tpu_id: Optional[int] = None):\n \"\"\"\n Transfers the data to the TPU.\n\n Args:\n batch: A tensor or collection of tensors.\n tpu_id: The id of the TPU core. If omitted, the first available core is chosen.\n\n Return:\n the tensor on the TPU device.\n\n See Also:\n - :func:`~pytorch_lightning.utilities.apply_func.move_data_to_device`\n \"\"\"\n if not XLA_AVAILABLE:\n raise MisconfigurationException(\n 'Requested to transfer batch to TPU but XLA is not available.'\n ' Are you sure this machine has TPUs?'\n )\n device = xm.xla_device(tpu_id)\n return self.__transfer_batch_to_device(batch, device)\n\n def transfer_batch_to_gpu(self, batch: Any, gpu_id: Optional[int] = None):\n \"\"\"\n Transfers the data to the GPU.\n\n Args:\n batch: A tensor or collection of tensors.\n gpu_id: The id of the GPU device. If omitted, the first available GPU is chosen.\n\n Return:\n the tensor on the GPU device.\n\n See Also:\n - :func:`~pytorch_lightning.utilities.apply_func.move_data_to_device`\n \"\"\"\n device = torch.device('cuda', gpu_id)\n return self.__transfer_batch_to_device(batch, device)\n\n def __transfer_batch_to_device(self, batch: Any, device: torch.device):\n model = self.get_model()\n if model is not None:\n return model.transfer_batch_to_device(batch, device)\n return move_data_to_device(batch, device)\n\n def horovod_train(self, model):\n # call setup after the ddp process has connected\n self.call_setup_hook(model)\n\n if torch.cuda.is_available() and self.on_gpu:\n # Horovod: pin GPU to local rank\n assert self.root_gpu == hvd.local_rank()\n torch.cuda.set_device(self.root_gpu)\n model.cuda(self.root_gpu)\n\n # avoid duplicating progress bar\n if hvd.rank() != 0 and self.progress_bar_callback is not None:\n self.progress_bar_callback.disable()\n\n # CHOOSE OPTIMIZER\n # allow for lr schedulers as well\n self.optimizers, self.lr_schedulers, self.optimizer_frequencies = self.init_optimizers(model)\n\n # Horovod: scale the learning rate by the number of workers to account for\n # increased total batch size\n for optimizer in self.optimizers:\n for param_group in optimizer.param_groups:\n param_group['lr'] *= hvd.size()\n\n # Horovod: adjust base LR used by schedulers to match scaled optimizer initial LR\n for scheduler in self.lr_schedulers:\n scheduler = scheduler['scheduler']\n if isinstance(scheduler, _LRScheduler):\n scheduler.base_lrs = [lr * hvd.size() for lr in scheduler.base_lrs]\n\n if self.amp_type:\n model, optimizers = model.configure_apex(amp, model, self.optimizers, self.amp_level)\n self.optimizers = optimizers\n self.reinit_scheduler_properties(self.optimizers, self.lr_schedulers)\n\n # Horovod: broadcast parameters & optimizer state to ensure consistent initialization\n hvd.broadcast_parameters(model.state_dict(), root_rank=0)\n for optimizer in self.optimizers:\n hvd.broadcast_optimizer_state(optimizer, root_rank=0)\n\n def filter_named_parameters(model, optimizer):\n opt_params = set([p for group in optimizer.param_groups for p in 
group.get('params', [])])\n return [(name, p) for name, p in model.named_parameters() if p in opt_params]\n\n # Horovod: wrap optimizers to perform gradient aggregation via allreduce\n self.optimizers = [\n hvd.DistributedOptimizer(optimizer, named_parameters=filter_named_parameters(model, optimizer))\n for optimizer in self.optimizers\n ]\n\n # Update logger rank info from Horovod to avoid race conditions from different ranks\n # creating directories / writing files in the same locations.\n self.global_rank = hvd.rank()\n rank_zero_only.rank = self.global_rank\n\n with ExitStack() as stack:\n for optimizer in self.optimizers:\n # Synchronization will be performed explicitly following backward()\n stack.enter_context(optimizer.skip_synchronize())\n\n result = self.run_pretrain_routine(model)\n\n # Make sure all workers have finished training before returning to the user\n hvd.join()\n return result\n\n\ndef _normalize_parse_gpu_string_input(s: Union[int, str, List[int]]) -> Union[int, List[int]]:\n if isinstance(s, str):\n if s == '-1':\n return -1\n else:\n return [int(x.strip()) for x in s.split(',') if len(x) > 0]\n else:\n return s\n\n\ndef get_all_available_gpus() -> List[int]:\n \"\"\"\n Returns:\n a list of all available gpus\n \"\"\"\n return list(range(torch.cuda.device_count()))\n\n\ndef _check_data_type(device_ids: Any) -> None:\n \"\"\"\n Checks that the device_ids argument is one of: None, Int, String or List.\n Raises a MisconfigurationException otherwise.\n\n Args:\n device_ids: gpus/tpu_cores parameter as passed to the Trainer\n \"\"\"\n if device_ids is not None and (not isinstance(device_ids, (int, str, MutableSequence)) or isinstance(device_ids, bool)):\n raise MisconfigurationException(\"Device ID's (GPU/TPU) must be int, string or sequence of ints or None.\")\n\n\ndef _normalize_parse_gpu_input_to_list(gpus: Union[int, List[int]]) -> Optional[List[int]]:\n assert gpus is not None\n if isinstance(gpus, MutableSequence):\n return list(gpus)\n\n # must be an int\n if not gpus: # gpus==0\n return None\n if gpus == -1:\n return get_all_available_gpus()\n\n return list(range(gpus))\n\n\ndef sanitize_gpu_ids(gpus: List[int]) -> List[int]:\n \"\"\"\n Checks that each of the GPUs in the list is actually available.\n Raises a MisconfigurationException if any of the GPUs is not available.\n\n Args:\n gpus: list of ints corresponding to GPU indices\n\n Returns:\n unmodified gpus variable\n \"\"\"\n all_available_gpus = get_all_available_gpus()\n misconfig = False\n for gpu in gpus:\n if gpu not in all_available_gpus:\n misconfig = True\n\n if misconfig:\n # sometimes auto ddp might have different flags\n # but this is not what the user intended\n # correct for the user\n if len(gpus) == len(all_available_gpus):\n gpus = all_available_gpus\n else:\n raise MisconfigurationException(f\"\"\"\n You requested GPUs: {gpus}\n But your machine only has: {all_available_gpus}\n \"\"\")\n return gpus\n\n\ndef _parse_gpu_ids(gpus: Optional[Union[int, str, List[int]]]) -> Optional[List[int]]:\n \"\"\"\n Parses the GPU ids given in the format as accepted by the\n :class:`~pytorch_lightning.trainer.Trainer`.\n\n Args:\n gpus: An int -1 or string '-1' indicate that all available GPUs should be used.\n A list of ints or a string containing list of comma separated integers\n indicates specific GPUs to use.\n An int 0 means that no GPUs should be used.\n Any int N > 0 indicates that GPUs [0..N) should be used.\n\n Returns:\n a list of gpus to be used or ``None`` if no GPUs were requested\n\n If no 
GPUs are available but the value of gpus variable indicates request for GPUs\n then a MisconfigurationException is raised.\n \"\"\"\n\n # nothing was passed into the GPUs argument\n if callable(gpus):\n return None\n\n # Check that gpus param is None, Int, String or List\n _check_data_type(gpus)\n\n # Handle the case when no gpus are requested\n if gpus is None or isinstance(gpus, int) and gpus == 0:\n return None\n\n # We know user requested GPUs therefore if some of the\n # requested GPUs are not available an exception is thrown.\n\n gpus = _normalize_parse_gpu_string_input(gpus)\n gpus = _normalize_parse_gpu_input_to_list(gpus)\n if not gpus:\n raise MisconfigurationException(\"GPUs requested but none are available.\")\n gpus = sanitize_gpu_ids(gpus)\n\n return gpus\n\n\ndef determine_root_gpu_device(gpus: List[int]) -> Optional[int]:\n \"\"\"\n Args:\n gpus: non-empty list of ints representing which gpus to use\n\n Returns:\n designated root GPU device id\n \"\"\"\n if gpus is None:\n return None\n\n assert isinstance(gpus, list), \"gpus should be a list\"\n assert len(gpus) > 0, \"gpus should be a non empty list\"\n\n # set root gpu\n root_gpu = gpus[0]\n\n return root_gpu\n\n\ndef retry_jittered_backoff(func: Callable, num_retries: int = 5, cap_delay: float = 1.0, base_delay: float = 0.01):\n \"\"\"Retry jittered backoff.\n\n Based on:\n https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/\n\n Args:\n func: tested function\n num_retries: number of tries\n cap_delay: max sleep time\n base_delay: initial sleep time is 10ms\n \"\"\"\n sleep_delay = base_delay # initial sleep time is 10ms\n\n for i in range(num_retries):\n try:\n return func()\n except RuntimeError as err:\n if i == num_retries - 1:\n raise err\n else:\n continue\n time.sleep(sleep_delay)\n sleep_delay = min(cap_delay, random.uniform(base_delay, sleep_delay * 3))\n\n\ndef _parse_tpu_cores(tpu_cores: Union[int, str, List]) -> Optional[Union[List[int], int]]:\n \"\"\"\n Parses the tpu_cores given in the format as accepted by the\n :class:`~pytorch_lightning.trainer.Trainer`.\n\n Args:\n tpu_cores: An int 1 or string '1' indicate that 1 core with multi-processing should be used\n An int 8 or string '8' indicate that all 8 cores with multi-processing should be used\n A list of int or a string containing list of comma separated integer\n indicates specific TPU core to use.\n\n Returns:\n a list of tpu_cores to be used or ``None`` if no TPU cores were requested\n \"\"\"\n\n if callable(tpu_cores):\n return None\n\n _check_data_type(tpu_cores)\n\n if isinstance(tpu_cores, str):\n tpu_cores = _parse_tpu_cores_str(tpu_cores.strip())\n\n if not _tpu_cores_valid(tpu_cores):\n raise MisconfigurationException(\"`tpu_cores` can only be 1, 8 or [<1-8>]\")\n\n return tpu_cores\n\n\ndef _tpu_cores_valid(tpu_cores):\n return tpu_cores in (1, 8, None) or (\n isinstance(tpu_cores, (list, tuple, set)) and\n len(tpu_cores) == 1 and\n tpu_cores[0] in range(1, 9)\n )\n\n\ndef _parse_tpu_cores_str(tpu_cores):\n if tpu_cores in ('1', '8'):\n tpu_cores = int(tpu_cores)\n else:\n tpu_cores = [int(x.strip()) for x in tpu_cores.split(',') if len(x) > 0]\n return tpu_cores\n\n\ndef pick_single_gpu(exclude_gpus: list):\n for i in range(torch.cuda.device_count()):\n if i in exclude_gpus:\n continue\n # Try to allocate on device:\n device = torch.device(f\"cuda:{i}\")\n try:\n torch.ones(1).to(device)\n except RuntimeError:\n continue\n return i\n raise RuntimeError(\"No GPUs available.\")\n\n\ndef pick_multiple_gpus(nb):\n picked = 
[]\n for _ in range(nb):\n picked.append(pick_single_gpu(exclude_gpus=picked))\n\n return picked\n" ]
[ [ "torch.ones", "torch.cuda.device_count", "torch.cuda.is_available", "torch.device", "torch.cuda.set_device" ] ]
xpertdev/insightface
[ "78654944d332573715c04ab5956761f5215d0f51" ]
[ "reconstruction/PBIDR/code/utils/general.py" ]
[ "import os\nfrom glob import glob\nimport torch\n\ndef mkdir_ifnotexists(directory):\n if not os.path.exists(directory):\n os.mkdir(directory)\n\ndef get_class(kls):\n parts = kls.split('.')\n module = \".\".join(parts[:-1])\n m = __import__(module)\n for comp in parts[1:]:\n m = getattr(m, comp)\n return m\n\ndef glob_imgs(path):\n imgs = []\n for ext in ['*.png', '*.jpg', '*.JPEG', '*.JPG']:\n imgs.extend(glob(os.path.join(path, ext)))\n return imgs\n\ndef split_input(model_input, total_pixels):\n '''\n Split the input to fit Cuda memory for large resolution.\n Can decrease the value of n_pixels in case of cuda out of memory error.\n '''\n n_pixels = 10000\n split = []\n for i, indx in enumerate(torch.split(torch.arange(total_pixels).cuda(), n_pixels, dim=0)):\n data = model_input.copy()\n data['uv'] = torch.index_select(model_input['uv'], 1, indx)\n data['object_mask'] = torch.index_select(model_input['object_mask'], 1, indx)\n split.append(data)\n return split\n\ndef split_input_albedo(model_input, total_pixels):\n '''\n Split the input to fit Cuda memory for large resolution.\n Can decrease the value of n_pixels in case of cuda out of memory error.\n '''\n n_pixels = 10000\n split = []\n for i, indx in enumerate(torch.split(torch.arange(total_pixels).cuda(), n_pixels, dim=0)):\n data = model_input.copy()\n data['uv'] = torch.index_select(model_input['uv'], 1, indx)\n data['object_mask'] = torch.index_select(model_input['object_mask'], 1, indx)\n data['rgb'] = torch.index_select(model_input['rgb'], 1, indx)\n split.append(data)\n return split\n\ndef merge_output(res, total_pixels, batch_size):\n ''' Merge the split output. '''\n\n model_outputs = {}\n for entry in res[0]:\n if res[0][entry] is None:\n continue\n if len(res[0][entry].shape) == 1:\n model_outputs[entry] = torch.cat([r[entry].reshape(batch_size, -1, 1) for r in res],\n 1).reshape(batch_size * total_pixels)\n else:\n model_outputs[entry] = torch.cat([r[entry].reshape(batch_size, -1, r[entry].shape[-1]) for r in res],\n 1).reshape(batch_size * total_pixels, -1)\n\n return model_outputs" ]
[ [ "torch.arange", "torch.index_select" ] ]
isplab-unil/kin-genomic-privacy
[ "a563a1cc02269d240efd687b64d20f25b12a9650" ]
[ "backend/kin_genomic_privacy/sequenced_family_tree.py" ]
[ "# -*- coding: utf-8 -*-\n\n__author__ = \"Didier Dupertuis, Benjamin Trubert, Kévin Huguenin\"\n__copyright__ = \"Copyright 2019, The Information Security and Privacy Lab at the University of Lausanne (https://www.unil.ch/isplab/)\"\n__credits__ = [\"Didier Dupertuis\", \"Benjamin Trubert\", \"Kévin Huguenin\", \"Mathias Humbert\"]\n\n__version__ = \"1\"\n__license__ = \"MIT\"\n__maintainer__ = \"Didier Dupertuis\"\n__email__ = \"[email protected]\"\n\n__project__ = \"Data-less Kin Genomic Privacy Estimator\"\n\nfrom collections.abc import MutableMapping, Hashable\nimport hashlib\nimport json\nimport logging\nimport math\nimport random\nfrom copy import deepcopy\nimport time\nfrom typing import List, Tuple, Union\nimport warnings\n\nimport networkx as nx\nimport numpy as np\nfrom pgmpy.models import BayesianModel\n\nfrom neticaPy import Netica\nfrom .genomic_privacy import entropy, ABSOLUTE_EQUALITY_TOLERANCE, snp2int\nfrom .MendelianInheritanceCPD import MendelianInheritanceCPD\nfrom .NeticaFamilyTree import NeticaFamilyTree\n\nlogger = logging.getLogger(__name__)\nlogging.getLogger(\"neticaPy.netica\").setLevel(logging.WARNING)\n\nclass SequencedFamilyTree(Hashable):\n\n SEQUENCED_DNA_ATTRIBUTE = \"sequencedDNA\"\n FAMILY_NODE_ATTRIBUTE = \"family_node\"\n RELATION_MAP = {\n # great grand parent generation\n 'great grandparent':{\n 'predecessor': 'other',\n 'successor': 'grand uncle/aunt'}\n ,\n # Grand parent generation\n 'grand uncle/aunt':\n {\"predecessor\": \"other\",\n 'successor': \"cousin once-removed\", # once removed = same generation as parent\n },\n\n 'grandparent': {\n 'predecessor': 'great grandparent',\n 'successor': 'uncle/aunt'\n },\n # parent generation\n 'parent': {\n 'predecessor': 'grandparent',\n 'successor': 'sibling'\n },\n 'uncle/aunt': {\n 'predecessor': 'other',\n 'successor': 'cousin'\n },\n 'cousin once-removed':{\n \"predecessor\": \"other\",\n 'successor': \"second cousin\"\n },\n\n 'uncle-/aunt-in-law': {\n 'predecessor': 'other',\n 'successor': 'other'\n },\n\n # generation you\n 'you': {\n \"predecessor\": \"parent\",\n 'successor': \"child\",\n },\n 'partner': {\n \"predecessor\": \"other\",\n 'successor': \"other\",\n },\n 'sibling': {\n 'predecessor': 'other',\n 'successor': 'nephew/niece' # also niblings\n },\n 'sibling-in-law': {\n 'predecessor': 'other',\n 'successor': 'nephew/niece'\n },\n 'cousin': {\n 'predecessor': 'uncle-/aunt-in-law',\n 'successor': 'other'\n },\n 'second cousin':\n {\"predecessor\": \"other\",\n 'successor': \"other\"},\n\n # generation child\n 'child': {\n 'predecessor': 'partner',\n 'successor': 'grandchild'\n },\n 'child-in-law': {\n 'predecessor': 'partner',\n 'successor': 'grandchild'\n },\n 'nephew/niece': {\n 'predecessor': 'sibling-in-law',\n 'successor': 'grand nephew/niece'\n },\n # generation grand child\n 'grand nephew/niece': {\n 'predecessor': 'other',\n 'successor': 'great grand nephew/niece'\n },\n 'great grand nephew/niece':{\n 'predecessor': 'other',\n 'successor': 'other'\n },\n 'grandchild':\n {'predecessor': 'child-in-law',\n 'successor': 'great grandchild'\n },\n # generation great grand child\n 'great grandchild':{\n 'predecessor':'other',\n 'successor':'other'\n },\n # other\n 'other':\n {'predecessor': 'other',\n 'successor': 'other'}\n }\n _cache = {}\n\n def __init__(self, family_tree_edges: list, sequenced_relatives: list, target: str, family_nodes: list, minimize:bool=True, cache=None):\n \"\"\"\n Represents a Family Tree containing sequenced members and the target of an inference attack\n\n 
Minimizes the tree in the sense that it keeps only family members relevant\n to inferring the target's SNPs.\n Family nodes are needed in the constructor to build a unique signature for the\n family tree. They are removed later on for inference.\n :param family_tree_edges: a list of 2-tuples, each of which is of the form [parent, family] or [family, child] in the\n family tree.\n :param sequenced_relatives: list of the nodes that are sequenced\n :param target: the family member whose SNPs one wants to infer.\n :param family_nodes: list of the nodes who are family nodes.\n :param minimize: whether minimization needs to be performed on this tree.\n Used by SequencedFamilyTree.unserialize()\n \"\"\"\n # create Bayesian network\n self.family_tree = BayesianModel(deepcopy(family_tree_edges))\n self._inference_network = False\n # check requirements\n if (len(family_tree_edges) == 0):\n self._add_node(target)\n assert all(n!=\"\" for n in self.nodes)\n assert (target in self.nodes)\n assert (target not in sequenced_relatives)\n for n in sequenced_relatives: assert (n in self.nodes)\n for n in family_nodes: assert (n in self.nodes)\n # TODO: assert that the list of edges indeed describes a family tree: F node: pred <= 2, I node: pred <= 1, succ <=1, NO CYCLES\n\n self.target = target\n # flag sequenced and family nodes with networkx properly\n for n in self.nodes:\n self._set_sequenced(n, n in sequenced_relatives)\n\n for n in self.nodes:\n self._set_family_node(n, n in family_nodes)\n\n\n # remove useless nodes and add back missing parents\n if minimize:\n added_parents = self._add_missing_parents()\n if logger: logger.info(\"Missing parents added upon start: %s\", str(added_parents))\n removed_nodes = self._remove_target_independant_nodes()\n if logger: logger.info(\"Nodes removed because independant from target: %s\", str(removed_nodes))\n removed_nodes = self._remove_non_sequenced_leaf_nodes()\n if logger: logger.info(\"Nodes removed because non-sequenced leaves: %s\", str(removed_nodes))\n # add missing parents, so that everybody has 2 parents\n added_parents = self._add_missing_parents()\n if logger: logger.info(\"Missing parents added to ensure everybody has 2 parents: %s\", str(added_parents))\n\n # create signature\n self._signature_visited_nodes = None\n self.signature = hashlib.md5(self._signature(self.target).encode('ascii')).hexdigest()\n\n if self.signature not in SequencedFamilyTree._cache:\n SequencedFamilyTree._cache[self.signature] = {}\n if cache:\n SequencedFamilyTree._cache[self.signature] = {**SequencedFamilyTree._cache[self.signature], **cache}\n\n\n @staticmethod\n def unserialize(serialized_SequencedFamilyTree: str, **kwargs):\n \"\"\"Re-builds a SequencedFamilyTree from the output of serialize()\"\"\"\n serialization = json.loads(serialized_SequencedFamilyTree)\n if 'family_tree_edges' in serialization.keys():\n edges = serialization['family_tree_edges']\n else:\n edges = serialization['edges']\n if 'family_nodes' not in serialization :\n family_node = list(set(n for e in edges for n in e if \"F\" in n))\n else:\n family_node = serialization[\"family_nodes\"]\n\n return SequencedFamilyTree(\n edges,\n serialization[\"sequenced_relatives\"],\n serialization[\"target\"],\n family_node,\n **kwargs\n )\n\n def serialize(self) -> str:\n \"\"\"\n Serializes the tree to a json object in a string\n Note: there is no guarantee of uniqueness of serialization across 2 equivalent SequencedFamilyTree.\n For a unique signature, use SequencedFamilyTree.signature\n :return: a string 
faithfully representing the minimal tree of this SequencedFamilyTree.\n \"\"\"\n edges = [e for e in self.edges]\n serialization = {\n \"family_tree_edges\": edges,\n \"sequenced_relatives\": self.sequenced_relatives(),\n \"target\": self.target,\n \"family_nodes\": self.family_nodes()\n }\n return json.dumps(serialization)\n\n @property\n def cache(self):\n return SequencedFamilyTree._cache[self.signature]\n @cache.setter\n def cache(self, value):\n assert(isinstance(value, MutableMapping))\n SequencedFamilyTree._cache[self.signature] = value\n\n @property\n def inference_network(self):\n if not self._inference_network:\n self._inference_network = self._create_inference_network()\n return self._inference_network\n @inference_network.setter\n def inference_network(self, value):\n warnings.warn(\"SequencedFamilyTree.inference_network setter: non-mutable parameter, new value ignored\")\n\n @property\n def nodes(self):\n return self.family_tree.nodes()\n @nodes.setter\n def nodes(self, value):\n warnings.warn(\"SequencedFamilyTree.nodes setter: non-mutable parameter, new value ignored\")\n\n @property\n def edges(self):\n return self.family_tree.edges()\n @edges.setter\n def edges(self, value):\n warnings.warn(\"SequencedFamilyTree.edges setter: non-mutable parameter, new value ignored\")\n\n # TODO: convert attribute name strings as constant strings\n def is_sequenced(self, node) -> bool:\n assert node in self.nodes\n return nx.get_node_attributes(self.family_tree, SequencedFamilyTree.SEQUENCED_DNA_ATTRIBUTE)[node]\n\n def _set_sequenced(self, node, sequencedDNA) -> None:\n assert node in self.nodes and (not self.is_family_node(node) or not sequencedDNA)\n nx.set_node_attributes(self.family_tree, values={node: sequencedDNA}, name=SequencedFamilyTree.SEQUENCED_DNA_ATTRIBUTE)\n\n def sequenced_relatives(self) -> List[str]:\n return [n for n, seq in nx.get_node_attributes(self.family_tree, SequencedFamilyTree.SEQUENCED_DNA_ATTRIBUTE).items() if seq]\n\n def is_family_node(self, node) -> bool:\n assert node in self.nodes\n return nx.get_node_attributes(self.family_tree, SequencedFamilyTree.FAMILY_NODE_ATTRIBUTE).get(node)\n\n def _set_family_node(self, node, is_family_node) -> None:\n assert node in self.nodes\n nx.set_node_attributes(self.family_tree, values={node: is_family_node}, name=SequencedFamilyTree.FAMILY_NODE_ATTRIBUTE)\n\n def family_nodes(self) -> List[str]:\n return [n for n, seq in nx.get_node_attributes(self.family_tree, SequencedFamilyTree.FAMILY_NODE_ATTRIBUTE).items() if seq]\n\n def _generate_new_node_id(self):\n return max(list(self.nodes), key=len)+\"n\"\n\n def _add_node(self, node, weight=None, sequencedDNA=False, family_node=False):\n \"\"\"Adds a node to the SequencedFamilyTree with its proper sequencedDNA and family_node attributes\n\n internal method, a family tree should be immutable once created\"\"\"\n self.family_tree.add_node(node, weight)\n self._set_sequenced(node, sequencedDNA)\n self._set_family_node(node, family_node)\n\n def _create_inference_network(self) -> BayesianModel:\n bayesian_network = BayesianModel(self.family_tree.edges())\n bayesian_network.add_node(self.target)\n for fn in self.family_nodes():\n if len(list(bayesian_network.successors(fn)))==0:\n bayesian_network.add_edge(fn,self._generate_new_node_id())\n bayesian_network.add_edges_from([(pred, succ) for pred in bayesian_network.predecessors(fn) for succ in bayesian_network.successors(fn)])\n bayesian_network.remove_node(fn)\n return bayesian_network\n\n def 
_remove_target_independant_nodes(self):\n \"\"\"\n Remove all nodes that are independent from target given sequenced nodes\n \"\"\"\n bayes_net = self._create_inference_network()\n nodes_to_remove = set()\n sequenced_relatives = self.sequenced_relatives()\n for node in bayes_net.nodes():\n if not bayes_net.is_active_trail(node, self.target, [n for n in sequenced_relatives if n != node]) and node in self.family_tree.nodes():\n nodes_to_remove.add(node)\n\n print(\"_remove_target_independant_nodes pre-remove edges:\")\n print(self.edges)\n for node in nodes_to_remove:\n print(\"_remove_target_independant_nodes: %s\" % node)\n self.family_tree.remove_node(node)\n return nodes_to_remove\n\n def _remove_non_sequenced_leaf_nodes(self):\n \"\"\"\n Removes all non-sequenced leaf nodes: nodes that aren't between a sequenced node and the target.\n \"\"\"\n sequenced_relatives = self.sequenced_relatives()\n nodes_to_keep = set(sequenced_relatives)\n nodes_to_keep.add(self.target)\n undirected_self = self.family_tree.to_undirected()\n for sn in sequenced_relatives:\n for path in nx.all_simple_paths(undirected_self, sn, self.target):\n nodes_to_keep.update(path)\n nodes_to_remove = set([n for n in list(self.nodes) if n not in nodes_to_keep])\n\n for n in nodes_to_remove:\n self.family_tree.remove_node(n)\n return nodes_to_remove\n\n def _add_missing_parents(self):\n \"\"\"\n Ensures that every node has either 0 or 2 parents.\n\n Some parents might be missing at initialization or have been removed during minimization.\n \"\"\"\n added_parents = []\n for fn in self.family_nodes():\n for i in range(2 - len(list(self.family_tree.predecessors(fn)))):\n new_parent = self._generate_new_node_id()\n added_parents.append((new_parent, fn))\n self._add_node(new_parent)\n self.family_tree.add_edge(new_parent, fn)\n return added_parents\n\n def _signature(self, root):\n \"\"\"Recursive function creating a unique signature corresponding to this SequencedFamilyTree.\n\n :return: a string uniquely representing this tree\n \"\"\"\n \n self._signature_visited_nodes = set()\n\n def _signature_recursive(current):\n self._signature_visited_nodes.add(current)\n preds = sorted([_signature_recursive(n) for n in self.family_tree.predecessors(current) if n not in self._signature_visited_nodes])\n succs = sorted([_signature_recursive(n) for n in self.family_tree.successors(current) if n not in self._signature_visited_nodes])\n seq = self.is_sequenced(current)\n return 'N(%s|%s|%s)' % (str(seq), ','.join(preds), ','.join(succs))\n\n return _signature_recursive(root)\n\n def __hash__(self):\n return self.signature\n\n def copy(self):\n return deepcopy(self)\n\n def _to_netica_net(self, maf):\n assert maf>=0 and maf <= 0.5\n\n #TODO : ASCIIFY PROPERLY: REPRESENT THEM IN HEXADECIMAL/base 64\n def bytify(str):\n return bytes(str.replace(\"@\", \"X\"), \"utf-8\")\n\n # todo in MendelianInheritanceCPD\n mendelian_inheritance = [\n [[1.0, 0.0, 0.0],\n [0.5, 0.5, 0.0],\n [0.0, 1.0, 0.0]],\n [[0.5, 0.5, 0.0],\n [0.25, 0.5, 0.25],\n [0.0, 0.5, 0.5]],\n [[0.0, 1.0, 0.0],\n [0.0, 0.5, 0.5],\n [0.0, 0.0, 1.0]]\n ]\n\n # create netica obj, env and net\n netica = Netica()\n env = netica.newenv()\n res = netica.initenv(env)\n net = netica.newnet(b\"GenomicPrivacy\", env)\n\n # create the nodes, links & CPTs\n # nodes = {node:netica.newnode(bytes(node.replace(\"@\",\"\"), encoding), 3, net) for node in self.bayesian_network.nodes()}\n nodes = {node: netica.newnode(bytify(node), 3, net) for node in self.inference_network.nodes()}\n for node, 
netica_node in nodes.items():\n # todo calculate b\"AA, Aa, aa\"\n netica.setnodestatenames(netica_node, b\"AA, Aa, aa\")\n parents = list(self.inference_network.predecessors(node))\n # no parents\n if len(parents) == 0:\n netica.setnodeprobs(netica_node, [], MendelianInheritanceCPD.prior(maf))\n # parents\n elif len(parents) == 2:\n p0 = nodes[parents[0]]\n p1 = nodes[parents[1]]\n netica.addlink(p0, netica_node)\n netica.addlink(p1, netica_node)\n # CPT\n for i0 in range(0, 3):\n for i1 in range(0, 3):\n netica.setnodeprobs(netica_node, [i0, i1], mendelian_inheritance[i0][i1])\n\n netica.compilenet(net)\n\n # move node extration in NeticaFamilyTree.__init__()\n return NeticaFamilyTree(\n maf,\n netica,\n env,\n net,\n nodes,\n nodes[self.target],\n [nodes[sr] for sr in self.sequenced_relatives()]\n )\n\n def compute_privacy_metrics(self, maf: float, detailed_results: bool = False) -> Union[Tuple[float, float], List[dict]]:\n \"\"\"Computes the privacy metrics for a given family tree and maf\n\n :param maf: a minor allele frequency for which the calculation is to be done\n :param detailed_results: whether to return detailed results, see return\n :return: If detailed_results=False, a 2-tuple of the form (mean posterior entropy, mean expected error),\n if detailed_results=True, it returns a list of dict (1 dict per configuration of the sequenced relatives) of the form:\n {\n \"evidence\": dict,\n \"p_evidence\": float,\n \"target_distrib\": List[float],\n \"entropy_posterior\": float,\n \"exp_error\": float\n }\n \"\"\"\n assert maf>=0 and maf <= 0.5\n\n with self._to_netica_net(maf) as netica_net:\n result = [] # list used only for detailed results\n evidence = {} # dict to contain observed evidence in each case\n # using lists for reference\n mean_entropy_posterior = [0] # only used for undetailed resutls\n mean_exp_error = [0] # only used for undetailed resutls\n\n # no sequenced relatives or maf is zero-> return prior with probability 1...\n if len(netica_net.sequenced_relatives) == 0 or abs(maf) <= ABSOLUTE_EQUALITY_TOLERANCE:\n prior_distrib = MendelianInheritanceCPD.prior(maf)\n entropy_prior = entropy(prior_distrib).item()\n error_prior = np.sum(np.array(prior_distrib * (1 - (np.array(prior_distrib))))).item()\n if detailed_results:\n return [KgpMetricDetailedResults(\n [], 1, prior_distrib, error_prior, entropy_prior, entropy_prior\n )]\n else:\n return entropy_prior, error_prior\n\n def compute_privacy_metrics_recursively(sequenced_relatives, p_inputs):\n # termination condition 1 : probability of inputs is zero.\n p_inputs_zero = math.isclose(np.sum(p_inputs), 0.0, abs_tol=ABSOLUTE_EQUALITY_TOLERANCE)\n if p_inputs_zero:\n # return an dummy result\n if detailed_results:\n result.append(KgpMetricDetailedResults(\n deepcopy(evidence), 0, [], 1, 0, 0\n ))\n # termination condition 2: no more sequenced relatives with free SNP variant\n elif len(sequenced_relatives) == 0:\n # compute and return results\n #todo rename variables consistentlx\n target_distrib = np.array(netica_net.netica.getnodebeliefs(netica_net.target))\n entropy_posterior = entropy(target_distrib)\n exp_error = np.sum((target_distrib * (1 - target_distrib)))\n # add the sum elements:\n # todo with returns, not refs\n mean_entropy_posterior[0] = mean_entropy_posterior[0] + p_inputs * entropy_posterior\n mean_exp_error[0] = mean_exp_error[0] + p_inputs * exp_error\n # add entry to detailed results if needed\n if detailed_results:\n result.append(KgpMetricDetailedResults(\n deepcopy(evidence),\n p_inputs,\n 
deepcopy(target_distrib),\n exp_error,\n entropy_posterior,\n p_inputs * entropy_posterior\n ))\n # ...otherwise, continue recursion\n else:\n # take a new sequenced relative with free SNP...\n new_evidence_snp = sequenced_relatives.pop()\n # ...and go to the next step of the recursion for each possible variant\n # result = (0,0)\n for variant in snp2int.values():\n netica_net.netica.enterfinding(new_evidence_snp, variant)\n evidence[new_evidence_snp] = variant\n # todo compute at begining of func, not as argument\n p_inputs = netica_net.netica.findingsprobability(netica_net.net)\n compute_privacy_metrics_recursively(sequenced_relatives, p_inputs)\n # result = (result[0] + exp_entropy, result[1] + exp_error)\n del evidence[new_evidence_snp]\n netica_net.netica.retractnodefindings(new_evidence_snp)\n sequenced_relatives.append(new_evidence_snp) # reestablish prior state of sequenced_relatives\n\n compute_privacy_metrics_recursively(netica_net.sequenced_relatives, 1)\n\n self.cache[maf] = mean_entropy_posterior[0], mean_exp_error[0]\n\n if detailed_results:\n # invert netica evidence dictionary\n inv_netica_nodes = {v: k for k, v in netica_net.nodes.items()}\n for res in result:\n res.evidence = { inv_netica_nodes[netica_name]: v for netica_name,v in res.evidence.items()}\n return result\n else:\n return mean_entropy_posterior[0], mean_exp_error[0]\n\n def get_privacy_metrics(self, maf, detailed_results: bool = False):\n \"\"\"Returns a tuple with (mean posterior entropy, mean expected error) from cache if possible and by computing it otherwise.\n\n If detailed_results=True, it doesn't use cache.\"\"\"\n assert maf>=0 and maf <= 0.5\n if not detailed_results:\n return self.cache[maf] if maf in self.cache else self.compute_privacy_metrics(maf)\n else:\n return self.compute_privacy_metrics(maf, detailed_results)\n\n def compute_normalized_entropy(self, maf, detailed_results: bool = False):\n \"\"\"Computes normalized entropy correctly, handling lim maf->0 correctly\n\n It mainly handles the edge-case lim maf->0:\n - normalized entropy->0 if any sequenced individual reveals information\n - normalized entropy->1 if no sequenced individual reveals information\n \"\"\"\n assert maf>=0 and maf <= 0.5\n normalized_entropy = float('nan')\n if len(self.sequenced_relatives())==0:\n normalized_entropy = 1\n elif math.isclose(maf, 0.0, abs_tol=ABSOLUTE_EQUALITY_TOLERANCE):\n normalized_entropy = 0\n privacy_metrics = self.get_privacy_metrics(maf, detailed_results)\n prior_entropy = entropy(MendelianInheritanceCPD.prior(maf)).tolist()\n if not detailed_results:\n if math.isnan(normalized_entropy):\n normalized_entropy = privacy_metrics[0] / prior_entropy\n return normalized_entropy\n else:\n for case in privacy_metrics:\n if math.isnan(normalized_entropy):\n case.normalized_entropy = case.entropy_posterior / prior_entropy\n else:\n case.normalized_entropy = normalized_entropy\n return privacy_metrics\n\n def snps_privacy_score(self, mafs_to_compute:List[float], mafs_to_interpolate = None):\n mafs_to_compute = sorted(mafs_to_compute)\n norm_post_entropies = [self.compute_normalized_entropy(maf) for maf in mafs_to_compute]\n if mafs_to_interpolate:\n assert all(maf>=0 and maf <= 0.5 for maf in mafs_to_interpolate)\n return np.mean(np.interp(mafs_to_interpolate, mafs_to_compute, norm_post_entropies))\n else:\n return np.mean(norm_post_entropies)\n\n def get_family_relation(self, interest_node, node_other):\n \"\"\"Returns the family relation of node_who relative to node_ref as a string\n \n For example, if 
\"B\" is the grandfather of \"A\", get_family_relation(\"A\",\"B\")\n should return \"grandfather\". get_family_relation(\"B\",\"A\") Should return \"grandchild\".\n Covered family relation: spouse, parent, grandparent, uncle/aunt, uncle-/aunt-in-law, cousin,\n sibling, sibling-in-law, nephew/niece, child, child-in-law, grandchild.\n If the relation is further apart, it returns \"other\".\n If interest_node or node_other isn't in the tree, it returns a networkx NetworkXNoPath Exception.\"\"\"\n\n # networkx Directed Graph documentation:\n # https://networkx.github.io/documentation/networkx-1.11/reference/classes.digraph.html?highlight=directed%20graph\n\n # networkx shortest path documentation:\n # https://networkx.github.io/documentation/networkx-1.11/reference/algorithms.shortest_paths.html?highlight=shortest%20path\n\n # can be used to classify relationships\n\n family_graph = self.inference_network\n if interest_node not in family_graph.nodes():\n raise nx.exception.NetworkXNoPath('Node \"%s\" not found' % interest_node)\n elif node_other not in family_graph.nodes():\n raise nx.exception.NetworkXNoPath('Node \"%s\" not found' % node_other)\n\n relation_path = nx.shortest_path(family_graph.to_undirected(),\n interest_node,\n node_other)\n relation_path.remove(interest_node)\n current_node = interest_node\n current_qualif = \"you\"\n for next_node in relation_path:\n if next_node in family_graph.successors(current_node):\n current_qualif = SequencedFamilyTree.RELATION_MAP[current_qualif][\"successor\"]\n\n elif next_node in family_graph.predecessors(current_node):\n current_qualif = SequencedFamilyTree.RELATION_MAP[current_qualif]['predecessor']\n current_node = next_node\n return current_qualif\n\n\nclass KgpMetricDetailedResults:\n def __init__(self, evidence, p_evidence, target_distrib, exp_error, entropy_posterior, product_p_evidence_entropy_posterior, normalized_entropy=None):\n self.evidence = evidence\n self.p_evidence = p_evidence\n self.target_distrib = target_distrib\n self.exp_error = exp_error\n self.entropy_posterior = entropy_posterior\n self.product_p_evidence_entropy_posterior = product_p_evidence_entropy_posterior\n self.normalized_entropy = normalized_entropy\n" ]
[ [ "numpy.array", "numpy.interp", "numpy.sum", "numpy.mean" ] ]
eribean/RyStats
[ "1cdd0ea55a074cc81e61d2845216f395ba095f10" ]
[ "inferential/test/test_ttests.py" ]
[ "import unittest\n\nimport numpy as np\n\nfrom RyStats.inferential import (unequal_variance_ttest, equal_variance_ttest, \n one_sample_ttest, repeated_ttest)\n\nfrom RyStats.inferential.ttests import _p_value_and_confidence_intervals \n\n\nclass TestEqualVariance(unittest.TestCase):\n \"\"\"Test Fixture for Equal Variance TTest.\"\"\"\n\n def test_equal_variance_two_tailed(self):\n \"\"\"Testing equal variance.\"\"\"\n rng = np.random.default_rng(49045463547)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(10, 2, 200)\n data2 = rng.normal(10, 2, 200)\n\n ttest = equal_variance_ttest(data1, data2)\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01) \n\n def test_equal_variance_left_tailed(self):\n \"\"\"Testing equal variance.\"\"\"\n rng = np.random.default_rng(734433186)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(2, 1, 400)\n data2 = rng.normal(2, 1, 400)\n\n ttest = equal_variance_ttest(data1, data2, 'left')\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01) \n\n def test_equal_variance_right_tailed(self):\n \"\"\"Testing equal variance.\"\"\"\n rng = np.random.default_rng(987131781)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(2, 1, 400)\n data2 = rng.normal(2, 1, 400)\n\n ttest = equal_variance_ttest(data1, data2, 'right')\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01)\n\n\nclass TestUnEqualVariance(unittest.TestCase):\n \"\"\"Test Fixture for UnEqual Variance TTest.\"\"\"\n\n def test_unequal_variance_two_tailed(self):\n \"\"\"Testing unequal variance two tailed.\"\"\"\n rng = np.random.default_rng(135481321)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(10, 2, 200)\n data2 = rng.normal(10, 2, 200)\n\n ttest = unequal_variance_ttest(data1, data2)\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01) \n\n def test_unequal_variance_left_tailed(self):\n \"\"\"Testing unequal variance left tailed.\"\"\"\n rng = np.random.default_rng(324851351)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(2, 1, 100)\n data2 = rng.normal(2, 1, 100)\n\n ttest = unequal_variance_ttest(data1, data2, 'left')\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01) \n\n def test_unequal_variance_right_tailed(self):\n \"\"\"Testing unequal variance right tailed.\"\"\"\n rng = np.random.default_rng(887943278)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(2, 1, 100)\n data2 = rng.normal(2, 1, 100)\n\n ttest = unequal_variance_ttest(data1, data2, 'right')\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01) \n\n\nclass TestRepeated(unittest.TestCase):\n \"\"\"Test Fixture for Repeated TTest.\"\"\"\n\n def test_repeated_two_tailed(self):\n \"\"\"Testing repeated two tailed.\"\"\"\n rng = np.random.default_rng(6464584234)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(10, 2, 100)\n data2 = data1 + rng.normal(0, .02, 100)\n\n ttest = repeated_ttest(data1, data2)\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01) \n\n def test_repeated_left_tailed(self):\n \"\"\"Testing repeated left tailed.\"\"\"\n rng = np.random.default_rng(734516519)\n\n 
passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(2, 1, 100)\n data2 = data1 + rng.normal(0, .02, 100)\n\n ttest = repeated_ttest(data1, data2, 'left')\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01) \n\n def test_repeated_right_tailed(self):\n \"\"\"Testing repeated right tailed.\"\"\"\n rng = np.random.default_rng(3571954324)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(2, 1, 100)\n data2 = data1 + rng.normal(0, .02, 100)\n\n ttest = repeated_ttest(data1, data2, 'right')\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01)\n\n def test_unequal_sample_size(self):\n \"\"\"Testing bad inputs.\"\"\"\n with self.assertRaises(AssertionError):\n repeated_ttest(np.ones((10,)), np.ones((12)))\n\n\nclass TestOneSample(unittest.TestCase):\n \"\"\"Test Fixture for One-Sample TTest.\"\"\"\n\n def test_onesample_two_tailed(self):\n \"\"\"Testing onesample two tailed.\"\"\"\n rng = np.random.default_rng(13489132474)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(-5, 2, 100)\n\n ttest = one_sample_ttest(data1, -5)\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01) \n\n def test_onesample_left_tailed(self):\n \"\"\"Testing onesample left tailed.\"\"\"\n rng = np.random.default_rng(9876138761251)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(15, 1, 100)\n\n ttest = one_sample_ttest(data1, 15, 'left')\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01) \n\n def test_one_sample_right_tailed(self):\n \"\"\"Testing onesample right tailed.\"\"\"\n rng = np.random.default_rng(615419864354)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(12.2, 1, 100)\n\n ttest = one_sample_ttest(data1, 12.2, 'right')\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01)\n\n\n\nclass TestMiscTest(unittest.TestCase):\n \"\"\"Test Fixture for random ttests.\"\"\"\n\n def test_fail_tailed_option(self):\n \"\"\"Testing bad tailed option.\"\"\"\n\n with self.assertRaises(ValueError):\n _p_value_and_confidence_intervals(2.3, 100, 'greater')\n\n def test_confidence_intervals(self):\n \"\"\"Testing the confidence interval test.\"\"\"\n # Taken from a T-Test table\n\n # Two Tailed\n p, ci = _p_value_and_confidence_intervals(2.228, 10, 'two')\n\n self.assertAlmostEqual(p, .05, delta = .001)\n self.assertTrue(ci.shape == (2, ))\n np.testing.assert_allclose(ci, [-2.228, 2.228], atol=.001)\n\n # Left One-Tailed\n p, ci = _p_value_and_confidence_intervals(1.895, 7, 'left')\n\n self.assertAlmostEqual(p, .05, delta = .001)\n self.assertTrue(ci.shape == (2, ))\n self.assertTrue(np.isinf(ci[0]))\n np.testing.assert_allclose(ci, [-np.inf, 1.895], atol=.001)\n\n # Right One-Tailed\n p, ci = _p_value_and_confidence_intervals(1.761, 14, 'right')\n\n self.assertAlmostEqual(1-p, .05, delta = .001)\n self.assertTrue(ci.shape == (2, ))\n self.assertTrue(np.isinf(ci[1])) \n np.testing.assert_allclose(ci, [-1.761, np.inf], atol=.001)\n\n\n\nif __name__ == \"\"\"__main__\"\"\":\n unittest.main()" ]
[ [ "numpy.ones", "numpy.isinf", "numpy.random.default_rng", "numpy.testing.assert_allclose" ] ]
cheind/pytorch-blender-dr
[ "fd2e449dd81723bb1978f005736104f27cc1770b" ]
[ "py_torch/IoU_loss_test.py" ]
[ "import torch\r\nimport numpy as np\r\nimport math\r\n\r\ndef bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9):\r\n # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4\r\n box2 = box2.T # 4xn\r\n\r\n # Get the coordinates of bounding boxes\r\n if x1y1x2y2: # x1, y1, x2, y2 = box1\r\n b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]\r\n b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]\r\n else: # transform from xywh to xyxy\r\n b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2\r\n b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2\r\n b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2\r\n b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2\r\n\r\n # Intersection area\r\n inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \\\r\n (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)\r\n\r\n # Union Area\r\n w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps\r\n w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps\r\n union = w1 * h1 + w2 * h2 - inter + eps\r\n\r\n iou = inter / union\r\n if GIoU or DIoU or CIoU:\r\n cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width\r\n ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height\r\n if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1\r\n c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared\r\n rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +\r\n (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared\r\n if DIoU:\r\n return iou - rho2 / c2 # DIoU\r\n elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47\r\n v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)\r\n with torch.no_grad():\r\n alpha = v / ((1 + eps) - iou + v)\r\n # return torch.nan_to_num(iou - (rho2 / c2 + v * alpha), nan=1.0) # CIoU\r\n return iou - (rho2 / c2 + v * alpha) # CIoU\r\n else: # GIoU https://arxiv.org/pdf/1902.09630.pdf\r\n c_area = cw * ch + eps # convex area\r\n return iou - (c_area - union) / c_area # GIoU\r\n else:\r\n return iou # IoU\r\n\r\nif __name__ == '__main__':\r\n # ground truth\r\n box1 = torch.tensor([150, 120, 50, 30]) # xmin, ymin, widht, height\r\n # detections\r\n box2 = torch.tensor([\r\n [150, 120, 50, 30], # perfect match\r\n [150, 120, 30, 50],\r\n [140, 130, 50, 30],\r\n [10, 20, 50, 30], # non overlapping\r\n [0, 0, 0, 0], # invalid\r\n ])\r\n\r\n iou = bbox_iou(box1, box2, x1y1x2y2=False, GIoU=False, DIoU=False, CIoU=False)\r\n print('IoU:', iou, '==> bbox loss:', (1.0 - iou).mean())\r\n iou = bbox_iou(box1, box2, x1y1x2y2=False, GIoU=True, DIoU=False, CIoU=False)\r\n print('GIoU:', iou, '==> bbox loss:', (1.0 - iou).mean())\r\n iou = bbox_iou(box1, box2, x1y1x2y2=False, GIoU=False, DIoU=True, CIoU=False)\r\n print('DIoU:', iou, '==> bbox loss:', (1.0 - iou).mean())\r\n iou = bbox_iou(box1, box2, x1y1x2y2=False, GIoU=False, DIoU=False, CIoU=True)\r\n print('CIoU:', iou, '==> bbox loss:', (1.0 - iou).mean())\r\n\r\n # special case checking\r\n box1 = torch.tensor([0, 0, 0, 0]) # xmin, ymin, widht, height\r\n box2 = torch.tensor([[0, 0, 0, 0]]) # xmin, ymin, widht, height\r\n iou = bbox_iou(box1, box2, x1y1x2y2=False, GIoU=False, DIoU=False, CIoU=False)\r\n print('IoU:', iou, '==> bbox loss:', (1.0 - iou).mean())\r\n iou = bbox_iou(box1, box2, x1y1x2y2=False, GIoU=True, DIoU=False, CIoU=False)\r\n print('GIoU:', iou, '==> bbox loss:', (1.0 - iou).mean())\r\n iou = 
bbox_iou(box1, box2, x1y1x2y2=False, GIoU=False, DIoU=True, CIoU=False)\r\n print('DIoU:', iou, '==> bbox loss:', (1.0 - iou).mean())\r\n iou = bbox_iou(box1, box2, x1y1x2y2=False, GIoU=False, DIoU=False, CIoU=True)\r\n print('CIoU:', iou, '==> bbox loss:', (1.0 - iou).mean())\r\n " ]
[ [ "torch.atan", "torch.min", "torch.no_grad", "torch.tensor", "torch.max" ] ]
IncyLiu/autokeras
[ "e9dbf66b005e2ffaabe29bc366bb4e72fa79add8" ]
[ "tests/nn/test_layers.py" ]
[ "from autokeras.nn.layers import *\nimport numpy as np\n\n\ndef test_global_layer():\n layer = GlobalAvgPool2d()\n inputs = torch.Tensor(np.ones((100, 50, 30, 40)))\n assert layer(inputs).size() == (100, 50)\n" ]
[ [ "numpy.ones" ] ]
RupertDodkins/medis
[ "bdb1f00fb93506da2a1f251bc6780e70e97a16c5" ]
[ "examples/MKID_pic.py" ]
[ "import os\nimport numpy as np\nimport matplotlib as mpl\nmpl.use('Qt5Agg')\nfrom medis.params import tp, mp, cp, sp, ap, iop\nimport medis.Detector.get_photon_data as gpd\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm\nfrom medis.Utils.plot_tools import loop_frames, quicklook_im, view_datacube, compare_images, indep_images, grid\nimport pickle\nfrom medis.Utils.misc import dprint\n\n\nsp.show_wframe = False\nsp.save_obs = False\nsp.show_cube = False\nsp.num_processes = 1\nsp.return_E = True\n\n# Astro Parameters\nap.companion = False\n# ap.contrast = [5e-3, 1e-3]\nap.star_photons = int(1e7) # # G type star 10ly away gives 1e6 cts/cm^2/s\nap.lods = [[-1.2, 4.5]] # initial location (no rotation)\nap.exposure_time = 0.1 # 0.001\n\n# Telescope/optics Parameters\ntp.diam = 5.\ntp.beam_ratio = 0.4\ntp.obscure = True\ntp.use_ao = True\ntp.ao_act = 50\ntp.use_atmos = True\ntp.use_zern_ab = True\ntp.occulter_type = 'Vortex' # 'None'\ntp.aber_params = {'CPA': True,\n 'NCPA': True,\n 'QuasiStatic': False, # or Static\n 'Phase': True,\n 'Amp': False,\n 'n_surfs': 8,\n 'OOPP': [16,8,8,16,4,4,8,16]}#False}#\n\n# Wavelength and Spectral Range\nap.band = np.array([800, 1500])\nap.nwsamp = 1\nap.w_bins = 1\n\nnum_exp = 1 #5000\nap.sample_time = 0.1\nap.numframes = int(num_exp * ap.exposure_time / ap.sample_time)\ntp.piston_error = True\ntp.rot_rate = 0 # deg/s\ntp.pix_shift = [30,0]\nlod = 8\n\n# MKID Parameters\nmp.distort_phase = True\nmp.phase_uncertainty = True\nmp.phase_background = True\nmp.respons_var = True\nmp.bad_pix = True\nmp.hot_pix = 1\nmp.array_size = np.array([80,125])\nmp.R_mean = 8\nmp.g_mean = 0.2\nmp.g_sig = 0.04\nmp.bg_mean = -10\nmp.bg_sig = 40\nmp.pix_yield = 0.7 # check dis\n\n# sp.get_ints = {'w': [0], 'c': [0]}\n\n# ***** These need to be outside the if statement to have an effect!! 
****\niop.aberdata = 'Palomar' # Rename Data Directory\niop.update(\"MKID_pic-ideal/\")\nif os.path.exists(iop.int_maps):\n os.remove(iop.int_maps)\n\ntp.detector = 'ideal'\n\nsp.save_locs = np.array(['add_obscurations', 'add_aber', 'quick_ao', 'dummy'])\nsp.gui_map_type = np.array(['phase', 'phase', 'phase', 'dummy'])\nphase_ind = range(4)\n\n\nif __name__ == '__main__':\n\n # Starting the Simulation\n print(\"Starting MKID_pic ideal-detector example\")\n fields = gpd.run_medis()[0, :, 0, 0]\n print(\"finished Ideal-loop of MKID_pic Example File\")\n\n ideal = np.mean(np.abs(fields) ** 2, axis=0) # keep an ideal-detector intensity image for the comparison plot below (assumes fields holds complex E-fields)\n fields = np.angle(fields[phase_ind], deg=False)\n grid(fields, logAmp=False)\n\n# **** ditto *****\niop.update(\"MKID_pic-ideal2/\")\ntp.detector = 'MKIDs'\n\nif __name__ == '__main__':\n\n print(\"*****************************************************\")\n print(\"*****************************************************\")\n print(\"*****************************************************\")\n print(\"Starting MKID_pic MKID detector example \")\n mkid = gpd.run_medis()[0, :]\n print(\"finished MKID-loop of MKID_pic Example File\")\n\n\n compare_images(mkid[::2], vmax=200, logAmp=True, vmin=1, title=r'$I (cts)$', annos=['MKIDs 800 nm', '940 nm', '1080 nm', '1220 nm', '1360 nm', '1500 nm'])\n quicklook_im(np.mean(mkid[5:-1], axis=0), anno='MEDIS J Band', vmax=400, axis=None, title=r'$I (cts)$', logAmp=True, label='e')\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(9, 3.8))\n labels = ['Ideal', 'MKID']\n images = [ideal, np.mean(mkid, axis=0)] # ideal-detector image saved above vs. wavelength-averaged MKID cube\n vmaxs = [0.01, 100]\n\n\n for m, ax in enumerate(axes):\n im = ax.imshow(images[m], interpolation='none', origin='lower', cmap=\"YlGnBu_r\", vmax=vmaxs[m])#norm= LogNorm(),\n props = dict(boxstyle='square', facecolor='w', alpha=0.7)\n ax.text(0.05, 0.05, labels[m], transform=ax.transAxes, fontweight='bold', color='k', fontsize=17,\n family='serif', bbox=props)\n ax.tick_params(direction='in', which='both', right=True, top=True)\n cax = fig.add_axes([0.44+ 0.485*m, 0.03, 0.02, 0.89])\n # cb = fig.colorbar(im, cax=cax, orientation='vertical',format=ticker.FuncFormatter(fmt))\n cb = fig.colorbar(im, cax=cax, orientation='vertical')\n ax.axis('off')\n # ax.set_xlabel('Radial Separation')\n # ax.set_yscale('log')\n # if p == 0:\n # ax.set_ylabel('Intensity Ratio')\n # ax.legend()\n plt.subplots_adjust(left=0.01, right=0.95, top=0.93, bottom=0.02)\n # plt.savefig(str(p) + '.pdf')\n # plt.show()\n\n plt.show()\n\n\n" ]
[ [ "matplotlib.pyplot.subplots", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.show", "numpy.angle", "matplotlib.use", "numpy.array", "numpy.mean" ] ]
BubuLK/sfepy
[ "127ab753a2f4f24ed359d0152088d11227c3dd49", "f02f88c5df9814ad710c658429e23c90744b0d9d" ]
[ "sfepy/terms/terms_basic.py", "tests/test_homogenization_engine.py" ]
[ "import numpy as nm\n\nfrom sfepy.base.base import assert_\nfrom sfepy.linalg import dot_sequences\nfrom sfepy.terms.terms import Term, terms\n\nclass ZeroTerm(Term):\n r\"\"\"\n A do-nothing term useful for introducing additional variables into the\n equations.\n\n :Definition:\n\n .. math::\n 0\n\n :Arguments:\n - virtual : :math:`q` or :math:`\\ul{v}`\n - state : :math:`p` or :math:`\\ul{u}`\n \"\"\"\n name = 'dw_zero'\n arg_types = ('virtual', 'state')\n arg_shapes = {'virtual' : ('N', None), 'state' : 'N'}\n\n @staticmethod\n def function(out):\n out.fill(0.0)\n\n return 0\n\n def get_fargs(self, vvar, svar,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n return ()\n\nclass IntegrateVolumeTerm(Term):\n r\"\"\"\n Evaluate (weighted) variable in a volume region.\n\n Depending on evaluation mode, integrate a variable over a volume region\n ('eval'), average it in elements ('el_avg') or interpolate it into volume\n quadrature points ('qp').\n\n Supports 'eval', 'el_avg' and 'qp' evaluation modes.\n\n :Definition:\n\n .. math::\n \\int_\\Omega y \\mbox{ , } \\int_\\Omega \\ul{y} \\\\\n \\int_\\Omega c y \\mbox{ , } \\int_\\Omega c \\ul{y}\n\n .. math::\n \\mbox{vector for } K \\from \\Ical_h:\n \\int_{T_K} y / \\int_{T_K} 1 \\mbox{ , }\n \\int_{T_K} \\ul{y} / \\int_{T_K} 1 \\\\\n \\mbox{vector for } K \\from \\Ical_h:\n \\int_{T_K} c y / \\int_{T_K} 1 \\mbox{ , }\n \\int_{T_K} c \\ul{y} / \\int_{T_K} 1\n\n .. math::\n y|_{qp} \\mbox{ , } \\ul{y}|_{qp} \\\\\n c y|_{qp} \\mbox{ , } c \\ul{y}|_{qp}\n\n :Arguments:\n - material : :math:`c` (optional)\n - parameter : :math:`y` or :math:`\\ul{y}`\n \"\"\"\n name = 'ev_volume_integrate'\n arg_types = ('opt_material', 'parameter')\n arg_shapes = [{'opt_material' : '1, 1', 'parameter' : 'N'},\n {'opt_material' : None}]\n\n @staticmethod\n def function(out, val_qp, vg, fmode):\n if fmode == 2:\n out[:] = val_qp\n status = 0\n\n else:\n status = vg.integrate(out, val_qp, fmode)\n\n return status\n\n def get_fargs(self, material, parameter,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n vg, _ = self.get_mapping(parameter)\n\n val_qp = self.get(parameter, 'val')\n if material is not None:\n val_qp *= material\n\n fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)\n\n return val_qp, vg, fmode\n\n def get_eval_shape(self, material, parameter,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n n_el, n_qp, dim, n_en, n_c = self.get_data_shape(parameter)\n\n if mode != 'qp':\n n_qp = 1\n\n return (n_el, n_qp, n_c, 1), parameter.dtype\n\nclass IntegrateSurfaceTerm(Term):\n r\"\"\"\n Evaluate (weighted) variable in a surface region.\n\n Depending on evaluation mode, integrate a variable over a surface region\n ('eval'), average it in element faces ('el_avg') or interpolate it into\n surface quadrature points ('qp'). For vector variables, setting `term_mode`\n to `'flux'` leads to computing corresponding fluxes for the three modes\n instead.\n\n Supports 'eval', 'el_avg' and 'qp' evaluation modes.\n\n :Definition:\n\n .. math::\n \\int_\\Gamma y \\mbox{ , } \\int_\\Gamma \\ul{y}\n \\mbox{ , } \\int_\\Gamma \\ul{y} \\cdot \\ul{n} \\\\\n \\int_\\Gamma c y \\mbox{ , } \\int_\\Gamma c \\ul{y}\n \\mbox{ , } \\int_\\Gamma c \\ul{y} \\cdot \\ul{n} \\mbox{ flux }\n\n .. 
math::\n \\mbox{vector for } K \\from \\Ical_h:\n \\int_{T_K} y / \\int_{T_K} 1 \\mbox{ , }\n \\int_{T_K} \\ul{y} / \\int_{T_K} 1 \\mbox{ , }\n \\int_{T_K} (\\ul{y} \\cdot \\ul{n}) / \\int_{T_K} 1 \\\\\n \\mbox{vector for } K \\from \\Ical_h:\n \\int_{T_K} c y / \\int_{T_K} 1 \\mbox{ , }\n \\int_{T_K} c \\ul{y} / \\int_{T_K} 1 \\mbox{ , }\n \\int_{T_K} (c \\ul{y} \\cdot \\ul{n}) / \\int_{T_K} 1\n\n .. math::\n y|_{qp} \\mbox{ , } \\ul{y}|_{qp}\n \\mbox{ , } (\\ul{y} \\cdot \\ul{n})|_{qp} \\mbox{ flux } \\\\\n c y|_{qp} \\mbox{ , } c \\ul{y}|_{qp}\n \\mbox{ , } (c \\ul{y} \\cdot \\ul{n})|_{qp} \\mbox{ flux }\n\n :Arguments:\n - material : :math:`c` (optional)\n - parameter : :math:`y` or :math:`\\ul{y}`\n \"\"\"\n name = 'ev_surface_integrate'\n arg_types = ('opt_material', 'parameter')\n arg_shapes = [{'opt_material' : '1, 1', 'parameter' : 'N'},\n {'opt_material' : None}]\n integration = 'surface'\n\n @staticmethod\n def function(out, val_qp, sg, fmode):\n if fmode == 2:\n out[:] = val_qp\n status = 0\n\n elif fmode == 5:\n normal = sg.normal\n out[:] = dot_sequences(val_qp, normal)\n status = 0\n\n else:\n status = sg.integrate(out, val_qp, fmode)\n\n return status\n\n def get_fargs(self, material, parameter,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n sg, _ = self.get_mapping(parameter)\n\n val_qp = self.get(parameter, 'val')\n if material is not None:\n val_qp *= material\n\n fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)\n if term_mode == 'flux':\n n_fa, n_qp, dim, n_fn, n_c = self.get_data_shape(parameter)\n if n_c == dim:\n fmode += 3\n\n return val_qp, sg, fmode\n\n def get_eval_shape(self, material, parameter,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n n_fa, n_qp, dim, n_fn, n_c = self.get_data_shape(parameter)\n\n if mode != 'qp':\n n_qp = 1\n\n if term_mode == 'flux':\n n_c = 1\n\n return (n_fa, n_qp, n_c, 1), parameter.dtype\n\nclass IntegrateVolumeOperatorTerm(Term):\n r\"\"\"\n Volume integral of a test function weighted by a scalar function\n :math:`c`.\n\n :Definition:\n\n .. math::\n \\int_\\Omega q \\mbox{ or } \\int_\\Omega c q\n\n :Arguments:\n - material : :math:`c` (optional)\n - virtual : :math:`q`\n \"\"\"\n name = 'dw_volume_integrate'\n arg_types = ('opt_material', 'virtual')\n arg_shapes = [{'opt_material' : '1, 1', 'virtual' : (1, None)},\n {'opt_material' : None}]\n\n @staticmethod\n def function(out, material, bf, geo):\n bf_t = nm.tile(bf.transpose((0, 1, 3, 2)), (out.shape[0], 1, 1, 1))\n bf_t = nm.ascontiguousarray(bf_t)\n if material is not None:\n status = geo.integrate(out, material * bf_t)\n else:\n status = geo.integrate(out, bf_t)\n return status\n\n def get_fargs(self, material, virtual,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n assert_(virtual.n_components == 1)\n geo, _ = self.get_mapping(virtual)\n\n return material, geo.bf, geo\n\nclass IntegrateSurfaceOperatorTerm(IntegrateVolumeOperatorTerm):\n r\"\"\"\n Surface integral of a test function weighted by a scalar function\n :math:`c`.\n\n :Definition:\n\n .. math::\n \\int_{\\Gamma} q \\mbox{ or } \\int_\\Gamma c q\n\n :Arguments:\n - material : :math:`c` (optional)\n - virtual : :math:`q`\n \"\"\"\n name = 'dw_surface_integrate'\n arg_types = ('opt_material', 'virtual')\n arg_shapes = [{'opt_material' : '1, 1', 'virtual' : (1, None)},\n {'opt_material' : None}]\n integration = 'surface'\n\nclass VolumeTerm(Term):\n r\"\"\"\n Volume of a domain. Uses approximation of the parameter variable.\n\n :Definition:\n\n .. 
math::\n \\int_\\Omega 1\n\n :Arguments:\n - parameter : any variable\n \"\"\"\n name = 'd_volume'\n arg_types = ('parameter',)\n arg_shapes = [{'parameter' : 'N'}]\n\n @staticmethod\n def function(out, geo):\n out[:] = geo.volume\n\n return 0\n\n def get_fargs(self, parameter,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n geo, _ = self.get_mapping(parameter)\n\n return geo,\n\n def get_eval_shape(self, parameter,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n n_cell, n_qp, dim, n_n, n_c = self.get_data_shape(parameter)\n\n return (n_cell, 1, 1, 1), parameter.dtype\n\nclass SurfaceTerm(VolumeTerm):\n r\"\"\"\n Surface of a domain. Uses approximation of the parameter variable.\n\n :Definition:\n\n .. math::\n \\int_\\Gamma 1\n\n :Arguments:\n - parameter : any variable\n \"\"\"\n name = 'd_surface'\n arg_types = ('parameter',)\n arg_shapes = {'parameter' : 'N'}\n integration = 'surface'\n\nclass VolumeSurfaceTerm(Term):\n r\"\"\"\n Volume of a :math:`D`-dimensional domain, using a surface integral. Uses\n approximation of the parameter variable.\n\n :Definition:\n\n .. math::\n 1 / D \\int_\\Gamma \\ul{x} \\cdot \\ul{n}\n\n :Arguments:\n - parameter : any variable\n \"\"\"\n name = 'd_volume_surface'\n arg_types = ('parameter',)\n arg_shapes = {'parameter' : 'N'}\n integration = 'surface'\n\n function = staticmethod(terms.d_volume_surface)\n\n def get_fargs(self, parameter,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n sg, _ = self.get_mapping(parameter)\n\n sd = parameter.field.surface_data[self.region.name]\n coor = parameter.field.get_coor()\n\n return coor, sg, sd.econn.copy()\n\n def get_eval_shape(self, parameter,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n n_fa, n_qp, dim, n_fn, n_c = self.get_data_shape(parameter)\n\n return (n_fa, 1, 1, 1), parameter.dtype\n\nclass SurfaceMomentTerm(Term):\n r\"\"\"\n Surface integral of the outer product of the unit outward normal\n :math:`\\ul{n}` and the coordinate :math:`\\ul{x}` shifted by :math:`\\ul{x}_0`\n\n :Definition:\n\n .. math::\n \\int_{\\Gamma} \\ul{n} (\\ul{x} - \\ul{x}_0)\n\n :Arguments:\n - material : :math:`\\ul{x}_0` (special)\n - parameter : any variable\n \"\"\"\n name = 'd_surface_moment'\n arg_types = ('material', 'parameter')\n arg_shapes = {'material' : '.: D', 'parameter' : 'N'}\n integration = 'surface'\n\n function = staticmethod(terms.di_surface_moment)\n\n def get_fargs(self, material, parameter,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n sg, _ = self.get_mapping(parameter)\n\n sd = parameter.field.surface_data[self.region.name]\n coor = parameter.field.get_coor() \\\n - nm.asarray(material, dtype=nm.float64)[None,:]\n\n return coor, sg, sd.econn.copy()\n\n def get_eval_shape(self, material, parameter,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n n_fa, n_qp, dim, n_fn, n_c = self.get_data_shape(parameter)\n\n return (n_fa, 1, dim, dim), parameter.dtype\n\nclass IntegrateVolumeMatTerm(Term):\n r\"\"\"\n Evaluate material parameter :math:`m` in a volume region.\n\n Depending on evaluation mode, integrate a material parameter over a\n volume region ('eval'), average it in elements ('el_avg') or\n interpolate it into volume quadrature points ('qp').\n\n Uses reference mapping of :math:`y` variable.\n\n Supports 'eval', 'el_avg' and 'qp' evaluation modes.\n\n :Definition:\n\n .. math::\n \\int_\\Omega m\n\n .. math::\n \\mbox{vector for } K \\from \\Ical_h: \\int_{T_K} m / \\int_{T_K} 1\n\n .. 
math::\n m|_{qp}\n\n :Arguments:\n - material : :math:`m` (can have up to two dimensions)\n - parameter : :math:`y`\n \"\"\"\n name = 'ev_volume_integrate_mat'\n arg_types = ('material', 'parameter')\n arg_shapes = [{'material' : 'N, N', 'parameter' : 'N'}]\n\n @staticmethod\n def function(out, mat, geo, fmode):\n if fmode == 2:\n out[:] = mat\n status = 0\n\n else:\n status = geo.integrate(out, mat, fmode)\n\n return status\n\n def get_fargs(self, mat, parameter,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n geo, _ = self.get_mapping(parameter)\n\n fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)\n\n return mat, geo, fmode\n\n def get_eval_shape(self, mat, parameter,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n n_el, n_qp, dim, n_en, n_c = self.get_data_shape(parameter)\n n_row, n_col = mat.shape[-2:]\n\n if mode != 'qp':\n n_qp = 1\n\n return (n_el, n_qp, n_row, n_col), mat.dtype\n\nclass IntegrateSurfaceMatTerm(IntegrateVolumeMatTerm):\n r\"\"\"\n Evaluate material parameter :math:`m` in a surface region.\n\n Depending on evaluation mode, integrate a material parameter over a\n surface region ('eval'), average it in faces ('el_avg') or\n interpolate it into surface quadrature points ('qp').\n\n Uses reference mapping of :math:`y` variable.\n\n Supports 'eval', 'el_avg' and 'qp' evaluation modes.\n\n :Definition:\n\n .. math::\n \\int_\\Gamma m\n\n .. math::\n \\mbox{vector for } K \\from \\Ical_h: \\int_{T_K} m / \\int_{T_K} 1\n\n .. math::\n m|_{qp}\n\n :Arguments:\n - material : :math:`m` (can have up to two dimensions)\n - parameter : :math:`y`\n \"\"\"\n name = 'ev_surface_integrate_mat'\n arg_types = ('material', 'parameter')\n arg_shapes = [{'material' : 'N, N', 'parameter' : 'N'}]\n integration = 'surface'\n\nclass SumNodalValuesTerm(Term):\n r\"\"\"\n Sum nodal values.\n\n :Arguments:\n - parameter : :math:`p` or :math:`\\ul{u}`\n \"\"\"\n name = 'd_sum_vals'\n arg_types = ('parameter',)\n arg_shapes = {'parameter' : 'N'}\n\n @staticmethod\n def function(out, vec):\n out[:] = nm.sum(vec, 0)\n\n return 0\n\n def get_fargs(self, parameter,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n vec = parameter.get_state_in_region(self.region)\n\n return vec,\n\n def get_eval_shape(self, parameter,\n mode=None, term_mode=None, diff_var=None, **kwargs):\n n_el, n_qp, dim, n_en, n_c = self.get_data_shape(parameter)\n\n return (n_el, n_c), parameter.dtype\n", "from __future__ import absolute_import\nfrom sfepy.base.testing import TestCommon\nfrom sfepy.homogenization.engine import HomogenizationEngine as he\nfrom sfepy.homogenization.engine import HomogenizationWorkerMulti as hwm\nimport six\nimport numpy as nm\n\nclass Test(TestCommon):\n\n @staticmethod\n def from_conf(conf, options):\n test = Test(conf=conf, options=options)\n return test\n\n def test_dependencies(self):\n get_deps = hwm.get_sorted_dependencies\n\n coefs = {'A' : {'requires' : ['a', 'd', 'c.B']},\n 'B' : {'requires' : ['b']}}\n requirements = {'a' : {'requires' : ['b', 'c']},\n 'b' : {'requires' : ['c']},\n 'c' : {},\n 'd' : {'requires' : ['b', 'a']}}\n\n deps = get_deps(requirements, coefs, None)\n ok = ((deps == ['c', 'b', 'a', 'd', 'c.B', 'c.A'])\n or (deps == ['c', 'b', 'c.B', 'a', 'd', 'c.A'])\n or (deps == ['c', 'b', 'a', 'c.B', 'd', 'c.A']))\n self.report(deps, ':', ok)\n\n coefs['B']['requires'] = ['b', 'c.A']\n\n try:\n deps = get_deps(requirements, coefs, None)\n\n except ValueError as err:\n self.report('detected:', str(err))\n _ok = 'circular requirement \"c.' 
in str(err)\n\n else:\n _ok = False\n\n self.report('circular dependency detection 1:', _ok)\n ok = ok and _ok\n\n coefs['B']['requires'] = ['b']\n requirements['c']['requires'] = ['d']\n\n try:\n deps = get_deps(requirements, coefs, None)\n\n except ValueError as err:\n self.report('detected:', str(err))\n _ok = 'circular requirement' in str(err)\n\n else:\n _ok = False\n\n self.report('circular dependency detection 2:', _ok)\n ok = ok and _ok\n\n return ok\n\n def test_chunk_micro(self):\n coefs = {'A' : {'requires' : ['a', 'd', 'c.B']},\n 'B' : {'requires' : ['b']}}\n requirements = {'a' : {'requires' : ['b', 'c']},\n 'b' : {'requires' : ['c']},\n 'c' : {},\n 'd' : {'requires' : ['b', 'a']}}\n\n volumes = {'total': {'expression': ''}}\n coefs = he.define_volume_coef(coefs, volumes)\n orig_deps_num = len(requirements) + len(coefs)\n\n num_workers, num_micro, chunks_per_worker = 5, 61, 2\n store_micro_idxs = [0, 1, 18, 20, 21]\n micro_chunk_tab, requirements, coefs = \\\n hwm.chunk_micro_tasks(num_workers, num_micro, requirements, coefs,\n chunks_per_worker, store_micro_idxs)\n\n dep_names = hwm.get_sorted_dependencies(requirements, coefs, None)\n\n ok = (orig_deps_num * len(micro_chunk_tab)) == len(dep_names)\n self.report('splitting into chunks:', ok)\n\n deps = {}\n for k in dep_names:\n chunk_id = int(k[-3:])\n nmic = len(range(*micro_chunk_tab[chunk_id].indices(num_micro)))\n deps[k] = [1] * nmic\n if k[2:] in coefs and 'Volume_total' not in k:\n reqs = '#'.join(coefs[k[2:]]['requires'])\n ok = ok and 'Volume_total' in reqs\n\n self.report('volume dependency:', ok)\n\n deps = hwm.dechunk_reqs_coefs(deps, len(micro_chunk_tab))\n\n ok = ok and\\\n nm.all([(nm.sum(v) == num_micro) for v in six.itervalues(deps)])\n self.report('merging chunks:', ok)\n\n return ok\n" ]
[ [ "numpy.sum", "numpy.ascontiguousarray", "numpy.asarray" ], [ "numpy.sum" ] ]
SebastianJia/e2e-coref
[ "9a68d6816cfb4ac00bca9c83f587891239215dce" ]
[ "coref_model.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport operator\nimport random\nimport math\nimport json\nimport threading\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_hub as hub\nimport h5py\n\nimport util\nimport coref_ops\nimport conll\nimport metrics\n\nclass CorefModel(object):\n def __init__(self, config):\n self.config = config\n self.context_embeddings = util.EmbeddingDictionary(config[\"context_embeddings\"])\n self.head_embeddings = util.EmbeddingDictionary(config[\"head_embeddings\"], maybe_cache=self.context_embeddings)\n self.char_embedding_size = config[\"char_embedding_size\"]\n self.char_dict = util.load_char_dict(config[\"char_vocab_path\"])\n self.max_span_width = config[\"max_span_width\"]\n self.genres = { g:i for i,g in enumerate(config[\"genres\"]) }\n if config[\"lm_path\"]:\n self.lm_file = h5py.File(self.config[\"lm_path\"], \"r\")\n else:\n self.lm_file = None\n self.lm_layers = self.config[\"lm_layers\"]\n self.lm_size = self.config[\"lm_size\"]\n self.eval_data = None # Load eval data lazily.\n\n input_props = []\n input_props.append((tf.string, [None, None])) # Tokens.\n input_props.append((tf.float32, [None, None, self.context_embeddings.size])) # Context embeddings.\n input_props.append((tf.float32, [None, None, self.head_embeddings.size])) # Head embeddings.\n input_props.append((tf.float32, [None, None, self.lm_size, self.lm_layers])) # LM embeddings.\n input_props.append((tf.int32, [None, None, None])) # Character indices.\n input_props.append((tf.int32, [None])) # Text lengths.\n input_props.append((tf.int32, [None])) # Speaker IDs.\n input_props.append((tf.int32, [])) # Genre.\n input_props.append((tf.bool, [])) # Is training.\n input_props.append((tf.int32, [None])) # Gold starts.\n input_props.append((tf.int32, [None])) # Gold ends.\n input_props.append((tf.int32, [None])) # Cluster ids.\n\n self.queue_input_tensors = [tf.placeholder(dtype, shape) for dtype, shape in input_props]\n dtypes, shapes = zip(*input_props)\n queue = tf.PaddingFIFOQueue(capacity=10, dtypes=dtypes, shapes=shapes)\n self.enqueue_op = queue.enqueue(self.queue_input_tensors)\n self.input_tensors = queue.dequeue()\n\n self.predictions, self.loss = self.get_predictions_and_loss(*self.input_tensors)\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n self.reset_global_step = tf.assign(self.global_step, 0)\n learning_rate = tf.train.exponential_decay(self.config[\"learning_rate\"], self.global_step,\n self.config[\"decay_frequency\"], self.config[\"decay_rate\"], staircase=True)\n trainable_params = tf.trainable_variables()\n gradients = tf.gradients(self.loss, trainable_params)\n gradients, _ = tf.clip_by_global_norm(gradients, self.config[\"max_gradient_norm\"])\n optimizers = {\n \"adam\" : tf.train.AdamOptimizer,\n \"sgd\" : tf.train.GradientDescentOptimizer\n }\n optimizer = optimizers[self.config[\"optimizer\"]](learning_rate)\n self.train_op = optimizer.apply_gradients(zip(gradients, trainable_params), global_step=self.global_step)\n\n def start_enqueue_thread(self, session):\n with open(self.config[\"train_path\"]) as f:\n train_examples = [json.loads(jsonline) for jsonline in f.readlines()]\n def _enqueue_loop():\n while True:\n random.shuffle(train_examples)\n for example in train_examples:\n tensorized_example = self.tensorize_example(example, is_training=True)\n feed_dict = dict(zip(self.queue_input_tensors, tensorized_example))\n 
session.run(self.enqueue_op, feed_dict=feed_dict)\n enqueue_thread = threading.Thread(target=_enqueue_loop)\n enqueue_thread.daemon = True\n enqueue_thread.start()\n\n def restore(self, session):\n # Don't try to restore unused variables from the TF-Hub ELMo module.\n vars_to_restore = [v for v in tf.global_variables() if \"module/\" not in v.name]\n saver = tf.train.Saver(vars_to_restore)\n checkpoint_path = os.path.join(self.config[\"log_dir\"], \"model.max.ckpt\")\n print(\"Restoring from {}\".format(checkpoint_path))\n session.run(tf.global_variables_initializer())\n saver.restore(session, checkpoint_path)\n\n def load_lm_embeddings(self, doc_key):\n if self.lm_file is None:\n return np.zeros([0, 0, self.lm_size, self.lm_layers])\n file_key = doc_key.replace(\"/\", \":\")\n group = self.lm_file[file_key]\n num_sentences = len(list(group.keys()))\n sentences = [group[str(i)][...] for i in range(num_sentences)]\n lm_emb = np.zeros([num_sentences, max(s.shape[0] for s in sentences), self.lm_size, self.lm_layers])\n for i, s in enumerate(sentences):\n lm_emb[i, :s.shape[0], :, :] = s\n return lm_emb\n\n def tensorize_mentions(self, mentions):\n if len(mentions) > 0:\n starts, ends = zip(*mentions)\n else:\n starts, ends = [], []\n return np.array(starts), np.array(ends)\n\n def tensorize_span_labels(self, tuples, label_dict):\n if len(tuples) > 0:\n starts, ends, labels = zip(*tuples)\n else:\n starts, ends, labels = [], [], []\n return np.array(starts), np.array(ends), np.array([label_dict[c] for c in labels])\n\n def tensorize_example(self, example, is_training):\n clusters = example[\"clusters\"]\n\n gold_mentions = sorted(tuple(m) for m in util.flatten(clusters))\n gold_mention_map = {m:i for i,m in enumerate(gold_mentions)}\n cluster_ids = np.zeros(len(gold_mentions))\n for cluster_id, cluster in enumerate(clusters):\n for mention in cluster:\n cluster_ids[gold_mention_map[tuple(mention)]] = cluster_id + 1\n\n sentences = example[\"sentences\"]\n num_words = sum(len(s) for s in sentences)\n speakers = util.flatten(example[\"speakers\"])\n\n assert num_words == len(speakers)\n\n max_sentence_length = max(len(s) for s in sentences)\n max_word_length = max(max(max(len(w) for w in s) for s in sentences), max(self.config[\"filter_widths\"]))\n text_len = np.array([len(s) for s in sentences])\n tokens = [[\"\"] * max_sentence_length for _ in sentences]\n context_word_emb = np.zeros([len(sentences), max_sentence_length, self.context_embeddings.size])\n head_word_emb = np.zeros([len(sentences), max_sentence_length, self.head_embeddings.size])\n char_index = np.zeros([len(sentences), max_sentence_length, max_word_length])\n for i, sentence in enumerate(sentences):\n for j, word in enumerate(sentence):\n tokens[i][j] = word\n context_word_emb[i, j] = self.context_embeddings[word]\n head_word_emb[i, j] = self.head_embeddings[word]\n char_index[i, j, :len(word)] = [self.char_dict[c] for c in word]\n tokens = np.array(tokens)\n# print(context_word_emb[0][0].shape)\n# print(head_word_emb[0,0].shape)\n\n speaker_dict = { s:i for i,s in enumerate(set(speakers)) }\n speaker_ids = np.array([speaker_dict[s] for s in speakers])\n\n doc_key = example[\"doc_key\"]\n genre = self.genres[doc_key[:2]]\n\n gold_starts, gold_ends = self.tensorize_mentions(gold_mentions)\n\n lm_emb = self.load_lm_embeddings(doc_key)\n# print(lm_emb.shape)\n example_tensors = (tokens, context_word_emb, head_word_emb, lm_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids)\n\n if 
is_training and len(sentences) > self.config[\"max_training_sentences\"]:\n return self.truncate_example(*example_tensors)\n else:\n return example_tensors\n\n def truncate_example(self, tokens, context_word_emb, head_word_emb, lm_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids):\n max_training_sentences = self.config[\"max_training_sentences\"]\n num_sentences = context_word_emb.shape[0]\n assert num_sentences > max_training_sentences\n\n sentence_offset = random.randint(0, num_sentences - max_training_sentences)\n word_offset = text_len[:sentence_offset].sum()\n num_words = text_len[sentence_offset:sentence_offset + max_training_sentences].sum()\n tokens = tokens[sentence_offset:sentence_offset + max_training_sentences, :]\n context_word_emb = context_word_emb[sentence_offset:sentence_offset + max_training_sentences, :, :]\n head_word_emb = head_word_emb[sentence_offset:sentence_offset + max_training_sentences, :, :]\n lm_emb = lm_emb[sentence_offset:sentence_offset + max_training_sentences, :, :, :]\n char_index = char_index[sentence_offset:sentence_offset + max_training_sentences, :, :]\n text_len = text_len[sentence_offset:sentence_offset + max_training_sentences]\n\n speaker_ids = speaker_ids[word_offset: word_offset + num_words]\n gold_spans = np.logical_and(gold_ends >= word_offset, gold_starts < word_offset + num_words)\n gold_starts = gold_starts[gold_spans] - word_offset\n gold_ends = gold_ends[gold_spans] - word_offset\n cluster_ids = cluster_ids[gold_spans]\n\n return tokens, context_word_emb, head_word_emb, lm_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids\n\n def get_candidate_labels(self, candidate_starts, candidate_ends, labeled_starts, labeled_ends, labels):\n same_start = tf.equal(tf.expand_dims(labeled_starts, 1), tf.expand_dims(candidate_starts, 0)) # [num_labeled, num_candidates]\n same_end = tf.equal(tf.expand_dims(labeled_ends, 1), tf.expand_dims(candidate_ends, 0)) # [num_labeled, num_candidates]\n same_span = tf.logical_and(same_start, same_end) # [num_labeled, num_candidates]\n candidate_labels = tf.matmul(tf.expand_dims(labels, 0), tf.to_int32(same_span)) # [1, num_candidates]\n candidate_labels = tf.squeeze(candidate_labels, 0) # [num_candidates]\n return candidate_labels\n\n def get_dropout(self, dropout_rate, is_training):\n return 1 - (tf.to_float(is_training) * dropout_rate)\n\n def coarse_to_fine_pruning(self, top_span_emb, top_span_mention_scores, c):\n k = util.shape(top_span_emb, 0)\n top_span_range = tf.range(k) # [k]\n antecedent_offsets = tf.expand_dims(top_span_range, 1) - tf.expand_dims(top_span_range, 0) # [k, k]\n antecedents_mask = antecedent_offsets >= 1 # [k, k]\n fast_antecedent_scores = tf.expand_dims(top_span_mention_scores, 1) + tf.expand_dims(top_span_mention_scores, 0) # [k, k]\n fast_antecedent_scores += tf.log(tf.to_float(antecedents_mask)) # [k, k]\n fast_antecedent_scores += self.get_fast_antecedent_scores(top_span_emb) # [k, k]\n\n _, top_antecedents = tf.nn.top_k(fast_antecedent_scores, c, sorted=False) # [k, c]\n top_antecedents_mask = util.batch_gather(antecedents_mask, top_antecedents) # [k, c]\n top_fast_antecedent_scores = util.batch_gather(fast_antecedent_scores, top_antecedents) # [k, c]\n top_antecedent_offsets = util.batch_gather(antecedent_offsets, top_antecedents) # [k, c]\n return top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets\n\n def distance_pruning(self, top_span_emb, 
top_span_mention_scores, c):\n k = util.shape(top_span_emb, 0)\n top_antecedent_offsets = tf.tile(tf.expand_dims(tf.range(c) + 1, 0), [k, 1]) # [k, c]\n raw_top_antecedents = tf.expand_dims(tf.range(k), 1) - top_antecedent_offsets # [k, c]\n top_antecedents_mask = raw_top_antecedents >= 0 # [k, c]\n top_antecedents = tf.maximum(raw_top_antecedents, 0) # [k, c]\n\n top_fast_antecedent_scores = tf.expand_dims(top_span_mention_scores, 1) + tf.gather(top_span_mention_scores, top_antecedents) # [k, c]\n top_fast_antecedent_scores += tf.log(tf.to_float(top_antecedents_mask)) # [k, c]\n return top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets\n\n def get_predictions_and_loss(self, tokens, context_word_emb, head_word_emb, lm_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids):\n self.dropout = self.get_dropout(self.config[\"dropout_rate\"], is_training)\n self.lexical_dropout = self.get_dropout(self.config[\"lexical_dropout_rate\"], is_training)\n self.lstm_dropout = self.get_dropout(self.config[\"lstm_dropout_rate\"], is_training)\n\n num_sentences = tf.shape(context_word_emb)[0]\n max_sentence_length = tf.shape(context_word_emb)[1]\n\n context_emb_list = [context_word_emb]\n head_emb_list = [head_word_emb]\n\n if self.config[\"char_embedding_size\"] > 0:\n char_emb = tf.gather(tf.get_variable(\"char_embeddings\", [len(self.char_dict), self.config[\"char_embedding_size\"]]), char_index) # [num_sentences, max_sentence_length, max_word_length, emb]\n flattened_char_emb = tf.reshape(char_emb, [num_sentences * max_sentence_length, util.shape(char_emb, 2), util.shape(char_emb, 3)]) # [num_sentences * max_sentence_length, max_word_length, emb]\n flattened_aggregated_char_emb = util.cnn(flattened_char_emb, self.config[\"filter_widths\"], self.config[\"filter_size\"]) # [num_sentences * max_sentence_length, emb]\n aggregated_char_emb = tf.reshape(flattened_aggregated_char_emb, [num_sentences, max_sentence_length, util.shape(flattened_aggregated_char_emb, 1)]) # [num_sentences, max_sentence_length, emb]\n context_emb_list.append(aggregated_char_emb)\n head_emb_list.append(aggregated_char_emb)\n\n if not self.lm_file:\n elmo_module = hub.Module(\"https://tfhub.dev/google/elmo/2\")\n lm_embeddings = elmo_module(\n inputs={\"tokens\": tokens, \"sequence_len\": text_len},\n signature=\"tokens\", as_dict=True)\n word_emb = lm_embeddings[\"word_emb\"] # [num_sentences, max_sentence_length, 512]\n lm_emb = tf.stack([tf.concat([word_emb, word_emb], -1),\n lm_embeddings[\"lstm_outputs1\"],\n lm_embeddings[\"lstm_outputs2\"]], -1) # [num_sentences, max_sentence_length, 1024, 3]\n lm_emb_size = util.shape(lm_emb, 2)\n lm_num_layers = util.shape(lm_emb, 3)\n with tf.variable_scope(\"lm_aggregation\"):\n self.lm_weights = tf.nn.softmax(tf.get_variable(\"lm_scores\", [lm_num_layers], initializer=tf.constant_initializer(0.0)))\n self.lm_scaling = tf.get_variable(\"lm_scaling\", [], initializer=tf.constant_initializer(1.0))\n flattened_lm_emb = tf.reshape(lm_emb, [num_sentences * max_sentence_length * lm_emb_size, lm_num_layers])\n flattened_aggregated_lm_emb = tf.matmul(flattened_lm_emb, tf.expand_dims(self.lm_weights, 1)) # [num_sentences * max_sentence_length * emb, 1]\n aggregated_lm_emb = tf.reshape(flattened_aggregated_lm_emb, [num_sentences, max_sentence_length, lm_emb_size])\n aggregated_lm_emb *= self.lm_scaling\n context_emb_list.append(aggregated_lm_emb)\n\n context_emb = tf.concat(context_emb_list, 2) # [num_sentences, 
max_sentence_length, emb]\n head_emb = tf.concat(head_emb_list, 2) # [num_sentences, max_sentence_length, emb]\n context_emb = tf.nn.dropout(context_emb, self.lexical_dropout) # [num_sentences, max_sentence_length, emb]\n head_emb = tf.nn.dropout(head_emb, self.lexical_dropout) # [num_sentences, max_sentence_length, emb]\n\n text_len_mask = tf.sequence_mask(text_len, maxlen=max_sentence_length) # [num_sentence, max_sentence_length]\n\n context_outputs = self.lstm_contextualize(context_emb, text_len, text_len_mask) # [num_words, emb]\n num_words = util.shape(context_outputs, 0)\n\n genre_emb = tf.gather(tf.get_variable(\"genre_embeddings\", [len(self.genres), self.config[\"feature_size\"]]), genre) # [emb]\n\n sentence_indices = tf.tile(tf.expand_dims(tf.range(num_sentences), 1), [1, max_sentence_length]) # [num_sentences, max_sentence_length]\n flattened_sentence_indices = self.flatten_emb_by_sentence(sentence_indices, text_len_mask) # [num_words]\n flattened_head_emb = self.flatten_emb_by_sentence(head_emb, text_len_mask) # [num_words]\n\n candidate_starts = tf.tile(tf.expand_dims(tf.range(num_words), 1), [1, self.max_span_width]) # [num_words, max_span_width]\n candidate_ends = candidate_starts + tf.expand_dims(tf.range(self.max_span_width), 0) # [num_words, max_span_width]\n candidate_start_sentence_indices = tf.gather(flattened_sentence_indices, candidate_starts) # [num_words, max_span_width]\n candidate_end_sentence_indices = tf.gather(flattened_sentence_indices, tf.minimum(candidate_ends, num_words - 1)) # [num_words, max_span_width]\n candidate_mask = tf.logical_and(candidate_ends < num_words, tf.equal(candidate_start_sentence_indices, candidate_end_sentence_indices)) # [num_words, max_span_width]\n flattened_candidate_mask = tf.reshape(candidate_mask, [-1]) # [num_words * max_span_width]\n candidate_starts = tf.boolean_mask(tf.reshape(candidate_starts, [-1]), flattened_candidate_mask) # [num_candidates]\n candidate_ends = tf.boolean_mask(tf.reshape(candidate_ends, [-1]), flattened_candidate_mask) # [num_candidates]\n candidate_sentence_indices = tf.boolean_mask(tf.reshape(candidate_start_sentence_indices, [-1]), flattened_candidate_mask) # [num_candidates]\n\n candidate_cluster_ids = self.get_candidate_labels(candidate_starts, candidate_ends, gold_starts, gold_ends, cluster_ids) # [num_candidates]\n\n candidate_span_emb = self.get_span_emb(flattened_head_emb, context_outputs, candidate_starts, candidate_ends) # [num_candidates, emb]\n candidate_mention_scores = self.get_mention_scores(candidate_span_emb) # [k, 1]\n candidate_mention_scores = tf.squeeze(candidate_mention_scores, 1) # [k]\n\n k = tf.to_int32(tf.floor(tf.to_float(tf.shape(context_outputs)[0]) * self.config[\"top_span_ratio\"]))\n top_span_indices = coref_ops.extract_spans(tf.expand_dims(candidate_mention_scores, 0),\n tf.expand_dims(candidate_starts, 0),\n tf.expand_dims(candidate_ends, 0),\n tf.expand_dims(k, 0),\n util.shape(context_outputs, 0),\n True) # [1, k]\n top_span_indices.set_shape([1, None])\n top_span_indices = tf.squeeze(top_span_indices, 0) # [k]\n\n top_span_starts = tf.gather(candidate_starts, top_span_indices) # [k]\n top_span_ends = tf.gather(candidate_ends, top_span_indices) # [k]\n top_span_emb = tf.gather(candidate_span_emb, top_span_indices) # [k, emb]\n top_span_cluster_ids = tf.gather(candidate_cluster_ids, top_span_indices) # [k]\n top_span_mention_scores = tf.gather(candidate_mention_scores, top_span_indices) # [k]\n top_span_sentence_indices = tf.gather(candidate_sentence_indices, 
top_span_indices) # [k]\n top_span_speaker_ids = tf.gather(speaker_ids, top_span_starts) # [k]\n\n c = tf.minimum(self.config[\"max_top_antecedents\"], k)\n\n if self.config[\"coarse_to_fine\"]:\n top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets = self.coarse_to_fine_pruning(top_span_emb, top_span_mention_scores, c)\n else:\n top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets = self.distance_pruning(top_span_emb, top_span_mention_scores, c)\n\n dummy_scores = tf.zeros([k, 1]) # [k, 1]\n for i in range(self.config[\"coref_depth\"]):\n with tf.variable_scope(\"coref_layer\", reuse=(i > 0)):\n top_antecedent_emb = tf.gather(top_span_emb, top_antecedents) # [k, c, emb]\n top_antecedent_scores = top_fast_antecedent_scores + self.get_slow_antecedent_scores(top_span_emb, top_antecedents, top_antecedent_emb, top_antecedent_offsets, top_span_speaker_ids, genre_emb) # [k, c]\n top_antecedent_weights = tf.nn.softmax(tf.concat([dummy_scores, top_antecedent_scores], 1)) # [k, c + 1]\n top_antecedent_emb = tf.concat([tf.expand_dims(top_span_emb, 1), top_antecedent_emb], 1) # [k, c + 1, emb]\n attended_span_emb = tf.reduce_sum(tf.expand_dims(top_antecedent_weights, 2) * top_antecedent_emb, 1) # [k, emb]\n with tf.variable_scope(\"f\"):\n f = tf.sigmoid(util.projection(tf.concat([top_span_emb, attended_span_emb], 1), util.shape(top_span_emb, -1))) # [k, emb]\n top_span_emb = f * attended_span_emb + (1 - f) * top_span_emb # [k, emb]\n\n top_antecedent_scores = tf.concat([dummy_scores, top_antecedent_scores], 1) # [k, c + 1]\n\n top_antecedent_cluster_ids = tf.gather(top_span_cluster_ids, top_antecedents) # [k, c]\n top_antecedent_cluster_ids += tf.to_int32(tf.log(tf.to_float(top_antecedents_mask))) # [k, c]\n same_cluster_indicator = tf.equal(top_antecedent_cluster_ids, tf.expand_dims(top_span_cluster_ids, 1)) # [k, c]\n non_dummy_indicator = tf.expand_dims(top_span_cluster_ids > 0, 1) # [k, 1]\n pairwise_labels = tf.logical_and(same_cluster_indicator, non_dummy_indicator) # [k, c]\n dummy_labels = tf.logical_not(tf.reduce_any(pairwise_labels, 1, keepdims=True)) # [k, 1]\n top_antecedent_labels = tf.concat([dummy_labels, pairwise_labels], 1) # [k, c + 1]\n loss = self.softmax_loss(top_antecedent_scores, top_antecedent_labels) # [k]\n loss = tf.reduce_sum(loss) # []\n\n return [candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores], loss\n\n def get_span_emb(self, head_emb, context_outputs, span_starts, span_ends):\n span_emb_list = []\n\n span_start_emb = tf.gather(context_outputs, span_starts) # [k, emb]\n span_emb_list.append(span_start_emb)\n\n span_end_emb = tf.gather(context_outputs, span_ends) # [k, emb]\n span_emb_list.append(span_end_emb)\n\n span_width = 1 + span_ends - span_starts # [k]\n\n if self.config[\"use_features\"]:\n span_width_index = span_width - 1 # [k]\n span_width_emb = tf.gather(tf.get_variable(\"span_width_embeddings\", [self.config[\"max_span_width\"], self.config[\"feature_size\"]]), span_width_index) # [k, emb]\n span_width_emb = tf.nn.dropout(span_width_emb, self.dropout)\n span_emb_list.append(span_width_emb)\n\n if self.config[\"model_heads\"]:\n span_indices = tf.expand_dims(tf.range(self.config[\"max_span_width\"]), 0) + tf.expand_dims(span_starts, 1) # [k, max_span_width]\n span_indices = tf.minimum(util.shape(context_outputs, 0) - 1, span_indices) # [k, max_span_width]\n span_text_emb = tf.gather(head_emb, 
span_indices) # [k, max_span_width, emb]\n with tf.variable_scope(\"head_scores\"):\n self.head_scores = util.projection(context_outputs, 1) # [num_words, 1]\n span_head_scores = tf.gather(self.head_scores, span_indices) # [k, max_span_width, 1]\n span_mask = tf.expand_dims(tf.sequence_mask(span_width, self.config[\"max_span_width\"], dtype=tf.float32), 2) # [k, max_span_width, 1]\n span_head_scores += tf.log(span_mask) # [k, max_span_width, 1]\n span_attention = tf.nn.softmax(span_head_scores, 1) # [k, max_span_width, 1]\n span_head_emb = tf.reduce_sum(span_attention * span_text_emb, 1) # [k, emb]\n span_emb_list.append(span_head_emb)\n\n span_emb = tf.concat(span_emb_list, 1) # [k, emb]\n return span_emb # [k, emb]\n\n def get_mention_scores(self, span_emb):\n with tf.variable_scope(\"mention_scores\"):\n return util.ffnn(span_emb, self.config[\"ffnn_depth\"], self.config[\"ffnn_size\"], 1, self.dropout) # [k, 1]\n\n def softmax_loss(self, antecedent_scores, antecedent_labels):\n gold_scores = antecedent_scores + tf.log(tf.to_float(antecedent_labels)) # [k, max_ant + 1]\n marginalized_gold_scores = tf.reduce_logsumexp(gold_scores, [1]) # [k]\n log_norm = tf.reduce_logsumexp(antecedent_scores, [1]) # [k]\n return log_norm - marginalized_gold_scores # [k]\n\n def bucket_distance(self, distances):\n \"\"\"\n Places the given values (designed for distances) into 10 semi-logscale buckets:\n [0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+].\n \"\"\"\n logspace_idx = tf.to_int32(tf.floor(tf.log(tf.to_float(distances))/math.log(2))) + 3\n use_identity = tf.to_int32(distances <= 4)\n combined_idx = use_identity * distances + (1 - use_identity) * logspace_idx\n return tf.clip_by_value(combined_idx, 0, 9)\n\n def get_slow_antecedent_scores(self, top_span_emb, top_antecedents, top_antecedent_emb, top_antecedent_offsets, top_span_speaker_ids, genre_emb):\n k = util.shape(top_span_emb, 0)\n c = util.shape(top_antecedents, 1)\n\n feature_emb_list = []\n\n if self.config[\"use_metadata\"]:\n top_antecedent_speaker_ids = tf.gather(top_span_speaker_ids, top_antecedents) # [k, c]\n same_speaker = tf.equal(tf.expand_dims(top_span_speaker_ids, 1), top_antecedent_speaker_ids) # [k, c]\n speaker_pair_emb = tf.gather(tf.get_variable(\"same_speaker_emb\", [2, self.config[\"feature_size\"]]), tf.to_int32(same_speaker)) # [k, c, emb]\n feature_emb_list.append(speaker_pair_emb)\n\n tiled_genre_emb = tf.tile(tf.expand_dims(tf.expand_dims(genre_emb, 0), 0), [k, c, 1]) # [k, c, emb]\n feature_emb_list.append(tiled_genre_emb)\n\n if self.config[\"use_features\"]:\n antecedent_distance_buckets = self.bucket_distance(top_antecedent_offsets) # [k, c]\n antecedent_distance_emb = tf.gather(tf.get_variable(\"antecedent_distance_emb\", [10, self.config[\"feature_size\"]]), antecedent_distance_buckets) # [k, c]\n feature_emb_list.append(antecedent_distance_emb)\n\n feature_emb = tf.concat(feature_emb_list, 2) # [k, c, emb]\n feature_emb = tf.nn.dropout(feature_emb, self.dropout) # [k, c, emb]\n\n target_emb = tf.expand_dims(top_span_emb, 1) # [k, 1, emb]\n similarity_emb = top_antecedent_emb * target_emb # [k, c, emb]\n target_emb = tf.tile(target_emb, [1, c, 1]) # [k, c, emb]\n\n pair_emb = tf.concat([target_emb, top_antecedent_emb, similarity_emb, feature_emb], 2) # [k, c, emb]\n\n with tf.variable_scope(\"slow_antecedent_scores\"):\n slow_antecedent_scores = util.ffnn(pair_emb, self.config[\"ffnn_depth\"], self.config[\"ffnn_size\"], 1, self.dropout) # [k, c, 1]\n slow_antecedent_scores = tf.squeeze(slow_antecedent_scores, 2) 
# [k, c]\n return slow_antecedent_scores # [k, c]\n\n def get_fast_antecedent_scores(self, top_span_emb):\n with tf.variable_scope(\"src_projection\"):\n source_top_span_emb = tf.nn.dropout(util.projection(top_span_emb, util.shape(top_span_emb, -1)), self.dropout) # [k, emb]\n target_top_span_emb = tf.nn.dropout(top_span_emb, self.dropout) # [k, emb]\n return tf.matmul(source_top_span_emb, target_top_span_emb, transpose_b=True) # [k, k]\n\n def flatten_emb_by_sentence(self, emb, text_len_mask):\n num_sentences = tf.shape(emb)[0]\n max_sentence_length = tf.shape(emb)[1]\n\n emb_rank = len(emb.get_shape())\n if emb_rank == 2:\n flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length])\n elif emb_rank == 3:\n flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length, util.shape(emb, 2)])\n else:\n raise ValueError(\"Unsupported rank: {}\".format(emb_rank))\n return tf.boolean_mask(flattened_emb, tf.reshape(text_len_mask, [num_sentences * max_sentence_length]))\n\n def lstm_contextualize(self, text_emb, text_len, text_len_mask):\n num_sentences = tf.shape(text_emb)[0]\n\n current_inputs = text_emb # [num_sentences, max_sentence_length, emb]\n\n for layer in range(self.config[\"contextualization_layers\"]):\n with tf.variable_scope(\"layer_{}\".format(layer)):\n with tf.variable_scope(\"fw_cell\"):\n cell_fw = util.CustomLSTMCell(self.config[\"contextualization_size\"], num_sentences, self.lstm_dropout)\n with tf.variable_scope(\"bw_cell\"):\n cell_bw = util.CustomLSTMCell(self.config[\"contextualization_size\"], num_sentences, self.lstm_dropout)\n state_fw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_fw.initial_state.c, [num_sentences, 1]), tf.tile(cell_fw.initial_state.h, [num_sentences, 1]))\n state_bw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_bw.initial_state.c, [num_sentences, 1]), tf.tile(cell_bw.initial_state.h, [num_sentences, 1]))\n\n (fw_outputs, bw_outputs), _ = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=cell_fw,\n cell_bw=cell_bw,\n inputs=current_inputs,\n sequence_length=text_len,\n initial_state_fw=state_fw,\n initial_state_bw=state_bw)\n\n text_outputs = tf.concat([fw_outputs, bw_outputs], 2) # [num_sentences, max_sentence_length, emb]\n text_outputs = tf.nn.dropout(text_outputs, self.lstm_dropout)\n if layer > 0:\n highway_gates = tf.sigmoid(util.projection(text_outputs, util.shape(text_outputs, 2))) # [num_sentences, max_sentence_length, emb]\n text_outputs = highway_gates * text_outputs + (1 - highway_gates) * current_inputs\n current_inputs = text_outputs\n\n return self.flatten_emb_by_sentence(text_outputs, text_len_mask)\n\n def get_predicted_antecedents(self, antecedents, antecedent_scores):\n predicted_antecedents = []\n for i, index in enumerate(np.argmax(antecedent_scores, axis=1) - 1):\n if index < 0:\n predicted_antecedents.append(-1)\n else:\n predicted_antecedents.append(antecedents[i, index])\n return predicted_antecedents\n\n def get_predicted_clusters(self, top_span_starts, top_span_ends, predicted_antecedents):\n mention_to_predicted = {}\n predicted_clusters = []\n for i, predicted_index in enumerate(predicted_antecedents):\n if predicted_index < 0:\n continue\n assert i > predicted_index\n predicted_antecedent = (int(top_span_starts[predicted_index]), int(top_span_ends[predicted_index]))\n if predicted_antecedent in mention_to_predicted:\n predicted_cluster = mention_to_predicted[predicted_antecedent]\n else:\n predicted_cluster = len(predicted_clusters)\n predicted_clusters.append([predicted_antecedent])\n 
mention_to_predicted[predicted_antecedent] = predicted_cluster\n\n mention = (int(top_span_starts[i]), int(top_span_ends[i]))\n predicted_clusters[predicted_cluster].append(mention)\n mention_to_predicted[mention] = predicted_cluster\n\n predicted_clusters = [tuple(pc) for pc in predicted_clusters]\n mention_to_predicted = { m:predicted_clusters[i] for m,i in mention_to_predicted.items() }\n\n return predicted_clusters, mention_to_predicted\n\n def evaluate_coref(self, top_span_starts, top_span_ends, predicted_antecedents, gold_clusters, evaluator):\n gold_clusters = [tuple(tuple(m) for m in gc) for gc in gold_clusters]\n mention_to_gold = {}\n for gc in gold_clusters:\n for mention in gc:\n mention_to_gold[mention] = gc\n\n predicted_clusters, mention_to_predicted = self.get_predicted_clusters(top_span_starts, top_span_ends, predicted_antecedents)\n evaluator.update(predicted_clusters, gold_clusters, mention_to_predicted, mention_to_gold)\n return predicted_clusters\n\n def load_eval_data(self):\n if self.eval_data is None:\n def load_line(line):\n example = json.loads(line)\n return self.tensorize_example(example, is_training=False), example\n with open(self.config[\"eval_path\"]) as f:\n self.eval_data = [load_line(l) for l in f.readlines()]\n num_words = sum(tensorized_example[2].sum() for tensorized_example, _ in self.eval_data)\n print(\"Loaded {} eval examples.\".format(len(self.eval_data)))\n\n def evaluate(self, session, official_stdout=False):\n self.load_eval_data()\n\n coref_predictions = {}\n coref_evaluator = metrics.CorefEvaluator()\n\n for example_num, (tensorized_example, example) in enumerate(self.eval_data):\n _, _, _, _, _, _, _, _, _, gold_starts, gold_ends, _ = tensorized_example\n feed_dict = {i:t for i,t in zip(self.input_tensors, tensorized_example)}\n candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores = session.run(self.predictions, feed_dict=feed_dict)\n predicted_antecedents = self.get_predicted_antecedents(top_antecedents, top_antecedent_scores)\n coref_predictions[example[\"doc_key\"]] = self.evaluate_coref(top_span_starts, top_span_ends, predicted_antecedents, example[\"clusters\"], coref_evaluator)\n if example_num % 10 == 0:\n print(\"Evaluated {}/{} examples.\".format(example_num + 1, len(self.eval_data)))\n\n summary_dict = {}\n conll_results = conll.evaluate_conll(self.config[\"conll_eval_path\"], coref_predictions, official_stdout)\n average_f1 = sum(results[\"f\"] for results in conll_results.values()) / len(conll_results)\n summary_dict[\"Average F1 (conll)\"] = average_f1\n print(\"Average F1 (conll): {:.2f}%\".format(average_f1))\n\n p,r,f = coref_evaluator.get_prf()\n summary_dict[\"Average F1 (py)\"] = f\n print(\"Average F1 (py): {:.2f}%\".format(f * 100))\n summary_dict[\"Average precision (py)\"] = p\n print(\"Average precision (py): {:.2f}%\".format(p * 100))\n summary_dict[\"Average recall (py)\"] = r\n print(\"Average recall (py): {:.2f}%\".format(r * 100))\n\n return util.make_summary(summary_dict), average_f1\n" ]
[ [ "tensorflow.reduce_logsumexp", "tensorflow.PaddingFIFOQueue", "tensorflow.reshape", "tensorflow.nn.top_k", "tensorflow.logical_and", "tensorflow.variable_scope", "tensorflow.matmul", "tensorflow.squeeze", "tensorflow.sequence_mask", "tensorflow.concat", "tensorflow.Variable", "tensorflow.nn.softmax", "tensorflow.reduce_sum", "tensorflow.nn.dropout", "tensorflow.minimum", "tensorflow.nn.bidirectional_dynamic_rnn", "tensorflow.global_variables_initializer", "tensorflow.clip_by_global_norm", "numpy.logical_and", "tensorflow.train.exponential_decay", "tensorflow.clip_by_value", "tensorflow.constant_initializer", "tensorflow.shape", "numpy.zeros", "tensorflow.to_float", "tensorflow.expand_dims", "numpy.argmax", "tensorflow.global_variables", "tensorflow.assign", "tensorflow.gradients", "tensorflow.train.Saver", "tensorflow.tile", "tensorflow.reduce_any", "tensorflow.zeros", "tensorflow.placeholder", "tensorflow.equal", "tensorflow.range", "tensorflow.trainable_variables", "tensorflow.to_int32", "numpy.array", "tensorflow.log", "tensorflow.gather", "tensorflow.get_variable", "tensorflow.maximum" ] ]
cambiegroup/aizynthfinder
[ "f5bafb2ac4749284571c05ae6df45b6f45cccd30" ]
[ "tests/test_score.py" ]
[ "import pytest\nimport numpy as np\n\nfrom aizynthfinder.context.scoring import (\n StateScorer,\n NumberOfReactionsScorer,\n AverageTemplateOccurenceScorer,\n NumberOfPrecursorsScorer,\n NumberOfPrecursorsInStockScorer,\n PriceSumScorer,\n RouteCostScorer,\n ScorerCollection,\n ScorerException,\n)\nfrom aizynthfinder.chem import Molecule, UniqueMolecule\nfrom aizynthfinder.mcts.mcts import SearchTree\nfrom aizynthfinder.reactiontree import ReactionTree\n\n\ndef test_state_scorer_node(generate_root, default_config):\n root = generate_root(\"CCCCOc1ccc(CC(=O)N(C)O)cc1\")\n scorer = StateScorer(default_config)\n\n assert repr(scorer) == \"state score\"\n assert round(scorer(root), 4) == 0.0491\n\n\ndef test_state_scorer_nodes(generate_root, default_config):\n root = generate_root(\"CCCCOc1ccc(CC(=O)N(C)O)cc1\")\n scorer = StateScorer(default_config)\n\n scores = scorer([root, root])\n\n assert repr(scorer) == \"state score\"\n assert round(scores[0], 4) == 0.0491\n assert round(scores[1], 4) == 0.0491\n\n\ndef test_state_scorer_tree(load_reaction_tree, default_config, mock_stock):\n mock_stock(\n default_config, \"N#Cc1cccc(N)c1F\", \"O=C(Cl)c1ccc(F)cc1\", \"CN1CCC(Cl)CC1\", \"O\"\n )\n tree = ReactionTree.from_dict(load_reaction_tree(\"sample_reaction.json\"))\n scorer = StateScorer(default_config)\n\n assert round(scorer(tree), 4) == 0.994\n\n\ndef test_state_scorer_trees(load_reaction_tree, default_config, mock_stock):\n mock_stock(\n default_config, \"N#Cc1cccc(N)c1F\", \"O=C(Cl)c1ccc(F)cc1\", \"CN1CCC(Cl)CC1\", \"O\"\n )\n tree = ReactionTree.from_dict(load_reaction_tree(\"sample_reaction.json\"))\n scorer = StateScorer(default_config)\n\n scores = scorer([tree, tree])\n\n assert round(scores[0], 4) == 0.994\n assert round(scores[1], 4) == 0.994\n\n\ndef test_sort(shared_datadir, default_config, mock_stock):\n mock_stock(default_config, \"CCCO\", \"CC\")\n search_tree = SearchTree.from_json(\n shared_datadir / \"tree_without_repetition.json\", default_config\n )\n nodes = list(search_tree.graph())\n scorer = StateScorer(default_config)\n\n sorted_nodes, scores, _ = scorer.sort(nodes)\n\n assert [np.round(score, 4) for score in scores] == [0.9976, 0.0491]\n assert sorted_nodes == [nodes[1], nodes[0]]\n\n\ndef test_number_of_reaction_scorer_node(shared_datadir, default_config):\n search_tree = SearchTree.from_json(\n shared_datadir / \"tree_without_repetition.json\", default_config\n )\n nodes = list(search_tree.graph())\n scorer = NumberOfReactionsScorer()\n\n assert scorer(nodes[1]) == 1\n\n\ndef test_number_of_reaction_scorer_tree(load_reaction_tree):\n tree = ReactionTree.from_dict(load_reaction_tree(\"sample_reaction.json\"))\n scorer = NumberOfReactionsScorer()\n\n assert scorer(tree) == 2\n\n\ndef test_template_occurence_scorer_no_metadata(shared_datadir, default_config):\n search_tree = SearchTree.from_json(\n shared_datadir / \"tree_without_repetition.json\", default_config\n )\n nodes = list(search_tree.graph())\n scorer = AverageTemplateOccurenceScorer()\n\n assert scorer(nodes[1]) == 0\n\n\ndef test_template_occurence_scorer(shared_datadir, default_config):\n search_tree = SearchTree.from_json(\n shared_datadir / \"tree_without_repetition.json\", default_config\n )\n nodes = list(search_tree.graph())\n nodes[0][nodes[1]][\"action\"].metadata[\"library_occurence\"] = 5\n scorer = AverageTemplateOccurenceScorer()\n\n assert scorer(nodes[0]) == 0\n assert scorer(nodes[1]) == 5\n\n\ndef test_template_occurence_scorer_tree(load_reaction_tree):\n tree = 
ReactionTree.from_dict(load_reaction_tree(\"sample_reaction.json\"))\n scorer = AverageTemplateOccurenceScorer()\n\n assert scorer(tree) == 0\n\n\ndef test_template_occurence_scorer_tree_one_node():\n rt = ReactionTree()\n rt.root = Molecule(smiles=\"CCCCOc1ccc(CC(=O)N(C)O)cc1\")\n rt.graph.add_node(rt.root)\n scorer = AverageTemplateOccurenceScorer()\n\n assert scorer(rt) == 0.0\n\n\ndef test_scorers_one_mcts_node(default_config):\n tree = SearchTree(default_config, root_smiles=\"CCCCOc1ccc(CC(=O)N(C)O)cc1\")\n node = tree.root\n\n assert pytest.approx(StateScorer(default_config)(node), abs=1e-3) == 0.0497\n assert NumberOfReactionsScorer(default_config)(node) == 0\n assert NumberOfPrecursorsScorer(default_config)(node) == 1\n assert NumberOfPrecursorsInStockScorer(default_config)(node) == 0\n assert PriceSumScorer(default_config)(node) == 10\n assert RouteCostScorer(default_config)(node) == 10\n\n\ndef test_scoring_branched_mcts_tree(shared_datadir, default_config):\n search_tree = SearchTree.from_json(\n shared_datadir / \"tree_with_branching.json\", default_config\n )\n nodes = list(search_tree.graph())\n\n assert pytest.approx(StateScorer(default_config)(nodes[-1]), abs=1e-6) == 0.00012363\n assert NumberOfReactionsScorer()(nodes[-1]) == 14\n assert NumberOfPrecursorsScorer(default_config)(nodes[-1]) == 8\n assert NumberOfPrecursorsInStockScorer(default_config)(nodes[-1]) == 0\n assert PriceSumScorer(default_config)(nodes[-1]) == 80\n cost_score = RouteCostScorer(default_config)(nodes[-1])\n assert pytest.approx(cost_score, abs=1e-3) == 410.6577\n\n\ndef test_scoring_branch_mcts_tree_in_stock(shared_datadir, default_config, mock_stock):\n mock_stock(\n default_config,\n \"CC(C)(C)CO\",\n \"CC(C)(C)OC(=O)N(CCCl)CCCl\",\n \"N#CCc1cccc(O)c1F\",\n \"O=[N+]([O-])c1ccccc1F\",\n \"O=C1CCC(=O)N1Br\",\n \"O=C=Nc1csc(C(F)(F)F)n1\",\n \"CCC[Sn](Cl)(CCC)CCC\",\n \"COc1ccc2ncsc2c1\",\n )\n search_tree = SearchTree.from_json(\n shared_datadir / \"tree_with_branching.json\", default_config\n )\n nodes = list(search_tree.graph())\n\n assert pytest.approx(StateScorer(default_config)(nodes[-1]), abs=1e-3) == 0.950\n assert NumberOfReactionsScorer()(nodes[-1]) == 14\n assert NumberOfPrecursorsScorer(default_config)(nodes[-1]) == 8\n assert NumberOfPrecursorsInStockScorer(default_config)(nodes[-1]) == 8\n assert PriceSumScorer(default_config)(nodes[-1]) == 8\n cost_score = RouteCostScorer(default_config)(nodes[-1])\n assert pytest.approx(cost_score, abs=1e-3) == 77.4797\n\n\ndef test_scorers_tree_one_node_route(default_config):\n tree = ReactionTree()\n tree.root = UniqueMolecule(smiles=\"CCCCOc1ccc(CC(=O)N(C)O)cc1\")\n tree.graph.add_node(tree.root)\n\n assert pytest.approx(StateScorer(default_config)(tree), abs=1e-3) == 0.0497\n assert NumberOfReactionsScorer(default_config)(tree) == 0\n assert NumberOfPrecursorsScorer(default_config)(tree) == 1\n assert NumberOfPrecursorsInStockScorer(default_config)(tree) == 0\n assert PriceSumScorer(default_config)(tree) == 10\n assert RouteCostScorer(default_config)(tree) == 10\n\n\ndef test_scoring_branched_route(load_reaction_tree, default_config):\n tree = ReactionTree.from_dict(load_reaction_tree(\"branched_route.json\"))\n\n assert pytest.approx(StateScorer(default_config)(tree), abs=1e-6) == 0.00012363\n assert NumberOfReactionsScorer(default_config)(tree) == 14\n assert NumberOfPrecursorsScorer(default_config)(tree) == 8\n assert NumberOfPrecursorsInStockScorer(default_config)(tree) == 0\n assert PriceSumScorer(default_config)(tree) == 80\n cost_score = 
RouteCostScorer(default_config)(tree)\n assert pytest.approx(cost_score, abs=1e-3) == 410.6577\n\n\ndef test_scoring_branched_route_in_stock(\n load_reaction_tree, default_config, mock_stock\n):\n mock_stock(\n default_config,\n \"CC(C)(C)CO\",\n \"CC(C)(C)OC(=O)N(CCCl)CCCl\",\n \"N#CCc1cccc(O)c1F\",\n \"O=[N+]([O-])c1ccccc1F\",\n \"O=C1CCC(=O)N1Br\",\n \"O=C=Nc1csc(C(F)(F)F)n1\",\n \"CCC[Sn](Cl)(CCC)CCC\",\n \"COc1ccc2ncsc2c1\",\n )\n tree = ReactionTree.from_dict(load_reaction_tree(\"branched_route.json\"))\n\n assert pytest.approx(StateScorer(default_config)(tree), abs=1e-3) == 0.950\n assert NumberOfReactionsScorer(default_config)(tree) == 14\n assert NumberOfPrecursorsScorer(default_config)(tree) == 8\n assert NumberOfPrecursorsInStockScorer(default_config)(tree) == 8\n assert PriceSumScorer(default_config)(tree) == 8\n cost_score = RouteCostScorer(default_config)(tree)\n assert pytest.approx(cost_score, abs=1e-3) == 77.4797\n\n\ndef test_create_scorer_collection(default_config):\n collection = ScorerCollection(default_config)\n\n assert len(collection) == 5\n\n assert \"state score\" in collection.names()\n assert \"number of reactions\" in collection.names()\n\n assert isinstance(collection[\"state score\"], StateScorer)\n\n with pytest.raises(KeyError):\n collection[\"dummy\"]\n\n\ndef test_delete_scorer_to_collection(default_config):\n collection = ScorerCollection(default_config)\n\n del collection[\"state score\"]\n\n assert \"state score\" not in collection.names()\n\n\ndef test_add_scorer_to_collection(default_config):\n collection = ScorerCollection(default_config)\n del collection[\"state score\"]\n\n collection.load(StateScorer(default_config))\n\n assert \"state score\" in collection.names()\n\n\ndef test_add_scorer_to_collection_no_scorer(default_config):\n collection = ScorerCollection(default_config)\n\n with pytest.raises(ScorerException):\n collection.load(Molecule(smiles=\"CCC\"))\n\n\ndef test_load_scorer_to_collection_only_class(default_config):\n collection = ScorerCollection(default_config)\n del collection[\"state score\"]\n\n collection.load_from_config(**{\"StateScorer\": {}})\n\n assert \"state score\" in collection.names()\n\n\ndef test_load_scorer_to_collection_full_package(default_config):\n collection = ScorerCollection(default_config)\n del collection[\"state score\"]\n\n collection.load_from_config(**{\"aizynthfinder.context.scoring.StateScorer\": {}})\n\n assert \"state score\" in collection.names()\n\n\ndef test_load_scorer_to_collection_failures(default_config):\n collection = ScorerCollection(default_config)\n\n with pytest.raises(ScorerException, match=\".*load module.*\"):\n collection.load_from_config(**{\"mypackage.scoring.StateScorer\": {}})\n\n with pytest.raises(ScorerException, match=\".*class.*\"):\n collection.load_from_config(**{\"aizynthfinder.context.scoring.NoScorer\": {}})\n" ]
[ [ "numpy.round" ] ]
stuart-fb/pyrobot
[ "2f06f337f84e2c0b172dcf5ee0cd8c7de73a50e1" ]
[ "src/pyrobot/utils/util.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport sys\nimport numpy as np\nimport rospy\nimport tf\nimport geometry_msgs.msg\nfrom geometry_msgs.msg import PoseStamped, Pose\nfrom pyrobot.utils.planning_scene_interface import PlanningSceneInterface\n\n\n\ndef list_to_pose(pose_list):\n pose_msg = Pose()\n if len(pose_list) == 7:\n pose_msg.position.x = pose_list[0]\n pose_msg.position.y = pose_list[1]\n pose_msg.position.z = pose_list[2]\n pose_msg.orientation.x = pose_list[3]\n pose_msg.orientation.y = pose_list[4]\n pose_msg.orientation.z = pose_list[5]\n pose_msg.orientation.w = pose_list[6]\n elif len(pose_list) == 6: \n pose_msg.position.x = pose_list[0]\n pose_msg.position.y = pose_list[1]\n pose_msg.position.z = pose_list[2]\n q = tf.transformations.quaternion_from_euler(\n pose_list[3],\n pose_list[4], \n pose_list[5])\n pose_msg.orientation.x = q[0]\n pose_msg.orientation.y = q[1]\n pose_msg.orientation.z = q[2]\n pose_msg.orientation.w = q[3]\n return pose_msg\n\ndef get_tf_transform(tf_listener, tgt_frame, src_frame):\n \"\"\"\n Uses ROS TF to lookup the current transform from tgt_frame to src_frame,\n If the returned transform is applied to data, it will transform data in\n the src_frame into the tgt_frame\n\n :param tgt_frame: target frame\n :param src_frame: source frame\n :type tgt_frame: string\n :type src_frame: string\n\n :returns: trans, translation (x,y,z)\n :rtype: tuple (of floats)\n :returns: quat, rotation as a quaternion (x,y,z,w)\n :rtype: tuple (of floats)\n \"\"\"\n try:\n tf_listener.waitForTransform(tgt_frame, src_frame,\n rospy.Time(0),\n rospy.Duration(3))\n (trans, quat) = tf_listener.lookupTransform(tgt_frame,\n src_frame,\n rospy.Time(0))\n except (tf.LookupException,\n tf.ConnectivityException,\n tf.ExtrapolationException):\n raise RuntimeError('Cannot fetch the transform from'\n ' {0:s} to {1:s}'.format(tgt_frame, src_frame))\n return trans, quat\n\ndef quat_to_rot_mat(quat):\n \"\"\"\n Convert the quaternion into rotation matrix. 
The quaternion we used\n here is in the form of [x, y, z, w]\n\n :param quat: quaternion [x, y, z, w] (shape: :math:`[4,]`)\n :type quat: numpy.ndarray\n\n :return: the rotation matrix (shape: :math:`[3, 3]`)\n :rtype: numpy.ndarray\n \"\"\"\n return tf.transformations.quaternion_matrix(quat)[:3, :3]\n\n\ndef euler_to_quat(euler):\n \"\"\"\n Convert the yaw, pitch, roll into quaternion.\n\n :param euler: the yaw, pitch, roll angles (shape: :math:`[3,]`)\n :type euler: numpy.ndarray\n\n :return: quaternion [x, y, z, w] (shape: :math:`[4,]`)\n :rtype: numpy.ndarray\n \"\"\"\n return tf.transformations.quaternion_from_euler(euler[0], euler[1],\n euler[2], axes='rzyx')\n\n\ndef rot_mat_to_quat(rot):\n \"\"\"\n Convert the rotation matrix into quaternion.\n\n :param rot: the rotation matrix (shape: :math:`[3, 3]`)\n :type rot: numpy.ndarray\n\n :return: quaternion [x, y, z, w] (shape: :math:`[4,]`)\n :rtype: numpy.ndarray\n \"\"\"\n R = np.eye(4)\n R[:3, :3] = rot\n return tf.transformations.quaternion_from_matrix(R)\n\n\nclass MoveitObjectHandler(object):\n '''\n Use this class to create objects that reside in moveit environments\n '''\n def __init__(self, frame='/base_link'):\n '''\n Constructor of the MoveitObjectHandler class.\n '''\n self.planning_scene_interface = PlanningSceneInterface(frame)\n self.scene_objects = []\n self.attached_objects = []\n\n def add_world_object(self, id_name, pose, size, frame='/base_link'):\n '''\n Adds a BOX type object to the moveit planning scene\n\n :param id_name: unique id that object should be labeled with\n :param pose: pose of the object\n :param size: size of the object\n :param frame: frame in which the object pose is passed\n\n :type id_name: string\n :type pose: list of double of length 7 (x,y,z, q_x, q_y, q_z, q_w)\n :type size: tuple of length 3\n :type frame: string\n '''\n assert type(size) is tuple, 'size should be a tuple'\n assert len(size) == 3, 'size should be of length 3'\n assert id_name not in self.scene_objects, \\\n 'Object with the same name already exists!'\n self.scene_objects.append(id_name)\n\n pose = list_to_pose(pose)\n pose_stamped = PoseStamped()\n pose_stamped.header.frame_id = frame\n pose_stamped.pose = pose\n\n self.planning_scene_interface.addBox(id_name, size[0], size[1],\n size[2], pose_stamped)\n\n def remove_world_object(self, id_name):\n '''\n Removes a specified object from the Moveit planning scene\n\n :param id_name: unique id that object should be labeled with\n :type id_name: string\n '''\n assert id_name in self.scene_objects, 'Incorrect object name!'\n self.scene_objects.remove(id_name)\n self.planning_scene_interface.removeCollisionObject(id_name)\n\n def attach_arm_object(self, link_name, id_name, pose, size):\n '''\n Attaches the specified Box type object to the robot\n\n :param link_name: name of the link to which the object\n should be attached\n :param id_name: unique id associated with the object\n :param pose: pose of the object\n :param size: size of the object\n\n :type link_name: string\n :type id_name: string\n :type pose: list of double of length 7 (x,y,z, q_x, q_y, q_z, q_w)\n :type size: tuple of length 3\n '''\n assert type(size) is tuple, 'size should be a tuple'\n assert len(size) == 3, 'size should be of length 3'\n assert id_name not in self.attached_objects, \\\n 'Object with the same name already exists!'\n self.scene_objects.append(id_name)\n self.attached_objects.append(id_name)\n\n self.planning_scene_interface.attachBox(id_name, size[0], size[1], size[2],\n pose, 
link_name)\n\n def detach_arm_object(self, link_name, id_name, remove_from_world=True):\n '''\n Detaches an object earlier attached to the robot\n\n :param link_name: name of the link from which the object\n should be detached\n :param id_name: unique id associated with the object\n :param remove_from_world: if set True, deletes the\n object from the scene.\n\n :type link_name: string\n :type id_name: string\n :type remove_from_world: bool\n '''\n assert id_name in self.attached_objects, 'Incorrect object name!'\n self.planning_scene_interface.remove_attached_object(link_name, id_name)\n self.attached_objects.remove(id_name)\n\n if remove_from_world:\n self.remove_world_object(id_name)\n\n def remove_all_objects(self):\n '''\n Removes all the objects in the current Moveit planning scene\n '''\n ## NOTE: assumes the planning scene interface exposes\n ## get_objects()/get_attached_objects() (moveit_commander-style API)\n ## get added objects\n dict_obj = self.planning_scene_interface.get_objects()\n ## get attached objects\n dict_attach_obj = self.planning_scene_interface.get_attached_objects()\n ## remove added objects\n for i in dict_obj.keys():\n self.remove_world_object(i)\n ## remove attached objects\n for i in dict_attach_obj.keys():\n self.detach_arm_object(dict_attach_obj[i].link_name, i)\n\n def add_table(self, pose=None, size=None):\n '''\n Adds a table to the planning scene in the base frame.\n\n :param pose: pose of the object\n :param size: size of the object\n\n :type pose: list of double of length 7 (x,y,z, q_x, q_y, q_z, q_w)\n :type size: tuple of length 3\n '''\n if pose is not None and size is not None:\n self.add_world_object('table',\n pose=pose,\n size=size)\n else:\n # Default table.\n print('Creating default table.')\n self.add_world_object('table',\n pose=[0.8,0.0,-0.23,0.,0.,0.,1.],\n size=(1.35,2.0,0.1))\n\n def add_kinect(self, pose=None, size=None):\n '''\n Adds a kinect object to the planning scene in the base frame.\n\n :param pose: pose of the object\n :param size: size of the object\n\n :type pose: list of double of length 7 (x,y,z, q_x, q_y, q_z, q_w)\n :type size: tuple of length 3\n '''\n if pose is not None and size is not None:\n self.add_world_object('kinect',\n pose=pose,\n size=size)\n else:\n # Default kinect.\n print('Creating default kinect.')\n self.add_world_object('kinect',\n pose=[0., 0.0,0.75,0.,0.,0.,1.],\n size=(0.25,0.25,0.3))\n\n def add_gripper(self, pose=None, size=None):\n '''\n Attaches a gripper object to the 'right_gripper' link.\n\n :param pose: pose of the object\n :param size: size of the object\n\n :type pose: list of double of length 7 (x,y,z, q_x, q_y, q_z, q_w)\n :type size: tuple of length 3\n '''\n if pose is not None and size is not None:\n self.attach_arm_object('right_gripper',\n 'gripper',\n pose=pose,\n size=size)\n else:\n # Default gripper.\n print('Creating default gripper.')\n self.attach_arm_object('right_gripper',\n 'gripper',\n pose=[0., 0.0, 0.07,0.,0.,0.,1.],\n size=(0.02,0.1,0.07))\n\n def remove_table(self):\n '''\n Removes table object from the planning scene\n '''\n self.remove_world_object('table')\n\n def remove_gripper(self):\n '''\n Removes gripper object from the planning scene\n '''\n # 'gripper' was attached to the 'right_gripper' link in add_gripper;\n # detach_arm_object with remove_from_world=True also deletes it from\n # the world scene, so no separate remove_world_object call is needed\n self.detach_arm_object('right_gripper', 'gripper',\n remove_from_world=True)\n" ]
[ [ "numpy.eye" ] ]
autonomousvision/stylegan_xl
[ "8c76531bcbf0931c295ecd1d32f75af998d1411f" ]
[ "pg_modules/discriminator.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision.transforms import Normalize\nimport pickle\n\nfrom training.diffaug import DiffAugment\nfrom training.networks_stylegan2 import FullyConnectedLayer\nfrom pg_modules.blocks import conv2d, DownBlock, DownBlockPatch\nfrom pg_modules.projector import F_RandomProj\nfrom feature_networks.constants import VITS\n\nclass SingleDisc(nn.Module):\n def __init__(self, nc=None, ndf=None, start_sz=256, end_sz=8, head=None, patch=False):\n super().__init__()\n\n # midas channels\n nfc_midas = {4: 512, 8: 512, 16: 256, 32: 128, 64: 64, 128: 64,\n 256: 32, 512: 16, 1024: 8}\n\n # interpolate for start sz that are not powers of two\n if start_sz not in nfc_midas.keys():\n sizes = np.array(list(nfc_midas.keys()))\n start_sz = sizes[np.argmin(abs(sizes - start_sz))]\n self.start_sz = start_sz\n\n # if given ndf, allocate all layers with the same ndf\n if ndf is None:\n nfc = nfc_midas\n else:\n nfc = {k: ndf for k, v in nfc_midas.items()}\n\n # for feature map discriminators with nfc not in nfc_midas\n # this is the case for the pretrained backbone (midas.pretrained)\n if nc is not None and head is None:\n nfc[start_sz] = nc\n\n layers = []\n\n # Head if the initial input is the full modality\n if head:\n layers += [conv2d(nc, nfc[256], 3, 1, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True)]\n\n # Down Blocks\n DB = DownBlockPatch if patch else DownBlock\n while start_sz > end_sz:\n layers.append(DB(nfc[start_sz], nfc[start_sz//2]))\n start_sz = start_sz // 2\n\n layers.append(conv2d(nfc[end_sz], 1, 4, 1, 0, bias=False))\n self.main = nn.Sequential(*layers)\n\n def forward(self, x, c):\n return self.main(x)\n\nclass SingleDiscCond(nn.Module):\n def __init__(self, nc=None, ndf=None, start_sz=256, end_sz=8, head=None, patch=False, c_dim=1000, cmap_dim=64, rand_embedding=False):\n super().__init__()\n self.cmap_dim = cmap_dim\n\n # midas channels\n nfc_midas = {4: 512, 8: 512, 16: 256, 32: 128, 64: 64, 128: 64,\n 256: 32, 512: 16, 1024: 8}\n\n # interpolate for start sz that are not powers of two\n if start_sz not in nfc_midas.keys():\n sizes = np.array(list(nfc_midas.keys()))\n start_sz = sizes[np.argmin(abs(sizes - start_sz))]\n self.start_sz = start_sz\n\n # if given ndf, allocate all layers with the same ndf\n if ndf is None:\n nfc = nfc_midas\n else:\n nfc = {k: ndf for k, v in nfc_midas.items()}\n\n # for feature map discriminators with nfc not in nfc_midas\n # this is the case for the pretrained backbone (midas.pretrained)\n if nc is not None and head is None:\n nfc[start_sz] = nc\n\n layers = []\n\n # Head if the initial input is the full modality\n if head:\n layers += [conv2d(nc, nfc[256], 3, 1, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True)]\n\n # Down Blocks\n DB = DownBlockPatch if patch else DownBlock\n while start_sz > end_sz:\n layers.append(DB(nfc[start_sz], nfc[start_sz//2]))\n start_sz = start_sz // 2\n self.main = nn.Sequential(*layers)\n\n self.cls = conv2d(nfc[end_sz], self.cmap_dim, 4, 1, 0, bias=False)\n\n # Pretrained Embeddings\n embed_path = 'in_embeddings/tf_efficientnet_lite0.pkl'\n with open(embed_path, 'rb') as f:\n self.embed = pickle.Unpickler(f).load()['embed']\n print(f'loaded imagenet embeddings from {embed_path}: {self.embed}')\n if rand_embedding:\n self.embed.__init__(num_embeddings=self.embed.num_embeddings, embedding_dim=self.embed.embedding_dim)\n print(f'initialized embeddings with random weights')\n\n self.embed_proj = 
FullyConnectedLayer(self.embed.embedding_dim, self.cmap_dim, activation='lrelu')\n\n def forward(self, x, c):\n h = self.main(x)\n out = self.cls(h)\n\n cmap = self.embed_proj(self.embed(c.argmax(1))).unsqueeze(-1).unsqueeze(-1)\n out = (out * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim))\n\n return out\n\nclass MultiScaleD(nn.Module):\n def __init__(\n self,\n channels,\n resolutions,\n num_discs=4,\n proj_type=2, # 0 = no projection, 1 = cross channel mixing, 2 = cross scale mixing\n cond=0,\n patch=False,\n **kwargs,\n ):\n super().__init__()\n\n assert num_discs in [1, 2, 3, 4, 5]\n\n # the first disc is on the lowest level of the backbone\n self.disc_in_channels = channels[:num_discs]\n self.disc_in_res = resolutions[:num_discs]\n Disc = SingleDiscCond if cond else SingleDisc\n\n mini_discs = []\n for i, (cin, res) in enumerate(zip(self.disc_in_channels, self.disc_in_res)):\n start_sz = res if not patch else 16\n mini_discs += [str(i), Disc(nc=cin, start_sz=start_sz, end_sz=8, patch=patch)],\n\n self.mini_discs = nn.ModuleDict(mini_discs)\n\n def forward(self, features, c, rec=False):\n all_logits = []\n for k, disc in self.mini_discs.items():\n all_logits.append(disc(features[k], c).view(features[k].size(0), -1))\n\n all_logits = torch.cat(all_logits, dim=1)\n return all_logits\n\nclass ProjectedDiscriminator(torch.nn.Module):\n def __init__(\n self,\n backbones,\n diffaug=True,\n interp224=True,\n backbone_kwargs={},\n **kwargs\n ):\n super().__init__()\n self.backbones = backbones\n self.diffaug = diffaug\n self.interp224 = interp224\n\n # get backbones and multi-scale discs\n feature_networks, discriminators = [], []\n\n for i, bb_name in enumerate(backbones):\n\n feat = F_RandomProj(bb_name, **backbone_kwargs)\n disc = MultiScaleD(\n channels=feat.CHANNELS,\n resolutions=feat.RESOLUTIONS,\n **backbone_kwargs,\n )\n\n feature_networks.append([bb_name, feat])\n discriminators.append([bb_name, disc])\n\n self.feature_networks = nn.ModuleDict(feature_networks)\n self.discriminators = nn.ModuleDict(discriminators)\n\n def train(self, mode=True):\n self.feature_networks = self.feature_networks.train(False)\n self.discriminators = self.discriminators.train(mode)\n return self\n\n def eval(self):\n return self.train(False)\n\n def forward(self, x, c):\n logits = []\n\n for bb_name, feat in self.feature_networks.items():\n\n # apply augmentation (x in [-1, 1])\n x_aug = DiffAugment(x, policy='color,translation,cutout') if self.diffaug else x\n\n # transform to [0,1]\n x_aug = x_aug.add(1).div(2)\n\n # apply F-specific normalization\n x_n = Normalize(feat.normstats['mean'], feat.normstats['std'])(x_aug)\n\n # upsample if smaller, downsample if larger + VIT\n if self.interp224 or bb_name in VITS:\n x_n = F.interpolate(x_n, 224, mode='bilinear', align_corners=False)\n\n # forward pass\n features = feat(x_n)\n logits += self.discriminators[bb_name](features, c)\n\n return logits\n" ]
[ [ "torch.nn.Sequential", "numpy.sqrt", "torch.nn.ModuleDict", "torch.cat", "torch.nn.functional.interpolate", "torch.nn.LeakyReLU" ] ]
Chrasmus/SDCN_July18_T3_P3_CapStone_System_Integration
[ "b64e01a0202adb4a6d82c2a598756a07e585fcbb" ]
[ "ros/src/tl_detector/light_classification/tl_classifier.py" ]
[ "from styx_msgs.msg import TrafficLight\nimport tensorflow as tf\nimport numpy as np\n\nclass TLClassifier(object):\n def __init__(self, is_site):\n #TODO load classifier\n # main code source : object_detection_tutorial.ipynb from Google's model-zoo on GitHub\n if is_site:\n PATH_TO_FROZEN_GRAPH = r'/home/workspace/CarND-Capstone/frozen_models/frozen_site_inception/frozen_inference_graph.pb'\n else:\n PATH_TO_FROZEN_GRAPH = r'/home/workspace/CarND-Capstone/frozen_models/frozen_simulator_inception2/frozen_inference_graph.pb'\n\n self.scores_threshold = 0.25\n \n self.detection_graph = tf.Graph()\n with self.detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n \n # get the tensors by their names\n self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')\n self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')\n self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')\n self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')\n self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')\n \n self.sess = tf.Session(graph=self.detection_graph)\n \n\n def get_classification(self, image):\n \"\"\"Determines the color of the traffic light in the image\n\n Args:\n image (cv::Mat): image containing the traffic light\n\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n #TODO implement light color prediction\n with self.detection_graph.as_default():\n #output_dict = self.sess.run([self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],\n # feed_dict={self.image_tensor: np.expand_dims(image, 0)})\n (boxes, scores, classes, num_detections) = self.sess.run([self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],\n feed_dict={self.image_tensor: np.expand_dims(image, 0)})\n \n #classes = output_dict['detection_classes'][0]\n #scores = output_dict['detection_scores'][0]\n # remove 'useless' one-dimensions with 'squeeze' function\n classes = np.squeeze(classes).astype(np.uint8)\n scores = np.squeeze(scores)\n \n print('Classes (GREEN=1 and RED=2)= ', classes[0], ' - Scores = ', scores[0])\n \n if scores[0] > self.scores_threshold:\n if classes[0] == 1:\n return TrafficLight.GREEN\n elif classes[0] == 2:\n return TrafficLight.RED\n elif classes[0] == 3:\n return TrafficLight.YELLOW\n \n return TrafficLight.UNKNOWN\n" ]
[ [ "numpy.squeeze", "tensorflow.gfile.GFile", "tensorflow.Graph", "numpy.expand_dims", "tensorflow.Session", "tensorflow.import_graph_def", "tensorflow.GraphDef" ] ]
zihanzawad/Blockchain-Project
[ "94d4542fce7653dcb95d13fe6a3951160e3defd2" ]
[ "app/backend/Scripts/Transformer.py" ]
[ "from pdf2image import convert_from_path, convert_from_bytes\nfrom os import path, makedirs\nfrom hashlib import sha256\nimport numpy as np\nimport base64\n\n\nclass Transformer():\n\n #read pdf from file path and convert to jpegs\n def save_pdf_as_image(inputPath:str, outputPath:str):\n if not path.exists(outputPath):\n makedirs(outputPath)\n\n pdfAsImages = convert_from_path(inputPath)\n for pageNum, page in enumerate(pdfAsImages):\n fileName = outputPath + 'output' + str(pageNum)\n page.save(fileName, \"JPEG\")\n return pdfAsImages\n\n def pdf_as_images(inputPath: str):\n pdfAsImages = convert_from_path(inputPath)\n return pdfAsImages\n\n #read pdf from byte input and convert to jpegs\n def bytes_to_images(bytes:bytes):\n pdfAsImages = convert_from_bytes(bytes)\n return pdfAsImages\n\n #convert PIL arrays to numpy arrays\n def PIL_to_Numpy(input:list):\n pagesAsNumpy = []\n for page in input:\n pageAsNumpy = np.asarray(page)\n pagesAsNumpy.append(pageAsNumpy)\n return pagesAsNumpy\n\n #separate a page into 18 separate chunks\n def PDF_to_Numpy(imagesAsNumpy: list, chunks: int=18) -> list:\n chunkedImages = []\n for image in imagesAsNumpy:\n chunked_image = np.array_split(image, chunks)\n chunkedImages.append(chunked_image)\n return chunkedImages\n\n #return SHA256 hash of input\n def encrypt_data(data:list):\n hash = sha256(data)\n return hash.hexdigest()\n\n #convert chunked numpy representation into array of SHA256 hashes\n def encrypt_document(input:list):\n encryptedPages = []\n for page in input:\n currentPage = []\n for chunk in page:\n currentPage.append(Transformer.encrypt_data(chunk))\n encryptedPages.append(currentPage)\n return encryptedPages\n\n #converts bytes to array of SHA256 hash strings\n def bytes_to_hash_array(bytes:bytes):\n images = Transformer.bytes_to_images(bytes)\n pilArray = Transformer.PIL_to_Numpy(images)\n npArray = Transformer.PDF_to_Numpy(pilArray)\n hashArray = Transformer.encrypt_document(npArray)\n return hashArray\n\n def images_to_hash_array(images:list):\n pilArray = Transformer.PIL_to_Numpy(images)\n npArray = Transformer.PDF_to_Numpy(pilArray)\n hashArray = Transformer.encrypt_document(npArray)\n return hashArray\n\n #compares hash array lists\n def compare_document_hashes(original: list, toVerify: list):\n tamperedRegions = []\n if len(original) == len(toVerify):\n for pageNum in range(len(original)):\n for chunkNum in range(len(original[pageNum])):\n if original[pageNum][chunkNum] != toVerify[pageNum][chunkNum]:\n tamperedRegions.append([pageNum, chunkNum])\n if bool(tamperedRegions):\n return tamperedRegions\n else:\n return 1\n return 0\n\n # Highlights tampered areas\n def visualise_tamper(pagesAsNumpy:list, tamperedRegions:list, chunks: int = 18):\n\n pages = np.array(pagesAsNumpy,dtype=float)/255\n\n for region in tamperedRegions:\n page = region[0]\n chunk = region[1]\n lower = round(np.shape(pages[page])[0]*chunk/chunks)\n upper = round(np.shape(pages[page])[0]*(chunk+1)/chunks)\n pages[page,lower:upper,:,1] *= 0.4\n pages[page,lower:upper,:,2] *= 0.4\n\n for i in range(len(pages)):\n print(pages[0])\n\n #imshow(pages[0])" ]
[ [ "numpy.array", "numpy.array_split", "numpy.shape", "numpy.asarray" ] ]
rmxrmx/NLP-E21
[ "fd5c3af70a2434cc30a3ffb52e4e0872cbffdd23" ]
[ "syllabus/classes/class7/main.py" ]
[ "import numpy as np\nimport torch\n\nfrom datasets import load_dataset\nimport gensim.downloader as api\n\nfrom util import batch\nfrom LSTM import RNN\nfrom embedding import gensim_to_torch_embedding\n\n# DATASET\ndataset = load_dataset(\"conllpp\")\ntrain = dataset[\"train\"]\n\n# inspect the dataset\ntrain[\"tokens\"][:1]\ntrain[\"ner_tags\"][:1]\nnum_classes = train.features[\"ner_tags\"].feature.num_classes\n\n\n# CONVERTING EMBEDDINGS\nmodel = api.load(\"glove-wiki-gigaword-50\")\n\n# convert gensim word embedding to torch word embedding\nembedding_layer, vocab = gensim_to_torch_embedding(model)\n\n\n# PREPARING A BATCH\n\n# shuffle dataset\nshuffled_train = dataset[\"train\"].shuffle(seed=1)\n\n# batch it using a utility function (don't spend time on the function, but make sure you understand the output)\nbatch_size = 10\nbatches_tokens = batch(shuffled_train[\"tokens\"], batch_size)\nbatches_tags = batch(shuffled_train[\"ner_tags\"], batch_size)\n\n\ndef tokens_to_idx(tokens, vocab=model.key_to_index):\n \"\"\"\n Ideas to understand this function:\n - Write documentation for this function including type hints for each arguement and return statement\n - What does the .get method do?\n - Why lowercase?\n \"\"\"\n return [vocab.get(t.lower(), vocab[\"UNK\"]) for t in tokens]\n\n\n# sample using only the first batch\nbatch_tokens = next(batches_tokens)\nbatch_tags = next(batches_tags)\nbatch_tok_idx = [tokens_to_idx(sent) for sent in batch_tokens]\nbatch_size = len(batch_tokens)\n\n# compute length of longest sentence in batch\nbatch_max_len = max([len(s) for s in batch_tok_idx])\n\n# prepare a numpy array with the data, initializing the data with 'PAD'\n# and all labels with -1; initializing labels to -1 differentiates tokens\n# with tags from 'PAD' tokens\nbatch_input = vocab[\"PAD\"] * np.ones((batch_size, batch_max_len))\nbatch_labels = -1 * np.ones((batch_size, batch_max_len))\n\n# copy the data to the numpy array\nfor i in range(batch_size):\n tok_idx = batch_tok_idx[i]\n tags = batch_tags[i]\n size = len(tok_idx)\n\n batch_input[i][:size] = tok_idx\n batch_labels[i][:size] = tags\n\n\n# since all data are indices, we convert them to torch LongTensors (integers)\nbatch_input, batch_labels = torch.LongTensor(batch_input), torch.LongTensor(\n batch_labels\n)\n\n# CREATE MODEL\nmodel = RNN(\n embedding_layer=embedding_layer, output_dim=num_classes + 1, hidden_dim_size=256\n)\n\n# FORWARD PASS\nX = batch_input\ny = model(X)\n\nloss = model.loss_fn(outputs=y, labels=batch_labels)\n# loss.backward()" ]
[ [ "numpy.ones", "torch.LongTensor" ] ]
dsbrown1331/vav-icml
[ "90f40c2b5b52f3cc142ffd4e02bb82d88e1e221d" ]
[ "gridworld_vav/experiments/basic_value_alignment/gaussian_reward_value_alignment_experiment_runner_diffmethods_arp.py" ]
[ "#I want to rerun things with the ARP rather than the AEC...\n\nimport sys\nimport os\nexp_path = os.path.dirname(os.path.abspath(__file__))\nprint(exp_path)\nproject_path = os.path.abspath(os.path.join(exp_path, \"..\", \"..\"))\nsys.path.insert(0, project_path)\nprint(sys.path)\n\nimport src.experiment_utils as eutils\nimport src.utils as utils\nimport src.mdp as mdp\nimport src.machine_teaching\nimport copy\nimport numpy as np\nimport src.value_alignment_verification as vav\nimport src.alignment_heuristics as ah\nimport random\nimport sys\nimport src.machine_teaching as machine_teaching\n\n#evaluate several different verification methods and compute accuracies\n\n\ndef random_weights(num_features):\n rand_n = np.random.randn(num_features)\n l2_ball_weights = rand_n / np.linalg.norm(rand_n)\n return l2_ball_weights\n #return 1.0 - 2.0 * np.random.rand(num_features)\n\ndef sample_gaussian_weights(mean_vec, stdev_scalar):\n weights = np.random.normal(0.0, stdev_scalar, len(mean_vec)) + mean_vec\n return weights / np.linalg.norm(weights)\n\ninit_seed = 1234\nnum_trials = 10 #number of mdps with random rewards to try\nnum_eval_policies_tries = 50\n\n#scot params\nnum_rollouts = 20\n#used for scot and traj comparisons\nrollout_length = 20 #should be more than np.log(eps * (1-gamma))/np.log(gamma) to gurantee epsilong accuracy\n\n# how far to sample\nsigma = 0.4\n\ndebug = False\nprecision = 0.00001\nnum_rows_list = [4,5,6,7,8]#[4,8,16]\nnum_cols_list = [4,5,6,7,8]#[4,8,16]\nnum_features_list = [3,4,5,6,7,8]\n#verifier_list =['arp-pref',\"arp-bb\", \"arp-w\",\"scot\",\"state-value-critical-0.2\"]\nverifier_list =[\"arp-pref\"]\n\n\nexp_data_dir = os.path.join(project_path, \"results\", \"arp_gaussian\")\n\nif not os.path.exists(exp_data_dir):\n os.makedirs(exp_data_dir)\n\nfor num_features in num_features_list:\n for num_rows in num_rows_list:\n num_cols = num_rows #keep it square grid for now\n\n result_writers = []\n for i, verifier_name in enumerate(verifier_list):\n filename = \"arp{}_states{}x{}_features{}.txt\".format(verifier_name, num_rows, num_cols, num_features)\n full_path = os.path.join(exp_data_dir, filename)\n print(\"writing to\", full_path)\n result_writers.append(open(full_path,'w'))\n #input()\n\n for r_iter in range(num_trials):\n print(\"=\"*10, r_iter, \"=\"*10)\n print(\"features\", num_features, \"num_rows\", num_rows)\n ##For this test I want to verify that the ranking-based machine teaching is able to correctly verify whether an agent is value aligned or not.\n #MDP is deterministic with fixed number or rows, cols, and features\n #try a variable number of eval policies since bigger domains can have more possible policies (this is just a heuristic to make sure we try a lot but not as many for really small mdps)\n # 2 * num_features * num_rows * num_cols #Note this isn't how many we'll actually end up with since we reject if same as optimal policy\n initials = [(i,j) for i in range(num_rows) for j in range(num_cols)]\n terminals = []#[(num_rows-1,num_cols-1)]\n gamma = 0.9\n seed = init_seed + r_iter \n print(\"seed\", seed)\n np.random.seed(seed)\n random.seed(seed)\n\n #First let's generate a random MDP\n state_features = eutils.create_random_features_row_col_m(num_rows, num_cols, num_features)\n #print(\"state features\\n\",state_features)\n true_weights = random_weights(num_features)\n true_world = mdp.LinearFeatureGridWorld(state_features, true_weights, initials, terminals, gamma)\n V = mdp.value_iteration(true_world, epsilon=precision)\n Qopt = 
mdp.compute_q_values(true_world, V=V, eps=precision)\n opt_policy = mdp.find_optimal_policy(true_world, Q=Qopt, epsilon=precision)\n\n if debug:\n print(\"true weights: \", true_weights)\n\n print(\"rewards\")\n true_world.print_rewards()\n print(\"value function\")\n\n true_world.print_map(V)\n print(\"mdp features\")\n utils.display_onehot_state_features(true_world)\n\n print(\"optimal policy\")\n true_world.print_map(true_world.to_arrows(opt_policy))\n\n #now find a bunch of other optimal policies for the same MDP but with different weight vectors.\n world = copy.deepcopy(true_world)\n eval_policies = []\n eval_Qvalues = []\n eval_weights = []\n num_eval_policies = 0\n for i in range(num_eval_policies_tries):\n #print(\"trying\", i)\n #change the reward weights\n eval_weight_vector = sample_gaussian_weights(true_weights, sigma)\n # print(\"true weights\", true_weights)\n # print(\"new weights\", eval_weight_vector)\n world.weights = eval_weight_vector\n #find the optimal policy under this MDP\n Qval = mdp.compute_q_values(world, eps=precision)\n eval_policy = mdp.find_optimal_policy(world, Q=Qval, epsilon=precision)\n #only save if not equal to optimal policy\n if eval_policy != opt_policy:# and eval_policy not in eval_policies:\n if debug:\n print(\"found distinct eval policy\")\n world.print_map(world.to_arrows(eval_policy))\n\n eval_policies.append(eval_policy)\n eval_Qvalues.append(Qval)\n eval_weights.append(eval_weight_vector)\n num_eval_policies += 1\n\n print(\"There are {} distinct optimal policies\".format(len(eval_policies)))\n if len(eval_policies) == 0:\n print(\"The only possible policy is the optimal policy. There must be a problem with the features. Can't do verification if only one policy is possible!\")\n sys.exit()\n\n print()\n print(\"Generating verification tests\")\n\n #TODO: save computation by solving for halfspaces once for ARP-w and ARP-bb\n teacher = machine_teaching.StateActionRankingTeacher(true_world, Qopt, opt_policy, debug=debug, epsilon=precision)\n\n #TODO: we don't need the tests, just the halfspaces, but we do need to know which are equality\n tests, halfspaces = teacher.get_optimal_value_alignment_tests(use_suboptimal_rankings=False, compare_optimal=False)\n\n for vindx, verifier_name in enumerate(verifier_list):\n tester = None\n size_verification_test = None\n\n if \"state-value-critical-\" in verifier_name:\n critical_value_thresh = float(verifier_name[len(\"state-value-critical-\"):])\n #print(\"critical value\", critical_value_thresh)\n tester = ah.CriticalStateActionValueVerifier(true_world, Qopt, opt_policy, critical_value_thresh, precision=precision, debug=debug)\n\n elif verifier_name == \"arp-w\":\n tester = vav.HalfspaceVerificationTester(true_world, Qopt, opt_policy, debug=debug, precision=precision, teacher=teacher, tests=tests, halfspaces=halfspaces)\n\n elif verifier_name == \"arp-bb\":\n tester = vav.ARPBlackBoxTester(true_world, Qopt, opt_policy, precision, debug=debug, teacher=teacher, tests=tests, halfspaces=halfspaces)\n\n elif verifier_name == \"arp-pref\":\n tester = vav.TrajectoryRankingBasedTester(true_world, Qopt, opt_policy, precision, rollout_length, debug=debug, use_suboptimal_rankings=True)\n\n elif verifier_name == \"scot\":\n tester = vav.SCOTVerificationTester(true_world, Qopt, opt_policy, precision, num_rollouts, rollout_length, debug=debug)\n\n else:\n print(\"invalid verifier name\")\n sys.exit()\n size_verification_test = tester.get_size_verification_test()\n print(\"number of 
questions\", size_verification_test)\n #checck optimal\n verified = tester.is_agent_value_aligned(opt_policy, Qopt, true_weights)\n\n #print(verified)\n if not verified:\n print(\"testing true policy\")\n \n print(\"supposed to verify the optimal policy. This is not right!\")\n input()\n\n correct = 0\n for i in range(num_eval_policies):\n \n if debug:\n print(\"\\ntesting agent\", i)\n print(\"with reward weights:\", eval_weights[i])\n print(\"agent policy\")\n world.print_map(world.to_arrows(eval_policies[i]))\n print(\"compared to \")\n print(\"optimal policy\")\n true_world.print_map(true_world.to_arrows(opt_policy))\n print(\"true reward weights:\", true_weights)\n print(\"mdp features\")\n utils.display_onehot_state_features(true_world)\n verified = tester.is_agent_value_aligned(eval_policies[i], eval_Qvalues[i], eval_weights[i])\n #print(verified)\n if verified:\n if debug:\n print(\"not supposed to be true...\")\n input()\n if not verified:\n correct += 1\n #TODO: how do I keep track of accuracy??\n verifier_accuracy = correct / num_eval_policies\n print(verifier_name)\n print(\"Accuracy = \", 100.0*verifier_accuracy)\n #input()\n \n result_writers[vindx].write(\"{},{},{}\\n\".format(correct, num_eval_policies, size_verification_test))\n for writer in result_writers:\n writer.close()\n\n #teacher = machine_teaching.RankingTeacher(world, debug=False)\n #teacher.get_optimal_value_alignment_tests(use_suboptimal_rankings = False)" ]
[ [ "numpy.random.seed", "numpy.linalg.norm", "numpy.random.randn" ] ]
qasimtariq1171/estimator
[ "e7fcffa942006fc2ecf4905523df2d8d6fcf51bd" ]
[ "tensorflow_estimator/python/estimator/tpu/tpu_estimator.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ===================================================================\n\"\"\"TPUEstimator class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport copy\nimport enum\nimport math\nimport os\nimport signal\nimport sys\nimport threading\nimport time\n\nimport tensorflow as tf\nimport numpy as np\nimport six\nfrom six.moves import queue as Queue # pylint: disable=redefined-builtin\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.core.framework import variable_pb2\nfrom tensorflow.core.framework.summary_pb2 import Summary\nfrom tensorflow.core.protobuf.tpu import compilation_result_pb2 as tpu_compilation_result\nfrom tensorflow.python.data.util import nest as data_nest\nfrom tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver\nfrom tensorflow.python.framework import function\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import control_flow_util\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import summary_ops_v2 as contrib_summary\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.tpu import functional as tpu_functional\nfrom tensorflow.python.tpu import preempted_hook\nfrom tensorflow.python.tpu import session_support\nfrom tensorflow.python.tpu import tensor_tracer\nfrom tensorflow.python.tpu import tpu\nfrom tensorflow.python.tpu import tpu_embedding_gradient\nfrom tensorflow.python.tpu import tpu_feed\nfrom tensorflow.python.tpu import tpu_function\nfrom tensorflow.python.tpu import training_loop\nfrom tensorflow.python.tpu.ops import tpu_ops\nfrom tensorflow.python.training import evaluation\nfrom tensorflow.python.util import function_utils\nfrom tensorflow.python.util import tf_inspect\nfrom tensorflow.python.util.tf_export import estimator_export\nfrom tensorflow_estimator.python.estimator import estimator as estimator_lib\nfrom tensorflow_estimator.python.estimator import model_fn as model_fn_lib\nfrom tensorflow_estimator.python.estimator.export import export_output as export_output_lib\nfrom tensorflow_estimator.python.estimator.tpu import _tpu_estimator_embedding\nfrom tensorflow_estimator.python.estimator.tpu import error_handling\nfrom tensorflow_estimator.python.estimator.tpu import iteration_count_estimator\nfrom tensorflow_estimator.python.estimator.tpu import tpu_config\nfrom tensorflow_estimator.python.estimator.tpu import tpu_context\nfrom tensorflow_estimator.python.estimator.tpu import util as util_lib\nfrom tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import AdagradParameters # pylint: disable=unused-import\nfrom tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import AdamParameters # pylint: 
disable=unused-import\nfrom tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import EmbeddingConfigSpec # pylint: disable=unused-import\nfrom tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import StochasticGradientDescentParameters # pylint: disable=unused-import\n\n_INITIAL_LOSS = 1e7\n_ZERO_LOSS = 0.\n_TPU_ESTIMATOR = 'tpu_estimator'\n_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'\n_BATCH_SIZE_KEY = 'batch_size'\n_CTX_KEY = 'context'\n_USE_TPU_KEY = 'use_tpu'\n_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'\n_ONE_GIGABYTE = 1024 * 1024 * 1024\n_TPU_ENQUEUE_OPS = '_tpu_enqueue_ops'\n_TPU_TRAIN_OP = '_tpu_train_op'\n_INFERENCE_ON_TPU_MODE = '_inference_on_tpu'\n_KEY_WHEN_PREDICTIONS_IS_A_TENSOR = '_key_when_predictions_is_a_tensor'\n_TENSOR_PACKER_SMALL_FEATURE_DIM_SIZE = 1\n_TENSOR_PACKER_MINIMUM_NUM_SMALL_FEATURES_TO_GROUP = 5\n_TENSOR_PACKER_CONCATENATED_SMALL_FEATURES_KEY = '_concatenated_small_features'\n\n# Ideally _USE_TPU_KEY should be reserved as well. However there are already\n# models that make use of this key, thus it can not be reserved now to prevent\n# breakage. In the long run, we would like to mitigate this by migrating models\n# off of using _USE_TPU_KEY.\n_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY, _CTX_KEY]\n\n# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is\n# only used for per-core based deployments. For per-host based pipelines, if a\n# user returns a Dataset instance it will be automatically wrapped in a\n# tf.while_loop (This can be disabled by returning features and labels\n# explicitly).\n_WRAP_INPUT_FN_INTO_WHILE_LOOP = False\n\n# Track the adoption of TPUEstimator\n_tpu_estimator_gauge = tf.compat.v2.__internal__.monitoring.BoolGauge(\n '/tensorflow/api/tpu_estimator',\n 'Whether the program uses tpu estimator or not.')\n\nif ops.get_to_proto_function('{}_{}'.format(_TPU_ESTIMATOR,\n _ITERATIONS_PER_LOOP_VAR)) is None:\n ops.register_proto_function(\n '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR),\n proto_type=variable_pb2.VariableDef,\n to_proto=resource_variable_ops._to_proto_fn, # pylint: disable=protected-access\n from_proto=resource_variable_ops._from_proto_fn) # pylint: disable=protected-access\n\n\ndef _is_iterable(obj):\n \"\"\"A Python 2 and 3 compatible util to check whether `obj` is iterable.\"\"\"\n try:\n iter(obj)\n return True\n except TypeError:\n return False\n\n\nclass CatchInvalidHostcallFunctions(control_flow_ops.XLAControlFlowContext):\n\n def AddOp(self, op):\n if op.type in [\n 'AudioSummary', 'AudioSummaryV2', 'HistogramSummary', 'ImageSummary',\n 'MergeSummary', 'ScalarSummary', 'TensorSummary', 'TensorSummaryV2'\n ]:\n raise ValueError('Please use tf.contrib.summary instead of tf.summary '\n 'inside of host_calls.')\n\n\ndef _create_global_step(graph):\n graph = graph or tf.compat.v1.get_default_graph()\n if tf.compat.v1.train.get_global_step(graph) is not None:\n raise ValueError('\"global_step\" already exists.')\n # Create in proper graph and base name_scope.\n with graph.as_default() as g, g.name_scope(None):\n return tf.compat.v1.get_variable(\n tf.compat.v1.GraphKeys.GLOBAL_STEP,\n shape=[],\n dtype=tf.dtypes.int64,\n initializer=tf.compat.v1.initializers.zeros(),\n trainable=False,\n use_resource=True,\n collections=[\n tf.compat.v1.GraphKeys.GLOBAL_VARIABLES,\n tf.compat.v1.GraphKeys.GLOBAL_STEP\n ])\n\n\ndef _create_or_get_iterations_per_loop():\n \"\"\"Creates or gets the iterations_per_loop variable.\n\n In TPUEstimator, the user provided computation, 
the model_fn, is wrapped\n inside a tf.while_loop for peak performance. The iterations of the loop are\n specified by this variable, which adjusts its value on the CPU after each TPU\n program execution and before the next TPU execution.\n\n The purpose of using a variable, rather than a constant, is to allow\n TPUEstimator to adapt the TPU training iterations according to the final steps\n specified by users. For example, if the user sets the iterations_per_loop as 4\n in TPUConfig and steps as 10 in TPUEstimator.train(), the iterations_per_loop\n variable will have the following value before each TPU training.\n\n - 1-th TPU execution: iterations_per_loop = 4\n - 2-th TPU execution: iterations_per_loop = 4\n - 3-th TPU execution: iterations_per_loop = 2\n\n As model_fn increases the global step once per train_op invocation, the global\n step is 10 after all TPU executions, matching the steps=10 inputs passed in by\n users.\n\n Returns:\n A TF non-trainable resource variable.\n\n Raises:\n RuntimeError: If multiple iterations_per_loop variables are found.\n \"\"\"\n graph = tf.compat.v1.get_default_graph()\n collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)\n iter_vars = graph.get_collection(collection_name)\n if len(iter_vars) == 1:\n return iter_vars[0]\n elif len(iter_vars) > 1:\n raise RuntimeError('Multiple iterations_per_loop_var in collection.')\n\n with ops.colocate_with(tf.compat.v1.train.get_global_step()):\n with tf.compat.v1.variable_scope(\n _TPU_ESTIMATOR, reuse=tf.compat.v1.AUTO_REUSE):\n return tf.compat.v1.get_variable(\n _ITERATIONS_PER_LOOP_VAR,\n initializer=tf.compat.v1.initializers.zeros(),\n shape=[],\n dtype=tf.dtypes.int32,\n trainable=False,\n collections=[collection_name, tf.compat.v1.GraphKeys.LOCAL_VARIABLES],\n use_resource=True)\n\n\ndef _sync_variables_ops(ctx):\n \"\"\"Create variable synchronization ops.\n\n Gets the variables back from TPU nodes. This means the variables updated\n by TPU will now be *synced* to host memory.\n In BROADCAST mode, we skip this sync since the variables are usually too\n big to transmit via RPC.\n\n Args:\n ctx: A `_InternalTPUContext` instance with mode.\n\n Returns:\n A list of sync ops.\n \"\"\"\n\n if not ctx.is_input_broadcast_with_iterators():\n return [\n tf.debugging.check_numerics(v.read_value(),\n 'Gradient for %s is NaN' % v.name).op\n for v in tf.compat.v1.trainable_variables()\n ]\n else:\n return [tf.no_op()]\n\n\ndef _increase_eval_step_op(iterations_per_loop):\n \"\"\"Returns an op to increase the eval step for TPU evaluation.\n\n Args:\n iterations_per_loop: Tensor. The number of eval steps running in TPU system\n before returning to CPU host for each `Session.run`.\n\n Returns:\n An operation\n \"\"\"\n eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access\n # Estimator evaluate increases the eval step by 1 by default. 
So, we increase the difference.\n return tf.compat.v1.assign_add(\n eval_step,\n tf.cast(iterations_per_loop - 1, dtype=eval_step.dtype),\n use_locking=True)\n\n\ndef _extract_key_names(tensor_or_dict):\n if isinstance(tensor_or_dict, dict):\n return sorted(tensor_or_dict.keys())\n return []\n\n\nclass PeriodicLogger(object):\n\n def __init__(self, seconds):\n self._log_every_n_seconds = seconds\n self._last_log_time = 0\n\n def log(self, msg, *args, **kw):\n if time.time() - self._last_log_time > self._log_every_n_seconds:\n self._last_log_time = time.time()\n tf.compat.v1.logging.info(msg, *args, **kw)\n\n\nclass _SIGNAL(object):\n \"\"\"Signal used to control the thread of infeed/outfeed.\n\n All preserved signals must be negative numbers. Positive numbers are used to\n indicate the number of iterations for the next training/evaluation loop.\n \"\"\"\n NEXT_BATCH = -1\n STOP = -2\n\n\n@estimator_export(v1=['estimator.tpu.TPUEstimatorSpec'])\nclass TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access\n \"\"\"Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.\n\n See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and\n `export_outputs`.\n\n For evaluation, `eval_metrics` is a tuple of `metric_fn` and `tensors`, where\n `metric_fn` runs on CPU to generate metrics and `tensors` represents the\n `Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`.\n To be precise, TPU evaluation expects a slightly different signature from the\n `tf.estimator.Estimator`. While `EstimatorSpec.eval_metric_ops` expects a\n dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.\n The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The\n `tensors` usually specify the model logits, which are transferred back from\n TPU system to CPU host. All tensors must be batch-major, i.e., the batch\n size is the first dimension. Once all tensors are available at CPU host from\n all shards, they are concatenated (on CPU) and passed as positional arguments\n to the `metric_fn` if `tensors` is a list or keyword arguments if `tensors` is\n a dict. `metric_fn` takes the `tensors` and returns a dict from metric string\n name to the result of calling a metric function, namely a `(metric_tensor,\n update_op)` tuple. See `TPUEstimator` for an MNIST example of how to specify\n the `eval_metrics`.\n\n `scaffold_fn` is a function running on CPU to generate the `Scaffold`. This\n function should not capture any Tensors in `model_fn`.\n\n `host_call` is a tuple of a `function` and a list or dictionary of `tensors`\n to pass to that function and returns a list of Tensors. `host_call` currently\n works for train() and evaluate(). The Tensors returned by the function are\n executed on the CPU on every step, so there is communication overhead when\n sending tensors from TPU to CPU. To reduce the overhead, try reducing the\n size of the tensors. The `tensors` are concatenated along their major (batch)\n dimension, and so must be >= rank 1. 
The `host_call` is useful for writing\n summaries with `tf.contrib.summary.create_file_writer`.\n \"\"\"\n\n def __new__(cls,\n mode,\n predictions=None,\n loss=None,\n train_op=None,\n eval_metrics=None,\n export_outputs=None,\n scaffold_fn=None,\n host_call=None,\n training_hooks=None,\n evaluation_hooks=None,\n prediction_hooks=None):\n \"\"\"Creates a validated `TPUEstimatorSpec` instance.\"\"\"\n cls._host_calls = {}\n if eval_metrics is not None:\n cls._host_calls['eval_metrics'] = eval_metrics\n if host_call is not None:\n cls._host_calls['host_call'] = host_call\n _OutfeedHostCall.validate(cls._host_calls)\n\n training_hooks = tuple(training_hooks or [])\n evaluation_hooks = tuple(evaluation_hooks or [])\n prediction_hooks = tuple(prediction_hooks or [])\n\n for hook in training_hooks + evaluation_hooks + prediction_hooks:\n if not isinstance(hook, tf.compat.v1.train.SessionRunHook):\n raise TypeError(\n 'All hooks must be SessionRunHook instances, given: {}'.format(\n hook))\n\n return super(TPUEstimatorSpec, cls).__new__(\n cls,\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op,\n eval_metrics=eval_metrics,\n export_outputs=export_outputs,\n scaffold_fn=scaffold_fn,\n host_call=host_call,\n training_hooks=training_hooks,\n evaluation_hooks=evaluation_hooks,\n prediction_hooks=prediction_hooks)\n\n def as_estimator_spec(self):\n \"\"\"Creates an equivalent `EstimatorSpec` used by CPU train/eval.\"\"\"\n host_call_ret = _OutfeedHostCall.create_cpu_hostcall(self._host_calls)\n eval_metric_ops = None\n if self.eval_metrics is not None:\n eval_metric_ops = host_call_ret['eval_metrics']\n hooks = None\n if self.host_call is not None:\n hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]\n loss = self.loss\n if tensor_tracer.TensorTracer.is_enabled() \\\n and self.train_op is not None:\n tt = tensor_tracer.TensorTracer()\n loss = tt.trace_cpu(tf.compat.v1.get_default_graph(), loss, self.train_op)\n\n hooks = tuple(hooks or [])\n scaffold = self.scaffold_fn() if self.scaffold_fn else None\n return model_fn_lib.EstimatorSpec(\n mode=self.mode,\n predictions=self.predictions,\n loss=loss,\n train_op=self.train_op,\n eval_metric_ops=eval_metric_ops,\n export_outputs=self.export_outputs,\n scaffold=scaffold,\n training_hooks=self.training_hooks + hooks,\n evaluation_hooks=self.evaluation_hooks + hooks,\n prediction_hooks=self.prediction_hooks + hooks)\n\n\nclass _OpQueueContext(object):\n \"\"\"Manages work queue and thread for a infeed/outfeed thread.\"\"\"\n\n def __init__(self, name, target, args):\n self._name = name\n self._queue = Queue.Queue()\n args = (self,) + args\n self._thread = threading.Thread(name=name, target=target, args=args)\n self._thread.daemon = True\n self._thread.start()\n\n def stop(self):\n self._queue.put(_SIGNAL.STOP)\n\n def send_next_batch_signal(self, iterations):\n self._queue.put(iterations)\n\n def read_iteration_counts(self):\n while True:\n iterations = self._queue.get(block=True)\n tf.compat.v1.logging.debug('%s read iterations %s', self._name,\n iterations)\n if iterations == _SIGNAL.STOP:\n tf.compat.v1.logging.info('%s received shutdown signal, stopping.',\n self._name)\n return\n yield iterations\n\n def join(self):\n tf.compat.v1.logging.info('Shutting down %s thread.', self._name)\n self.stop()\n self._thread.join()\n\n\nclass _OpSignalOnceQueueContext(_OpQueueContext):\n \"\"\"Manages work queue and thread for a infeed/outfeed thread.\n\n This subclass only signals once.\n \"\"\"\n\n def __init__(self, name, target, 
args):\n super(_OpSignalOnceQueueContext, self).__init__(name, target, args)\n self._has_signaled = False\n\n def send_next_batch_signal(self, iterations):\n if not self._has_signaled:\n self._queue.put(iterations)\n self._has_signaled = True\n\n\nclass TPUInfeedOutfeedSessionHook(tf.compat.v1.train.SessionRunHook):\n \"\"\"A Session hook setting up the TPU initialization, infeed, and outfeed.\n\n This hook does two major things:\n 1. initialize and shut down the TPU system.\n 2. launch and join the threads for infeed enqueue and (optional) outfeed\n dequeue.\n \"\"\"\n\n def __init__(self,\n ctx,\n enqueue_ops,\n dequeue_ops,\n tpu_compile_op,\n run_infeed_loop_on_coordinator=True,\n rendezvous=None,\n master=None,\n session_config=None,\n tpu_init_ops=None,\n outfeed_every_n_steps=1):\n self._master_job = ctx.master_job\n self._enqueue_ops = enqueue_ops\n self._dequeue_ops = dequeue_ops\n self._rendezvous = rendezvous\n self._master = master\n self._session_config = session_config\n self._init_ops = list(tpu_init_ops or [])\n if ctx.embedding_config is None:\n self._embedding_layer_config = None\n else:\n self._embedding_layer_config = (\n ctx.embedding_config.tpu_embedding.config_proto)\n self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator\n self._initial_infeed_sleep_secs = (\n ctx.config.tpu_config.initial_infeed_sleep_secs)\n self._tpu_compile_op = tpu_compile_op\n\n # When using model parallelism, the TPU is pre-initialized at startup to\n # fetch mesh information. We skip re-initializing it here for\n # MeshTensorFlow since it places variables on TPU directly. Reinitializing\n # the TPU can cause variable corruption, since the previously allocated\n # memory might be overwritten for other purposes.\n if (ctx.model_parallelism_enabled and\n (ctx.config.tpu_config.per_host_input_for_training is\n tpu_config.InputPipelineConfig.BROADCAST)):\n self._should_initialize_tpu = False\n else:\n self._should_initialize_tpu = True\n self._outfeed_every_n_steps = outfeed_every_n_steps\n\n def begin(self):\n tf.compat.v1.logging.info('TPU job name %s', self._master_job)\n self._iterations_per_loop_var = _create_or_get_iterations_per_loop()\n if self._should_initialize_tpu:\n self._finalize_ops = [\n tf.compat.v1.tpu.shutdown_system(job=self._master_job)\n ]\n else:\n self._finalize_ops = []\n\n summary_writer_init_ops = contrib_summary.summary_writer_initializer_op()\n self._init_ops.extend(summary_writer_init_ops)\n # Get all the writer resources from the initializer, so we know what to\n # flush.\n for op in summary_writer_init_ops:\n self._finalize_ops.append(tf.compat.v2.summary.flush(writer=op.inputs[0]))\n\n def _run_infeed(self, queue_ctx, session):\n tf.compat.v1.logging.info('Starting infeed thread controller.')\n if self._initial_infeed_sleep_secs:\n tf.compat.v1.logging.info('Infeed thread sleeping for %d seconds.',\n self._initial_infeed_sleep_secs)\n time.sleep(self._initial_infeed_sleep_secs)\n tf.compat.v1.logging.info('Infeed thread starting after sleep')\n\n with self._rendezvous.catch_errors(source='infeed', session=session):\n if self._run_infeed_loop_on_coordinator:\n for count, steps in enumerate(queue_ctx.read_iteration_counts()):\n for i in xrange(steps):\n tf.compat.v1.logging.debug('Infeed enqueue for iteration (%d, %d)',\n count, i)\n session.run(self._enqueue_ops)\n else:\n for _ in queue_ctx.read_iteration_counts():\n session.run(self._enqueue_ops)\n tf.compat.v1.logging.info('Infeed thread finished, shutting down.')\n\n def _run_outfeed(self, queue_ctx, 
session):\n tf.compat.v1.logging.info('Starting outfeed thread controller.')\n status_logger = PeriodicLogger(seconds=60)\n with self._rendezvous.catch_errors(source='outfeed', session=session):\n for count, steps in enumerate(queue_ctx.read_iteration_counts()):\n step_counter = 0\n for i in xrange(steps):\n tf.compat.v1.logging.debug('Outfeed dequeue for iteration (%d, %d)',\n count, i)\n if step_counter % self._outfeed_every_n_steps == 0:\n session.run(self._dequeue_ops)\n step_counter += 1\n status_logger.log('Outfeed finished for iteration (%d, %d)', count, i)\n tf.compat.v1.logging.info('Outfeed thread finished, shutting down.')\n\n def _create_infeed_controller(self, name, target, args):\n return _OpQueueContext(name=name, target=target, args=args)\n\n def _assertCompilationSucceeded(self, result, coord):\n proto = tpu_compilation_result.CompilationResultProto()\n proto.ParseFromString(result)\n if proto.status_error_message:\n tf.compat.v1.logging.error('Compilation failed: {}'.format(\n proto.status_error_message))\n coord.request_stop()\n else:\n tf.compat.v1.logging.info('Compilation succeeded')\n\n def after_create_session(self, session, coord):\n if self._should_initialize_tpu:\n tf.compat.v1.logging.info('Init TPU system')\n start = time.time()\n with tf.Graph().as_default():\n with tf.compat.v1.Session(\n self._master, config=self._session_config) as sess:\n sess.run(\n tf.compat.v1.tpu.initialize_system(\n job=self._master_job,\n embedding_config=self._embedding_layer_config))\n tf.compat.v1.logging.info('Initialized TPU in %d seconds',\n time.time() - start)\n\n session.run(\n self._init_ops,\n options=tf.compat.v1.RunOptions(timeout_in_ms=30 * 60 * 1000))\n\n if os.environ.get('TPU_SPLIT_COMPILE_AND_EXECUTE', '') == '1':\n tf.compat.v1.logging.info(\n 'Compiling user program: this may take a while...')\n self._assertCompilationSucceeded(session.run(self._tpu_compile_op), coord)\n\n self._infeed_controller = self._create_infeed_controller(\n name='InfeedController', target=self._run_infeed, args=(session,))\n\n self._outfeed_controller = _OpQueueContext(\n name='OutfeedController', target=self._run_outfeed, args=(session,))\n\n # Enable the worker watchdog to terminate workers on coordinator exit.\n watchdog_timeout = int(os.environ.get('TF_TPU_WATCHDOG_TIMEOUT', '0'))\n if watchdog_timeout > 0:\n session_support.start_worker_watchdog(\n session, shutdown_timeout=watchdog_timeout)\n\n def before_run(self, run_context):\n iterations = run_context.session.run(self._iterations_per_loop_var)\n\n tf.compat.v1.logging.info('Enqueue next (%d) batch(es) of data to infeed.',\n iterations)\n self._infeed_controller.send_next_batch_signal(iterations)\n\n tf.compat.v1.logging.info(\n 'Dequeue next (%d) batch(es) of data from outfeed.', iterations)\n self._outfeed_controller.send_next_batch_signal(iterations)\n\n def end(self, session):\n tf.compat.v1.logging.info('Stop infeed thread controller')\n self._infeed_controller.join()\n self._rendezvous.record_done('infeed')\n\n tf.compat.v1.logging.info('Stop output thread controller')\n self._outfeed_controller.join()\n self._rendezvous.record_done('outfeed')\n\n tf.compat.v1.logging.info('Shutdown TPU system.')\n session.run(self._finalize_ops)\n\n\nclass TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook):\n\n def __init__(self,\n ctx,\n enqueue_ops,\n dequeue_ops,\n tpu_compile_op,\n rendezvous=None,\n master=None,\n session_config=None):\n super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__(\n ctx,\n 
enqueue_ops,\n dequeue_ops,\n tpu_compile_op=tpu_compile_op,\n run_infeed_loop_on_coordinator=False,\n rendezvous=rendezvous,\n master=master,\n session_config=session_config)\n\n def _create_infeed_controller(self, name, target, args):\n return _OpSignalOnceQueueContext(name=name, target=target, args=args)\n\n\nclass _TPUStopAtStepHook(tf.compat.v1.train.SessionRunHook):\n \"\"\"Hook that requests stop at a specified step.\n\n This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with\n following differences for TPU training:\n\n 1. This hook sets the variable for `iterations_per_loop`, which is used by\n `TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed.\n If the `iterations_per_loop` value is specified as time in seconds, the\n number of iterations per `Session.run` will be estimated automatically\n based on per iteration runtime.\n\n As the hook execution order is not guaranteed, the variable update is\n handled in `after_create_session` and `after_run` as\n `TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`.\n\n 2. For each training loop (session.run), the global step could be increased\n multiple times on TPU. The global step tensor value will be explicitly read\n again in `after_run` to ensure the latest value is retrieved to avoid race\n condition.\n \"\"\"\n\n def __init__(self,\n iterations_per_loop_counter,\n num_steps=None,\n final_step=None):\n \"\"\"Initializes a `TPUStopAtStepHook`.\n\n Args:\n iterations_per_loop_counter: A namedtuple of [`value',`unit`] that\n represents the number of 'iterations count' or 'time in seconds' to run\n optimizer per loop, based on the `unit` specified, `count` or `seconds`\n respectively.\n num_steps: Number of steps to execute.\n final_step: Step after which to stop.\n\n Raises:\n ValueError: If one of the arguments is invalid.\n \"\"\"\n if num_steps is None and final_step is None:\n raise ValueError('One of `num_steps` or `final_step` must be specified.')\n if num_steps is not None and final_step is not None:\n raise ValueError(\n 'Only one of `num_steps` or `final_step` can be specified.')\n self._iterations_per_loop_counter = iterations_per_loop_counter\n if self._iterations_per_loop_counter.unit not in ['seconds', 'count']:\n raise ValueError('Only `count` or `seconds` are accepted as the '\n '`iterations_per_loop_counter.unit')\n self._num_steps = num_steps\n self._final_step = final_step\n self._next_iteration_count = 1\n self._iteration_count_estimator = None\n if self._iterations_per_loop_counter.unit == 'seconds':\n self._iteration_count_estimator = (\n iteration_count_estimator.IterationCountEstimator())\n self._start_time = time.time()\n\n def _next_iterations(self, global_step, final_step):\n \"\"\"Computes the next iterations count.\n\n The next iterations count is computed by choosing the smaller of the\n remaining step count (`final_step` - `global_step`) and the estimated\n iterations count returned by the estimator.\n\n Args:\n global_step: The current step.\n final_step: Step after which to stop.\n\n Returns:\n The number of iterations count to run per loop.\n \"\"\"\n remaining_steps = final_step - global_step\n\n if self._iteration_count_estimator is not None:\n estimated_iterations = self._iteration_count_estimator.get(\n self._iterations_per_loop_counter.value)\n else:\n estimated_iterations = self._iterations_per_loop_counter.value\n\n self._next_iteration_count = min(remaining_steps, estimated_iterations)\n return self._next_iteration_count\n\n def 
begin(self):\n \"\"\"Initializes variables.\n\n Initializes the global step and iterations per loop variables.\n\n Raises:\n RuntimeError: An error occurred if global step variable does not exist.\n \"\"\"\n self._global_step_tensor = tf.compat.v1.train.get_global_step()\n if self._global_step_tensor is None:\n raise RuntimeError('Global step should be created.')\n\n self._iterations_per_loop_var = _create_or_get_iterations_per_loop()\n\n def after_create_session(self, session, coord):\n \"\"\"Computes and updates the first time iterations count.\n\n The iterations are computed by choosing the smaller of the (`final step` -\n `global step`), and the initial estimated iterations returned by the\n estimator (by default is 1).\n\n Args:\n session: A TensorFlow Session that has been created.\n coord: A Coordinator object which keeps track of all threads.\n \"\"\"\n global_step = session.run(self._global_step_tensor)\n if self._final_step is None:\n self._final_step = global_step + self._num_steps\n\n iterations = self._next_iterations(global_step, self._final_step)\n self._iterations_per_loop_var.load(iterations, session=session)\n\n def before_run(self, run_context):\n \"\"\"Reset the timer.\"\"\"\n if self._iteration_count_estimator is not None:\n self._start_time = time.time()\n\n def after_run(self, run_context, run_values):\n \"\"\"Computes the next iterations per loop value or terminates.\n\n Computes the elapsed time to run the last optimizer loop and if the\n `IterationCountEstimator` is used, records the elapsed time and iterations\n count. If the final step count has been reached, terminates. Otherwise,\n computes and updates the number of iterations to run the optimizer per loop.\n\n Args:\n run_context: A `SessionRunContext` object.\n run_values: A SessionRunValues object.\n \"\"\"\n if self._iteration_count_estimator is not None:\n elapsed_time = time.time() - self._start_time\n tf.compat.v1.logging.info('ElapsedTime: %.3f', elapsed_time)\n self._iteration_count_estimator.update(elapsed_time,\n self._next_iteration_count)\n\n # Global step cannot be retrieved via SessionRunArgs and before_run due to\n # race condition.\n global_step = run_context.session.run(self._global_step_tensor)\n if global_step >= self._final_step:\n run_context.request_stop()\n else:\n iterations = self._next_iterations(global_step, self._final_step)\n self._iterations_per_loop_var.load(\n iterations, session=run_context.session)\n\n\nclass _SetEvalIterationsHook(tf.compat.v1.train.SessionRunHook):\n \"\"\"Hook that requests stop at a specified step.\"\"\"\n\n def __init__(self, num_steps):\n \"\"\"Initializes a `_SetEvalIterationsHook`.\n\n Args:\n num_steps: Number of steps to execute.\n \"\"\"\n self._num_steps = num_steps\n\n def begin(self):\n self._iterations_per_loop_var = _create_or_get_iterations_per_loop()\n\n def after_create_session(self, session, coord):\n self._iterations_per_loop_var.load(self._num_steps, session=session)\n\n\nclass _StoppingPredictHook(tf.compat.v1.train.SessionRunHook):\n \"\"\"Hook that requests stop according to the stopping signal in prediction.\"\"\"\n\n def __init__(self, scalar_stopping_signal):\n self._scalar_stopping_signal = scalar_stopping_signal\n\n def begin(self):\n self._iterations_per_loop_var = _create_or_get_iterations_per_loop()\n\n def after_create_session(self, session, coord):\n # This is not necessary as we do not run infeed enqueue and outfeed dequeue\n # in side threads for prediction model. 
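# ---- [Editor's illustrative sketch; not part of the original module] ----
# The hook above picks the next loop size as the smaller of the remaining
# step budget and the estimator's suggestion. A minimal standalone analogue
# of `_next_iterations` (hypothetical names, plain Python):
def _sketch_next_iterations(global_step, final_step, estimated_iterations):
  # Never run past final_step; never exceed the estimator's suggestion.
  return min(final_step - global_step, estimated_iterations)

# e.g. _sketch_next_iterations(90, 100, 64) == 10: only 10 steps remain.
# --------------------------------------------------------------------------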
# But it makes the\n # TPUInfeedOutfeedSessionHook print a nicer message.\n self._iterations_per_loop_var.load(1, session=session)\n\n def before_run(self, run_context):\n return tf.compat.v1.train.SessionRunArgs(self._scalar_stopping_signal)\n\n def after_run(self, run_context, run_values):\n _ = run_context\n scalar_stopping_signal = run_values.results\n if _StopSignals.should_stop(scalar_stopping_signal):\n # NOTE(xiejw): In prediction, stopping signals are inserted for each\n # batch. And we append one more batch to signal the system it should stop.\n # The data flow might look like\n #\n # batch 0: images, labels, stop = 0 (user provided)\n # batch 1: images, labels, stop = 0 (user provided)\n # ...\n # batch 99: images, labels, stop = 0 (user provided)\n # batch 100: images, labels, stop = 1 (TPUEstimator appended)\n #\n # where the final batch (id = 100) is appended by TPUEstimator, so we\n # should drop it before returning the predictions to the user.\n # To achieve that, we throw the OutOfRangeError in after_run. Once\n # Monitored Session sees this error in SessionRunHook.after_run, the\n # \"current\" prediction, i.e., batch with id=100, will be discarded\n # immediately.\n raise tf.errors.OutOfRangeError(None, None, 'Stopped by stopping signal.')\n\n\ndef generate_per_core_enqueue_ops_fn_for_host(ctx, input_fn,\n inputs_structure_recorder,\n host_device, host_id):\n \"\"\"Generates infeed enqueue ops for per-core input_fn on a single host.\"\"\"\n captured_infeed_queue = _CapturedObject()\n tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)\n\n def enqueue_ops_fn():\n \"\"\"A fn that returns enqueue_ops.\"\"\"\n num_cores_per_host = ctx.num_of_cores_per_host\n per_host_sharded_inputs = []\n for core_ordinal in range(num_cores_per_host):\n with ops.name_scope('ordinal_%d' % (core_ordinal)):\n user_context = tpu_context.TPUContext(\n internal_ctx=ctx,\n input_device=host_device,\n invocation_index=host_id * ctx.num_of_cores_per_host + core_ordinal,\n host_id=host_id)\n inputs = _Inputs.from_input_fn(input_fn(user_context))\n if inputs.is_dataset:\n raise TypeError(\n '`input_fn` returning `Dataset` is not yet supported in '\n 'per-Core input pipeline deployment. 
Please set '\n 'TPUConfig.per_host_input_for_training to True or return '\n '`features` and `labels` from `input_fn`')\n features, labels = inputs.features_and_labels()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl)\n return per_host_enqueue_ops\n\n return enqueue_ops_fn, captured_infeed_queue\n\n\ndef generate_per_host_enqueue_ops_fn_for_host(ctx, input_fn,\n inputs_structure_recorder,\n batch_axis, device, host_id):\n \"\"\"Generates infeed enqueue ops for per-host input_fn on a single host.\"\"\"\n captured_infeed_queue = _CapturedObject()\n\n dataset_initializer = None\n\n with tf.compat.v1.device(device):\n user_context = tpu_context.TPUContext(\n internal_ctx=ctx,\n input_device=device,\n invocation_index=host_id,\n host_id=host_id)\n inputs = _Inputs.from_input_fn(input_fn(user_context))\n\n is_dataset = inputs.is_dataset\n if ctx.mode == model_fn_lib.ModeKeys.PREDICT:\n if not is_dataset:\n raise TypeError(\n 'For mode PREDICT, `input_fn` must return `Dataset` instead of '\n '`features` and `labels`.')\n if batch_axis is not None:\n raise TypeError('For mode PREDICT, batch_axis is not supported yet.')\n inputs = _InputsWithStoppingSignals(\n dataset=inputs.dataset,\n batch_size=ctx.batch_size_for_input_fn,\n add_padding=True)\n\n if is_dataset:\n dataset_initializer = inputs.dataset_initializer()\n\n tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)\n\n def enqueue_ops_fn():\n \"\"\"A Fn returning the TPU infeed enqueue ops.\n\n By providing as a Fn, it can be invoked inside the tf.while_loop such that\n the input pipeline for multiple iterations can be executed by one\n Session.run call.\n\n Returns:\n list of dict of ops.\n \"\"\"\n with tf.compat.v1.device(device):\n num_of_replicas_per_host = ctx.num_of_replicas_per_host\n # Convert user input to features and labels. 
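# ---- [Editor's illustrative sketch; not part of the original module] ----
# Why enqueue ops are built by a Fn, per the docstring above: the Fn can be
# re-invoked as the body of a while_loop, so one Session.run drives many
# infeed iterations. A hedged, graph-mode analogue of the
# `_wrap_computation_in_while_loop` helper used elsewhere in this file
# (names illustrative):
import tensorflow as tf

def _sketch_wrap_in_while_loop(n, op_fn):
  def body(i):
    # Make each loop iteration depend on the freshly built enqueue ops.
    with tf.control_dependencies(op_fn()):
      return i + 1
  return tf.compat.v1.while_loop(lambda i: i < n, body, [tf.constant(0)])
# --------------------------------------------------------------------------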
# If the user returns a\n # dataset, it is initialized and the features and labels are extracted via\n # `dataset.iterator.get_next()`.\n features, labels = inputs.features_and_labels()\n signals = inputs.signals()\n\n features, labels, enqueue_datas_list = (\n _tpu_estimator_embedding.split_inputs(\n ctx,\n features,\n labels,\n num_cores_per_batch=num_of_replicas_per_host))\n\n inputs_structure_recorder.validate_and_record_structure(features, labels)\n unsharded_tensor_list = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels, signals))\n\n infeed_queue = tpu_feed.InfeedQueue(\n tuple_types=[t.dtype for t in unsharded_tensor_list],\n tuple_shapes=[t.shape for t in unsharded_tensor_list],\n shard_dimensions=batch_axis)\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_number_of_shards(num_of_replicas_per_host)\n per_host_enqueue_ops = (\n infeed_queue.split_inputs_and_generate_enqueue_ops(\n unsharded_tensor_list,\n placement_function=lambda x: device,\n tpu_ordinal_function=tpu_ordinal_function_impl))\n\n if ctx.embedding_config:\n per_host_enqueue_ops.extend(\n ctx.embedding_config.tpu_embedding.generate_enqueue_ops(\n enqueue_datas_list))\n\n if signals is None:\n return per_host_enqueue_ops\n else:\n return {\n 'ops': per_host_enqueue_ops,\n 'signals': signals,\n }\n\n return enqueue_ops_fn, captured_infeed_queue, dataset_initializer\n\n\ndef generate_per_host_v2_enqueue_ops_fn_for_host(ctx, input_fn,\n inputs_structure_recorder,\n device, host_id,\n invocation_index):\n \"\"\"Generates infeed enqueue ops for per-host input_fn on a single host.\"\"\"\n captured_infeed_queue = _CapturedObject()\n dataset_initializer = None\n\n with tf.compat.v1.device(device):\n user_context = tpu_context.TPUContext(\n internal_ctx=ctx,\n input_device=device,\n invocation_index=invocation_index,\n host_id=host_id)\n inputs = _Inputs.from_input_fn(input_fn(user_context))\n\n is_dataset = inputs.is_dataset\n if not is_dataset:\n raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '\n 'input pipeline configuration.')\n\n # Be aware that when num_cores_per_replica > num_cores_per_host,\n # ctx.num_of_replicas_per_host is 0.\n if ctx.mode == model_fn_lib.ModeKeys.PREDICT:\n inputs = _InputsWithStoppingSignals(\n dataset=inputs.dataset,\n batch_size=ctx.batch_size_for_input_fn,\n add_padding=True,\n num_invocations_per_step=max(1, ctx.num_of_replicas_per_host))\n\n dataset_initializer = inputs.dataset_initializer()\n\n tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)\n\n def device_function_impl(shard_id):\n if ctx.device_assignment is not None:\n # Find the replica_id of the host's logical core 0.\n # The current host_id is guaranteed to contain the logical core 0,\n # even when num_cores_per_replica > num_cores_per_host -- the function\n # caller makes sure that this host_id must be receiving data (calls\n # input_fn).\n replica_id = ctx.device_assignment.lookup_replicas(\n task_id=host_id, logical_core=0)[shard_id]\n return ctx.tpu_host_placement_function(replica_id=replica_id)\n else:\n return None\n\n def enqueue_ops_fn():\n \"\"\"Generates the per_host enqueue ops.\"\"\"\n control_deps = []\n per_host_sharded_inputs = []\n enqueue_datas_list = []\n # Be aware that when num_cores_per_replica > num_cores_per_host,\n # ctx.num_of_replicas_per_host is 0.\n num_replicas_per_host = max(1, ctx.num_of_replicas_per_host)\n cached_signals = None\n with tf.compat.v1.device(device):\n if not inputs.is_dataset:\n raise TypeError('`input_fn` 
must return a `Dataset` for this mode.')\n for host in range(num_replicas_per_host):\n # Use control dependencies to ensure a deterministic ordering.\n if ctx.allow_per_host_v2_parallel_get_next:\n features, labels = inputs.features_and_labels() # Calls get_next()\n with tf.control_dependencies(control_deps):\n if not ctx.allow_per_host_v2_parallel_get_next:\n features, labels = inputs.features_and_labels() # Calls get_next()\n signals = inputs.signals()\n\n # All the replicas share replica 0's stopping signal.\n # This avoids inconsistent state among different model replicas.\n if cached_signals:\n signals['stopping'] = cached_signals['stopping']\n else:\n cached_signals = signals\n\n features, labels, enqueue_data = (\n _tpu_estimator_embedding.split_inputs(ctx, features, labels))\n if len(enqueue_data) != 1:\n raise RuntimeError(('Missing or extra enqueue_data for host {}. '\n 'len(enqueue_data) = {}.').format(\n host, len(enqueue_data)))\n enqueue_datas_list.append(enqueue_data[0])\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels, signals))\n control_deps.extend(flattened_inputs)\n per_host_sharded_inputs.append(flattened_inputs)\n\n if inputs_structure_recorder.flattened_input_dims:\n input_partition_dims = inputs_structure_recorder.flattened_input_dims\n if signals:\n input_partition_dims += [None] * len(signals)\n # pylint: disable=protected-access\n infeed_queue = tpu_feed._PartitionedInfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]),\n host_id=host_id,\n input_partition_dims=input_partition_dims,\n device_assignment=ctx.device_assignment)\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs)\n else:\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs,\n tpu_ordinal_function=tpu_ordinal_function_impl,\n placement_function=device_function_impl)\n\n captured_infeed_queue.capture(infeed_queue)\n\n if ctx.embedding_config:\n per_host_enqueue_ops.extend(\n ctx.embedding_config.tpu_embedding.generate_enqueue_ops(\n enqueue_datas_list))\n\n if signals is None:\n return per_host_enqueue_ops\n else:\n return {\n 'ops': per_host_enqueue_ops,\n 'signals': signals,\n }\n\n return enqueue_ops_fn, captured_infeed_queue, dataset_initializer\n\n\ndef generate_broadcast_enqueue_ops_fn(ctx, input_fn, inputs_structure_recorder,\n num_hosts):\n \"\"\"Generates infeed enqueue ops for one input_fn on all the hosts.\"\"\"\n captured_infeed_queue = _CapturedObject()\n dataset_initializer = None\n device_0 = ctx.tpu_host_placement_function(host_id=0)\n with tf.compat.v1.device(device_0):\n user_context = tpu_context.TPUContext(\n internal_ctx=ctx, input_device=device_0, invocation_index=0, host_id=0)\n inputs = _Inputs.from_input_fn(input_fn(user_context))\n\n is_dataset = inputs.is_dataset\n if ctx.mode == model_fn_lib.ModeKeys.PREDICT:\n if not is_dataset:\n raise TypeError(\n 'For mode PREDICT, `input_fn` must return `Dataset` instead of '\n '`features` and `labels`.')\n\n inputs = _InputsWithStoppingSignals(\n dataset=inputs.dataset,\n batch_size=ctx.batch_size_for_input_fn,\n add_padding=True)\n\n if is_dataset:\n dataset_initializer = inputs.dataset_initializer()\n num_replicas_per_host = ctx.num_of_replicas_per_host\n\n def tpu_ordinal_function_impl(shard_id):\n if 
ctx.device_assignment:\n return ctx.device_assignment.tpu_ordinal(replica=shard_id)\n else:\n return shard_id % num_replicas_per_host\n\n def device_function_impl(shard_id):\n # shard_id ranges from 0 to num_of_replicas_per_host - 1.\n # A shard is a replica inside a host.\n # In broadcast mode (generate_broadcast_enqueue_ops_fn), the enqueue ops\n # are always executed on the first host. Thus shard_id equals to replica_id.\n return ctx.tpu_host_placement_function(replica_id=shard_id)\n\n def enqueue_ops_fn():\n \"\"\"Generates enqueue ops for all the hosts.\"\"\"\n broadcasted_inputs = []\n flattened_inputs = None # Cache result from input_fn.\n signals = None\n num_replicas = ctx.num_replicas\n core_id = 0\n for host_id in xrange(num_hosts):\n with tf.compat.v1.device(\n ctx.tpu_host_placement_function(host_id=host_id)):\n for _ in xrange(ctx.num_of_replicas_per_host):\n # Note: input_fn is only called once at host 0 for the first replica.\n # The features and labels returned from that invocation are\n # broadcasted to other replicas(including the replicas on other\n # hosts).\n if flattened_inputs is None:\n features, labels = inputs.features_and_labels() # Calls get_next()\n signals = inputs.signals()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels, signals))\n if (ctx.config.tpu_config.eval_training_input_configuration is\n tpu_config.InputPipelineConfig.SLICED):\n input_slices = [\n tf.split(x, num_replicas) for x in flattened_inputs\n ]\n if (ctx.config.tpu_config.eval_training_input_configuration is\n tpu_config.InputPipelineConfig.SLICED):\n # for each core, slice out the flattened_inputs for each core.\n broadcasted_inputs.append([x[core_id] for x in input_slices])\n core_id += 1\n else:\n broadcasted_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(broadcasted_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n enqueue_ops = infeed_queue.generate_enqueue_ops(\n broadcasted_inputs,\n tpu_ordinal_function=tpu_ordinal_function_impl,\n placement_function=device_function_impl)\n\n if signals is None:\n return enqueue_ops\n else:\n return {\n 'ops': enqueue_ops,\n 'signals': signals,\n }\n\n return enqueue_ops_fn, captured_infeed_queue, dataset_initializer\n\n\nclass TensorPacker(object):\n \"\"\"Pack and unpack small tensors into a big one for efficiency.\"\"\"\n\n def __init__(self, small_feature_dim_size,\n minimum_num_small_features_to_group):\n self._small_feature_dim_size = small_feature_dim_size\n self._minimum_num_small_features_to_group = (\n minimum_num_small_features_to_group)\n\n def maybe_concatenate_features(self, features):\n \"\"\"If there are enough small tensors, concat them for performance.\"\"\"\n self._small_feature_names = {}\n self._small_feature_sizes = {}\n feature_names = _extract_key_names(features)\n if feature_names: # Not a single tensor.\n # First pass: see if it is worth concatenating the small features.\n for name in feature_names:\n tensor = features[name]\n # We do not handle nested inputs here.\n if not isinstance(tensor, tf.Tensor):\n return\n shape = tensor.get_shape().as_list()\n dtype = tensor.dtype\n if (len(shape) == 2 and shape[1] is not None and\n shape[1] <= self._small_feature_dim_size):\n tf.compat.v1.logging.log_first_n(\n tf.compat.v1.logging.INFO,\n 'Found small feature: %s %s', 1, name, shape)\n if tensor.dtype not in 
self._small_feature_names:\n self._small_feature_names[dtype] = []\n self._small_feature_sizes[dtype] = []\n self._small_feature_names[dtype].append(name)\n self._small_feature_sizes[dtype].append(shape[1])\n\n dtypes_ = list(self._small_feature_names.keys())\n for dtype in dtypes_:\n # If we could find 5 (or more) [batch_size, 1] dense features,\n # we will group them.\n if (len(self._small_feature_names[dtype]) <\n self._minimum_num_small_features_to_group):\n self._small_feature_names.pop(dtype) # reset\n self._small_feature_sizes.pop(dtype) # reset\n\n # Second pass: separate small features out\n small_feature_tensors = {}\n for dtype in self._small_feature_names:\n small_feature_tensors[dtype] = []\n for name in self._small_feature_names[dtype]:\n small_feature_tensors[dtype].append(features.pop(name))\n\n # Add the concat Tensor to features with a special key.\n for dtype in self._small_feature_names:\n key = self._get_small_feature_key(dtype)\n if key in features:\n raise ValueError('{} is reserved as feature key for concatenated '\n 'small features.'.format(key))\n features[key] = (tf.concat(small_feature_tensors[dtype], axis=1))\n\n def maybe_split_features(self, maybe_concatenated_features):\n for dtype in self._small_feature_names:\n key = self._get_small_feature_key(dtype)\n concatenated_small_features = maybe_concatenated_features.pop(key)\n splits = tf.split(\n concatenated_small_features, self._small_feature_sizes[dtype], axis=1)\n for name, split in zip(self._small_feature_names[dtype], splits):\n maybe_concatenated_features[name] = split\n\n def _get_small_feature_key(self, dtype):\n return _TENSOR_PACKER_CONCATENATED_SMALL_FEATURES_KEY + '_' + str(dtype)\n\n\nclass _InputPipeline(object):\n \"\"\"`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.\n\n `_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from\n the call site. To be precise, based on the configuration in\n `_InternalTPUContext`, it invokes `input_fn` for all cores (usually\n multi-host TPU training) or for one host (usually for single-host TPU\n evaluation), and sends all `features` and `labels` returned by `input_fn` to\n TPU infeed. For per-core invocation, `features` and `labels` are piped to\n infeed directly, one tuple for each core. For per-host invocation, `features`\n and `labels` are split at host (with respect to `batch_axis`) and piped to all\n cores accordingly.\n\n In addition, flatten/unflatten are handled by `_InputPipeline` also. Model\n inputs returned by the `input_fn` can have one of the following forms:\n 1. features\n 2. (features, labels)\n 3. ((arbitrarily nested structure of features), labels)\n\n Internally, form 1 is reformed to `(features, None)` as features and labels\n are passed separately to underlying methods. For TPU training, TPUEstimator\n may expect multiple `features` and `labels` tuples, one for each core.\n\n TPUEstimator allows various different structures for inputs (namely `features`\n and `labels`). Both `features` and `labels` can be any nested structure\n supported by TF nest (namely, dict, tuples, namedtuples or any nested\n structure of such Tensors). 
`labels` could be `None` as well.\n\n These are flattened before they are passed to the infeed/outfeed library\n as it expects flattened lists.\n \"\"\"\n\n class InputsStructureRecorder(object):\n \"\"\"Records the structure of the inputs.\"\"\"\n\n def __init__(self, input_partition_dims=None):\n # Holds the structure of inputs.\n self._feature_structure = {}\n self._flattened_input_dims = None\n\n if input_partition_dims:\n # This should have been validated in TPUConfig.\n assert len(input_partition_dims) <= 2, 'must have 1 or 2 elements.'\n if len(input_partition_dims) == 2:\n self._feature_dims, self._label_dims = input_partition_dims\n else:\n self._feature_dims = input_partition_dims[0]\n self._label_dims = None\n\n assert self._feature_dims is not None, ('input_partition_dims[0] must '\n 'not be None')\n else:\n self._feature_dims = None\n self._label_dims = None\n\n # Internal state.\n self._initialized = False\n\n @property\n def flattened_input_dims(self):\n assert self._initialized, 'InputsStructureRecorder is not initialized.'\n return self._flattened_input_dims\n\n def has_labels(self):\n return 'labels' in self._feature_structure\n\n def _flatten_input_dims(self, features, labels, feature_dims, label_dims):\n \"\"\"Flatten input dims with the same order as flattened input tensors.\"\"\"\n\n try:\n flattened_input_dims = data_nest.flatten_up_to(features, feature_dims)\n except TypeError as e:\n raise ValueError(\n 'TPUConfig.input_partition_dims[0] mismatched the structure of'\n ' features. input_partition_dims[0]: {}, features {}. {}'.format(\n feature_dims, features, e))\n\n if labels is not None:\n if label_dims is not None:\n try:\n flattened_input_dims.extend(\n data_nest.flatten_up_to(labels, self._label_dims))\n except TypeError as e:\n raise ValueError(\n 'TPUConfig.input_partition_dims[1] mismatched the structure of'\n ' labels. input_partition_dims[1]: {}, labels: {}. {}'.format(\n label_dims, labels, e))\n else:\n num_label_tensors = len(data_nest.flatten(labels))\n flattened_input_dims.extend([None] * num_label_tensors)\n return flattened_input_dims\n\n def validate_and_record_structure(self, features, labels):\n \"\"\"Validates and records the structure of `features` and `labels`.\"\"\"\n # Extract structure.\n feature_names = _extract_key_names(features)\n label_names = _extract_key_names(labels)\n\n if not self._initialized:\n # Record structure.\n self._initialized = True\n if self._feature_dims is not None:\n feature_dims_names = _extract_key_names(self._feature_dims)\n if feature_dims_names != feature_names:\n raise ValueError(\n 'TPUConfig.input_partition_dims[0] mismatched feature'\n ' keys. Expected {}, got {}'.format(feature_names,\n feature_dims_names))\n label_dims_names = _extract_key_names(self._label_dims)\n if self._label_dims is not None and label_dims_names != label_names:\n raise ValueError(\n 'TPUConfig.input_partition_dims[1] mismatched label'\n ' keys. 
Expected {}, got {}'.format(label_names,\n label_dims_names))\n self._flattened_input_dims = self._flatten_input_dims(\n features, labels, self._feature_dims, self._label_dims)\n\n def flatten_features_and_labels(self, features, labels, signals=None):\n \"\"\"Flattens the `features` and `labels` to a single tensor list.\"\"\"\n self.tensor_packer = TensorPacker(\n _TENSOR_PACKER_SMALL_FEATURE_DIM_SIZE,\n _TENSOR_PACKER_MINIMUM_NUM_SMALL_FEATURES_TO_GROUP)\n self.tensor_packer.maybe_concatenate_features(features)\n self._feature_structure['features'] = features\n if labels is not None:\n self._feature_structure['labels'] = labels\n if signals is not None:\n self._feature_structure['signals'] = signals\n return data_nest.flatten(self._feature_structure)\n\n def unflatten_features_and_labels(self, flattened_inputs):\n \"\"\"Restores the flattened inputs to original features and labels form.\n\n Args:\n flattened_inputs: Flattened inputs for each shard.\n\n Returns:\n A tuple of (`features`, `labels`), where `labels` could be None.\n Each one, if present, should have identical structure (single tensor vs\n dict) as the one returned by input_fn.\n\n Raises:\n ValueError: If the number of expected tensors from `flattened_inputs`\n mismatches the recorded structure.\n \"\"\"\n\n unflattened_inputs = data_nest.pack_sequence_as(self._feature_structure,\n flattened_inputs)\n features = unflattened_inputs['features']\n self.tensor_packer.maybe_split_features(features)\n return _Inputs(\n features,\n unflattened_inputs.get('labels'),\n signals=unflattened_inputs.get('signals'))\n\n def __init__(self, input_fn, batch_axis, ctx):\n \"\"\"Constructor.\n\n Args:\n input_fn: input fn for train or eval.\n batch_axis: A python tuple of int values describing how each tensor\n produced by the Estimator `input_fn` should be split across the TPU\n compute shards.\n ctx: A `_InternalTPUContext` instance with mode.\n\n Raises:\n ValueError: If both `sharded_features` and `num_cores` are `None`.\n \"\"\"\n self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder(\n ctx.input_partition_dims)\n\n self._sharded_per_core = ctx.is_input_sharded_per_core()\n self._input_fn = input_fn\n self._infeed_queue = None\n self._ctx = ctx\n self._batch_axis = batch_axis\n\n def generate_infeed_enqueue_ops_and_dequeue_fn(self):\n \"\"\"Generates infeed enqueue ops and dequeue_fn.\"\"\"\n # While tf.while_loop is called, the body function, which invokes\n # `enqueue_fn` passed in, is called to construct the graph. So, input_fn\n # structure is recorded.\n enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (\n self._invoke_input_fn_and_record_structure())\n\n self._validate_input_pipeline()\n\n def dequeue_fn():\n \"\"\"dequeue_fn is used by TPU to retrieve the tensors.\"\"\"\n # In the model-parallel case, both the host-side and device-side\n # computations must agree on the core on which infeed takes place. 
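# ---- [Editor's illustrative sketch; not part of the original module] ----
# The recorder's flatten/unflatten contract above round-trips structure
# exactly: the same skeleton used to flatten must be reused to unflatten,
# so tensor order is preserved. A self-contained example:
import tensorflow as tf

structure = {'features': {'x': 0.0}, 'labels': 0}
flat = tf.nest.flatten(structure)                     # [0.0, 0]
restored = tf.nest.pack_sequence_as(structure, flat)  # same nesting back
assert restored == structure
# --------------------------------------------------------------------------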
# We\n # choose to perform infeed on logical core 0 of each replica.\n values = self._infeed_queue.generate_dequeue_op(tpu_device=0)\n # The unflatten process uses the structure information recorded above.\n return self._inputs_structure_recorder.unflatten_features_and_labels(\n values)\n\n return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)\n\n def _invoke_input_fn_and_record_structure(self):\n \"\"\"Deploys the input pipeline and records input structure.\"\"\"\n enqueue_ops = []\n infeed_queues = []\n all_dataset_initializers = []\n num_hosts = self._ctx.num_hosts\n tpu_host_placement_fn = self._ctx.tpu_host_placement_function\n\n run_infeed_loop_on_coordinator = True\n\n if self._sharded_per_core:\n # Per-Core input pipeline deployment.\n # Invoke the input pipeline for each core and place it on the\n # corresponding host.\n for host_id in range(num_hosts):\n host_device = tpu_host_placement_fn(host_id=host_id)\n with tf.compat.v1.device(host_device):\n with ops.name_scope('input_pipeline_task%d' % (host_id)):\n enqueue_ops_fn, captured_infeed_queue = (\n generate_per_core_enqueue_ops_fn_for_host(\n self._ctx, self._input_fn, self._inputs_structure_recorder,\n host_device, host_id))\n\n if _WRAP_INPUT_FN_INTO_WHILE_LOOP:\n run_infeed_loop_on_coordinator = False\n enqueue_ops.append(\n _wrap_computation_in_while_loop(\n device=host_device, op_fn=enqueue_ops_fn))\n else:\n enqueue_ops.append(enqueue_ops_fn())\n # Infeed_queue_getter must be called after enqueue_ops_fn is called.\n infeed_queues.append(captured_infeed_queue.get())\n\n elif self._ctx.is_input_broadcast_with_iterators():\n # Only calls input_fn on host 0.\n host_device = tpu_host_placement_fn(host_id=0)\n enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (\n generate_broadcast_enqueue_ops_fn(self._ctx, self._input_fn,\n self._inputs_structure_recorder,\n num_hosts))\n if dataset_initializer:\n all_dataset_initializers.append(dataset_initializer)\n run_infeed_loop_on_coordinator = False\n wrap_fn = (\n _wrap_computation_in_while_loop\n if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else\n _wrap_computation_in_while_loop_with_stopping_signals)\n enqueue_ops.append(wrap_fn(device=host_device, op_fn=enqueue_ops_fn))\n else:\n enqueue_ops.append(enqueue_ops_fn())\n infeed_queues.append(captured_infeed_queue.get())\n\n else:\n # This branch handles two scenarios:\n # num_cores_per_replica > num_cores_per_host\n # and num_cores_per_replica <= num_cores_per_host\n # First, get the set of host_ids by iterating replicas.\n # We only want and will get the set of *unique* host_ids\n # *that will call input_fn*. 
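# ---- [Editor's illustrative sketch; not part of the original module] ----
# The branch below recovers a host (task) id from a replica's device string
# by splitting (see the TODO there). A concrete example, assuming the usual
# TF device-string format:
host_device = '/job:worker/replica:0/task:3/device:CPU:0'
host_id = int(host_device.split('/task:')[1].split('/device:')[0])
assert host_id == 3
# --------------------------------------------------------------------------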
For each replica, we only call the input_fn\n # from the CPU host that contains logical core 0.\n\n # Use a list here to ensure deterministic order.\n host_id_with_invocation_id_pair = []\n\n if not self._ctx.is_replica_across_hosts():\n for host_id in range(num_hosts):\n invocation_index = host_id\n host_id_with_invocation_id_pair.append((host_id, invocation_index))\n else:\n for replica_id in xrange(self._ctx.num_replicas):\n invocation_index = replica_id\n host_device, _ = self._ctx.device_for_replica(replica_id)\n # TODO(lehou): Get host_id in a better way.\n host_id = int(host_device.split('/task:')[1].split('/device:')[0])\n host_id_with_invocation_id_pair.append((host_id, invocation_index))\n\n for (host_id, invocation_index) in host_id_with_invocation_id_pair:\n host_device = tpu_host_placement_fn(host_id=host_id)\n with tf.compat.v1.device(host_device):\n with ops.name_scope('input_pipeline_task%d' % (host_id)):\n if self._ctx.is_input_per_host_with_iterators():\n enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (\n generate_per_host_v2_enqueue_ops_fn_for_host(\n self._ctx, self._input_fn,\n self._inputs_structure_recorder, host_device, host_id,\n invocation_index))\n else:\n enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (\n generate_per_host_enqueue_ops_fn_for_host(\n self._ctx, self._input_fn,\n self._inputs_structure_recorder, self._batch_axis,\n host_device, host_id))\n\n # NOTE(xiejw): We dispatch here based on the return type of the\n # users `input_fn`.\n #\n # 1. If input_fn returns a Dataset instance, we initialize the\n # iterator outside of tf.while_loop, and call the iterator.get_next\n # inside tf.while_loop. This should be always safe.\n #\n # 2. If input_fn returns (features, labels), it is too late to wrap\n # them inside tf.while_loop, as resource initialization cannot be\n # handled in TF control flow properly. In this case, we will use\n # python loop to enqueue the data into TPU system. This may be\n # slow compared to the previous case.\n if dataset_initializer:\n all_dataset_initializers.append(dataset_initializer)\n run_infeed_loop_on_coordinator = False\n wrap_fn = (\n _wrap_computation_in_while_loop\n if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else\n _wrap_computation_in_while_loop_with_stopping_signals)\n enqueue_ops.append(\n wrap_fn(device=host_device, op_fn=enqueue_ops_fn))\n else:\n enqueue_ops.append(enqueue_ops_fn())\n infeed_queues.append(captured_infeed_queue.get())\n\n # infeed_queue is used to generate dequeue ops. The only thing it uses for\n # dequeue is dtypes and types. So, any one can be used. Here, grab the\n # first one.\n self._infeed_queue = infeed_queues[0]\n return enqueue_ops, [\n util_lib.MultiHostDatasetInitializerHook(all_dataset_initializers)\n ], run_infeed_loop_on_coordinator\n\n def _validate_input_pipeline(self):\n \"\"\"Validates the input pipeline.\n\n Perform some sanity checks to log user friendly information. We should\n error out to give users better error message. But, if\n _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot break\n user code, so, log a warning.\n\n Raises:\n RuntimeError: If the validation failed.\n \"\"\"\n if tf.compat.v1.get_default_graph().get_collection(\n tf.compat.v1.GraphKeys.QUEUE_RUNNERS):\n err_msg = ('Input pipeline contains one or more QueueRunners. '\n 'It could be slow and not scalable. 
Please consider '\n 'converting your input pipeline to use `tf.data` instead (see '\n 'https://www.tensorflow.org/guide/datasets for '\n 'instructions.')\n if _WRAP_INPUT_FN_INTO_WHILE_LOOP:\n raise RuntimeError(err_msg)\n else:\n logging.warn(err_msg)\n\n\ndef call_computation(computation_inputs, computation, batch_config=None):\n \"\"\"Call computation.\n\n Args:\n computation_inputs: A tensor or dict of tensors, the inputs to the\n computation.\n computation: A Python function that takes no inputs and builds computation\n graph. If `computation` returns m outputs, this function will return a\n list of m Tensors.\n batch_config: A BatchConfig named tuple specifying the batching\n configuration to use for inference batching.\n\n Returns:\n A list of output tensors.\n \"\"\"\n\n # Using `TPUPartitionedCall` makes it possible to target a different\n # TPU core with every `Session.run()` call. Note that the entire inference\n # graph executes on a single core, and that invocations of this graph\n # will round-robin among the cores attached to a host.\n def tpu_partitioned_call(partition_inputs):\n\n # capture_resource_var_by_value enables variables to be mirrored on TPU\n # to avoid fetching from CPU, since variables do not change during\n # inference.\n @function.Defun(capture_resource_var_by_value=False)\n def tpu_subgraph():\n return computation(partition_inputs)\n\n return tpu_functional.TPUPartitionedCall(\n args=tpu_subgraph.captured_inputs,\n device_ordinal=tpu_ops.tpu_ordinal_selector(),\n Tout=[o.type for o in tpu_subgraph.definition.signature.output_arg],\n f=tpu_subgraph)\n\n # Not using Batching Function but use TPUPartitionedCall/all cores.\n if not batch_config:\n return tpu_partitioned_call(computation_inputs)\n\n # Use Batching Function and TPUPartitionedCall/all cores.\n # Note that BatchingFunction requires a list of tensors and doesn't support\n # a dict of tensors. 
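# ---- [Editor's illustrative sketch; not part of the original module] ----
# The warning above recommends `tf.data` over QueueRunners. A minimal
# TPU-friendly input_fn in that style (hypothetical data; TPUEstimator
# passes the per-replica batch size via params['batch_size']):
import tensorflow as tf

def _sketch_input_fn(params):
  ds = tf.data.Dataset.from_tensor_slices(({'x': [[0.0], [1.0]]}, [0, 1]))
  # drop_remainder=True keeps shapes static, as TPU execution requires.
  return ds.repeat().batch(params['batch_size'], drop_remainder=True)
# --------------------------------------------------------------------------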
# So we preserve the structure by deterministically\n # flattening the dict before batching and then recomposing it after batching\n # to feed into the computation.\n ordered_inputs_list = tf.nest.flatten(computation_inputs)\n\n @tf.nondifferentiable_batch_function(\n num_batch_threads=batch_config.num_batch_threads,\n max_batch_size=batch_config.max_batch_size,\n batch_timeout_micros=batch_config.batch_timeout_micros,\n allowed_batch_sizes=batch_config.allowed_batch_sizes,\n max_enqueued_batches=batch_config.max_enqueued_batches,\n autograph=False)\n def batched_tpu_computation(*tensor_args):\n \"\"\"Recomposes the input feature dict and calls the TPU computation.\"\"\"\n computation_feature_input = tf.nest.pack_sequence_as(\n computation_inputs, tensor_args)\n return tpu_partitioned_call(computation_feature_input)\n\n return batched_tpu_computation(*ordered_inputs_list)\n\n\nclass _ModelFnWrapper(object):\n \"\"\"A `model_fn` wrapper.\n\n This makes calling model_fn on CPU and TPU easier and more consistent and\n performs the necessary checks and mutations required by TPU training and\n evaluation.\n\n In addition, this wrapper manages converting the `model_fn` to a single TPU\n train and eval step.\n \"\"\"\n\n def __init__(self, model_fn, config, params, ctx):\n self._model_fn = model_fn\n self._config = config\n self._params = params\n self._ctx = ctx\n\n def call_without_tpu(self, features, labels, is_export_mode):\n return self._call_model_fn(features, labels, is_export_mode=is_export_mode)\n\n def _add_embedding_features(self, features, hook_dummy_table_variables):\n \"\"\"Add embedding features, optionally add hook to intercept gradient.\"\"\"\n if self._ctx.embedding_config:\n tpu_embedding_ = self._ctx.embedding_config.tpu_embedding\n embedding_activations = tpu_embedding_.get_activations()\n if hook_dummy_table_variables:\n new_embedding_activations = (\n tpu_embedding_gradient.hook_dummy_table_variables_to_activations(\n tpu_embedding_, embedding_activations,\n self._ctx.embedding_config.dummy_table_variables))\n features.update(new_embedding_activations)\n else:\n features.update(embedding_activations)\n\n def convert_to_single_tpu_train_step(self, dequeue_fn):\n \"\"\"Converts the user-provided `model_fn` to a single train step on TPU.\n\n The user-provided `model_fn` takes an input tuple\n (features, labels) and produces the EstimatorSpec with train_op and loss for\n train `mode`. This usually represents a single train computation on CPU.\n\n For TPU training, a train (computation) step is first wrapped in a\n tf.while_loop control flow to repeat many times and then replicated to\n all TPU shards. Besides, the input should be taken from TPU infeed rather\n than the input pipeline (input_fn) directly. To fit the TPU loop and\n replicate pattern, the original train computation should be reformed, which\n is the returned `train_step`.\n\n Args:\n dequeue_fn: The function to retrieve inputs, features and labels, from TPU\n infeed dequeue channel.\n\n Returns:\n A tuple of train_fn, host_calls, and captured scaffold_fn. 
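# ---- [Editor's illustrative sketch; not part of the original module] ----
# Shape of a host_call as consumed by the wrapper above: a (fn, tensors)
# pair. The tensors are outfed from the TPU, concatenated across shards,
# and passed to fn on the host. A hedged example (names illustrative):
import tensorflow as tf

def _sketch_host_call_fn(loss_t):
  # Runs on the host CPU with the concatenated per-shard losses.
  return tf.compat.v1.Print(loss_t, [loss_t], message='loss: ')

# host_call = (_sketch_host_call_fn, [tf.reshape(loss, [1])])
# --------------------------------------------------------------------------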
The train_fn\n representing the train step for TPU.\n \"\"\"\n\n host_call = _OutfeedHostCall(\n self._ctx,\n outfeed_every_n_steps=self._config.tpu_config\n .experimental_host_call_every_n_steps)\n captured_scaffold_fn = _CapturedObject()\n captured_training_hooks = _CapturedObject()\n\n def train_step(step):\n \"\"\"Training step function for use inside a while loop.\"\"\"\n inputs = dequeue_fn()\n features, labels = inputs.features_and_labels()\n self._add_embedding_features(features, True)\n\n estimator_spec = self._verify_estimator_spec(\n self._call_model_fn(features, labels))\n loss, train_op = estimator_spec.loss, estimator_spec.train_op\n\n if tensor_tracer.TensorTracer.is_enabled():\n tt = tensor_tracer.TensorTracer()\n loss = tt.trace_tpu(tf.compat.v1.get_default_graph(), loss, train_op,\n self._ctx.num_replicas)\n tracer_host_call = tt.host_call_deps_and_fn()\n else:\n tracer_host_call = {}\n\n if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access\n captured_scaffold_fn.capture(estimator_spec.scaffold_fn)\n else:\n captured_scaffold_fn.capture(None)\n\n captured_training_hooks.capture(estimator_spec.training_hooks)\n\n if self._ctx.embedding_config is None:\n apply_sparse_grads = []\n else:\n tpu_embedding_ = self._ctx.embedding_config.tpu_embedding\n gradients = (\n tpu_embedding_gradient.get_gradients_through_dummy_table_variables(\n tpu_embedding_))\n grad_multiplier = self._ctx.embedding_config.get_grad_multiplier()\n if grad_multiplier is not None:\n scaled_gradients = collections.OrderedDict(\n (k, v * grad_multiplier) for k, v in six.iteritems(gradients))\n else:\n scaled_gradients = gradients\n apply_sparse_grads = [\n tpu_embedding_.generate_send_gradients_op(\n scaled_gradients, tf.compat.v1.train.get_global_step())\n ]\n\n stopping_signals = None\n user_provided_stopping_signals_name = None\n if self._ctx.feed_hook is not None:\n stopping_signals, user_provided_stopping_signals_name = \\\n self._ctx.feed_hook.get_stopping_signals_and_name(features)\n\n # We must run train_op to update the variables prior to running the\n # outfeed.\n with tf.control_dependencies([train_op] + apply_sparse_grads):\n host_call_outfeed_ops = []\n host_call_fn, host_call_args = None, []\n\n if (isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec) # pylint: disable=protected-access\n and estimator_spec.host_call is not None):\n host_call_fn, host_call_args = estimator_spec.host_call\n\n if stopping_signals is not None:\n identity_fn = lambda **kwargs: kwargs\n tracer_host_call[user_provided_stopping_signals_name] = [\n identity_fn, stopping_signals\n ]\n\n if host_call_fn:\n # Ignore dummy hostcalls (no arguments)\n if host_call_args:\n tracer_host_call.update({'host_call': estimator_spec.host_call})\n host_call.record(tracer_host_call)\n host_call_outfeed_ops = host_call.create_enqueue_op(step)\n elif tracer_host_call:\n host_call.record(tracer_host_call)\n host_call_outfeed_ops = host_call.create_enqueue_op(step)\n else:\n # Create a host call for the loss to track execution progress\n # Without this, we don't have any indication of the state of the\n # TPU program.\n tracer_host_call.update(\n {'host_call': (lambda loss_t: loss_t, [tf.reshape(loss, [1])])})\n host_call.record(tracer_host_call)\n host_call_outfeed_ops = host_call.create_enqueue_op(step)\n\n with tf.control_dependencies(host_call_outfeed_ops):\n return tf.identity(loss)\n\n return (train_step, host_call, captured_scaffold_fn,\n captured_training_hooks)\n\n def 
convert_to_single_tpu_eval_step(self, dequeue_fn):\n \"\"\"Converts the user-provided `model_fn` to a single eval step on TPU.\n\n Similar to training, the user-provided `model_fn` takes an input tuple\n (features, labels) and produces the TPUEstimatorSpec with eval_metrics for\n eval `mode`. This usually represents a single evaluation computation on CPU.\n\n For TPU evaluation, an eval (computation) step is first wrapped in a\n tf.while_loop control flow to repeat many times and then replicated to\n all TPU shards. Besides, the input and output are slightly different. Input,\n features and labels, should be taken from TPU infeed rather than the input\n pipeline (input_fn) directly. Output is managed in two stages. First, the\n model outputs as the result of evaluation computation, usually model logits,\n should be transferred from the TPU system to CPU. Then, all model outputs are\n concatenated first on CPU and sent to the metric_fn for metrics computation.\n To fit the TPU evaluation pattern, the original eval computation should be\n reformed, which is the returned `eval_step`.\n\n Args:\n dequeue_fn: The function to retrieve inputs, features and labels, from TPU\n infeed dequeue channel.\n\n Returns:\n A tuple of eval_fn, host_calls, and captured scaffold_fn. The eval_fn\n representing the eval step for TPU.\n \"\"\"\n host_calls = _OutfeedHostCall(self._ctx)\n captured_scaffold_fn = _CapturedObject()\n captured_eval_hooks = _CapturedObject()\n\n def eval_step(total_loss):\n \"\"\"Evaluation step function for use inside a while loop.\"\"\"\n inputs = dequeue_fn()\n features, labels = inputs.features_and_labels()\n self._add_embedding_features(features, False)\n\n tpu_estimator_spec = self._call_model_fn(features, labels)\n if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access\n raise RuntimeError(\n 'estimator_spec used by TPU evaluation must have type '\n '`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))\n\n loss = tpu_estimator_spec.loss\n captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)\n captured_eval_hooks.capture(tpu_estimator_spec.evaluation_hooks)\n\n to_record = {}\n if tpu_estimator_spec.eval_metrics:\n to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics\n if tpu_estimator_spec.host_call is not None:\n # We assume that evaluate won't update global step, so we don't wrap\n # this host_call.\n to_record['host_call'] = tpu_estimator_spec.host_call\n host_calls.record(to_record)\n\n with tf.control_dependencies(host_calls.create_enqueue_op()):\n return tf.math.add(total_loss, loss)\n\n return eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks\n\n def convert_to_single_tpu_predict_step(self, dequeue_fn):\n \"\"\"Converts the user-provided `model_fn` to a single predict step on TPU.\n\n Args:\n dequeue_fn: The function to retrieve inputs, features and labels, from TPU\n infeed dequeue channel.\n\n Returns:\n A tuple of predict_fn, host_calls, and captured scaffold_fn. 
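# ---- [Editor's illustrative sketch; not part of the original module] ----
# Shape of eval_metrics as consumed above: (metric_fn, tensors), where the
# metric_fn returns a dict of metric ops once per-shard outputs have been
# concatenated on the CPU. A hedged example (names illustrative):
import tensorflow as tf

def _sketch_metric_fn(labels, predictions):
  return {'accuracy': tf.compat.v1.metrics.accuracy(labels, predictions)}

# eval_metrics = (_sketch_metric_fn, [labels, predictions])
# --------------------------------------------------------------------------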
The\n predict_fn representing the predict step for TPU.\n \"\"\"\n host_calls = _OutfeedHostCall(self._ctx)\n captured_scaffold_fn = _CapturedObject()\n captured_predict_hooks = _CapturedObject()\n\n def predict_step(unused_scalar_stopping_signal):\n \"\"\"Evaluation step function for use inside a while loop.\"\"\"\n inputs = dequeue_fn()\n features, labels = inputs.features_and_labels()\n stopping_signals = inputs.signals()\n\n assert stopping_signals is not None, (\n 'Internal Error: `signals` is missing.')\n\n tpu_estimator_spec = self._call_model_fn(\n features, labels, is_export_mode=False)\n if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access\n raise RuntimeError(\n 'estimator_spec used by TPU prediction must have type '\n '`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))\n\n self._verify_tpu_spec_predictions(tpu_estimator_spec.predictions)\n\n captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)\n captured_predict_hooks.capture(tpu_estimator_spec.prediction_hooks)\n to_record = {}\n identity_fn = lambda **kwargs: kwargs\n to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions]\n to_record['signals'] = [identity_fn, stopping_signals]\n if tpu_estimator_spec.host_call is not None:\n to_record['host_call'] = tpu_estimator_spec.host_call\n host_calls.record(to_record)\n\n with tf.control_dependencies(host_calls.create_enqueue_op()):\n return _StopSignals.as_scalar_stopping_signal(stopping_signals)\n\n return (predict_step, host_calls, captured_scaffold_fn,\n captured_predict_hooks)\n\n def _verify_tpu_spec_predictions(self, predictions):\n \"\"\"Validates TPUEstimatorSpec.predictions dict.\"\"\"\n # TODO(xiejw): Add validation for the prediction dictionary.\n # TODO(xiejw): Add support for a single tensor as predictions.\n if not isinstance(predictions, dict):\n raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')\n\n for (key, tensor) in predictions.items():\n if tensor.shape.dims[0].value is None:\n raise ValueError(\n 'The tensor with key ({}) in TPUEstimatorSpec.predictions has '\n 'dynamic shape (should be static). Tensor: {}'.format(key, tensor))\n return predictions\n\n def _validate_model_features_and_labels(self, features, labels,\n is_export_mode):\n \"\"\"Validates that the features and labels for the model function are valid.\n\n A valid features/labels object is one with:\n - Type: A tensor or any nested structure of tensors supported by TF nest,\n namely nested dictionary, tuple, namedtuple, or sequence of tensors.\n - Static shape if is_export_mode is False.\n\n Args:\n features: the features that would be input to the model function.\n labels: the labels that would be input to the model function.\n is_export_mode: boolean value specifying if in export mode.\n\n Raises:\n TypeError: If features/labels are not of the correct type.\n ValueError: If features/labels have dynamic shape.\n \"\"\"\n\n def validate(obj, obj_name):\n \"\"\"Helper validate function.\"\"\"\n if is_export_mode or self._ctx.is_running_on_cpu(is_export_mode):\n return\n if isinstance(obj, tf.Tensor):\n if not obj.get_shape().is_fully_defined():\n raise ValueError(\n 'The {} to the model returned by input_fn must have static shape.'\n ' Tensor: {}'.format(obj_name, obj))\n else:\n for tensor in data_nest.flatten(obj):\n if not tensor.get_shape().is_fully_defined():\n raise ValueError(\n ('The {} to the model returned by input_fn must have static '\n 'shape. 
Tensor: {}').format(obj_name, tensor))\n\n validate(features, 'features')\n if labels is not None:\n validate(labels, 'labels')\n\n def _call_model_fn(self, features, labels, is_export_mode=False):\n \"\"\"Calls the model_fn with required parameters.\"\"\"\n self._validate_model_features_and_labels(features, labels, is_export_mode)\n model_fn_args = function_utils.fn_args(self._model_fn)\n kwargs = {}\n\n # Makes deep copy with `config` and params` in case user mutates them.\n config = copy.deepcopy(self._config)\n params = copy.deepcopy(self._params)\n\n if 'labels' in model_fn_args:\n kwargs['labels'] = labels\n elif labels is not None:\n raise ValueError(\n 'model_fn does not take labels, but input_fn returns labels.')\n if 'mode' in model_fn_args:\n kwargs['mode'] = self._ctx.mode\n if 'config' in model_fn_args:\n kwargs['config'] = config\n if 'params' in model_fn_args:\n kwargs['params'] = params\n\n if 'params' not in model_fn_args:\n raise ValueError('model_fn ({}) does not include params argument, '\n 'required by TPUEstimator to pass batch size as '\n 'params[\\'batch_size\\']'.format(self._model_fn))\n\n if is_export_mode:\n batch_size_for_model_fn = None\n else:\n batch_size_for_model_fn = self._ctx.batch_size_for_model_fn\n\n if batch_size_for_model_fn is not None:\n _add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn)\n\n running_on_cpu = self._ctx.is_running_on_cpu(is_export_mode)\n # In export mode, params['use_tpu'] has already been set based on mode\n # (i.e. True for _REWRITE_FOR_INFERENCE_MODE, False otherwise).\n if not is_export_mode:\n _add_item_to_params(params, _USE_TPU_KEY, not running_on_cpu)\n\n if not running_on_cpu:\n user_context = tpu_context.TPUContext(\n internal_ctx=self._ctx, call_from_input_fn=False)\n _add_item_to_params(params, _CTX_KEY, user_context)\n\n estimator_spec = self._model_fn(features=features, **kwargs)\n if (running_on_cpu and\n isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)): # pylint: disable=protected-access\n # The estimator_spec will be passed to `Estimator` directly, which expects\n # type `EstimatorSpec`. As we are running on the CPU, escape\n # the TPUInferenceContext.\n graph_context = tf.compat.v1.get_default_graph(\n )._get_control_flow_context()\n try:\n if isinstance(graph_context, tpu._TPUInferenceContext):\n tf.compat.v1.get_default_graph()._set_control_flow_context(\n graph_context.outer_context)\n return estimator_spec.as_estimator_spec()\n finally:\n tf.compat.v1.get_default_graph()._set_control_flow_context(\n graph_context)\n else:\n return estimator_spec\n\n def _verify_estimator_spec(self, estimator_spec):\n \"\"\"Validates the estimator_spec.\"\"\"\n if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access\n return estimator_spec\n\n err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'\n if estimator_spec.training_chief_hooks:\n raise ValueError(\n err_msg.format('training_chief_hooks') + 'If you want' +\n ' to pass training hooks, please pass via training_hooks.')\n\n if estimator_spec.scaffold:\n tf.compat.v1.logging.warn(\n 'EstimatorSpec.Scaffold is ignored by TPU train/eval. 
'\n 'Please use TPUEstimatorSpec.')\n return estimator_spec\n\n\nclass _OutfeedHostCall(object):\n \"\"\"Support for `eval_metrics` and `host_call` in TPUEstimatorSpec.\"\"\"\n\n def __init__(self, ctx, outfeed_every_n_steps=1):\n self._ctx = ctx\n self._names = []\n # All of these are dictionaries of lists keyed on the name.\n self._host_fns = {}\n self._tensor_keys = collections.defaultdict(list)\n self._tensors = collections.defaultdict(list)\n self._tensor_dtypes = collections.defaultdict(list)\n self._tensor_shapes = collections.defaultdict(list)\n self._outfeed_every_n_steps = outfeed_every_n_steps\n\n @staticmethod\n def validate(host_calls):\n \"\"\"Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`.\"\"\"\n\n for name, host_call in host_calls.items():\n if not isinstance(host_call, (tuple, list)):\n raise ValueError('{} should be tuple or list'.format(name))\n if len(host_call) != 2:\n raise ValueError('{} should have two elements.'.format(name))\n if not callable(host_call[0]):\n raise TypeError('{}[0] should be callable.'.format(name))\n if not isinstance(host_call[1], (tuple, list, dict)):\n raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))\n\n if isinstance(host_call[1], (tuple, list)):\n fullargspec = tf_inspect.getfullargspec(host_call[0])\n fn_args = function_utils.fn_args(host_call[0])\n # wrapped_hostcall_with_global_step uses varargs, so we allow that.\n if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):\n raise RuntimeError(\n 'In TPUEstimatorSpec.{}, length of tensors {} does not match '\n 'method args of the function, which takes {}.'.format(\n name, len(host_call[1]), len(fn_args)))\n\n @staticmethod\n def create_cpu_hostcall(host_calls):\n \"\"\"Runs on the host_call on CPU instead of TPU when use_tpu=False.\"\"\"\n\n _OutfeedHostCall.validate(host_calls)\n ret = {}\n for name, host_call in host_calls.items():\n host_fn, tensors = host_call\n if isinstance(tensors, (tuple, list)):\n ret[name] = host_fn(*tensors)\n else:\n # Must be dict.\n try:\n ret[name] = host_fn(**tensors)\n except TypeError as e:\n tf.compat.v1.logging.warn(\n 'Exception while calling %s: %s. 
It is likely the tensors '\n '(%s[1]) do not match the '\n 'function\\'s arguments', name, e, name)\n raise\n return ret\n\n def record(self, host_calls):\n \"\"\"Records the host_call structure.\"\"\"\n\n for name, host_call in host_calls.items():\n host_fn, tensor_list_or_dict = host_call\n self._names.append(name)\n self._host_fns[name] = host_fn\n\n if isinstance(tensor_list_or_dict, dict):\n for (key, tensor) in six.iteritems(tensor_list_or_dict):\n self._tensor_keys[name].append(key)\n self._tensors[name].append(tensor)\n self._tensor_dtypes[name].append(tensor.dtype)\n self._tensor_shapes[name].append(tensor.shape)\n else:\n # List or tuple.\n self._tensor_keys[name] = None\n for tensor in tensor_list_or_dict:\n self._tensors[name].append(tensor)\n self._tensor_dtypes[name].append(tensor.dtype)\n self._tensor_shapes[name].append(tensor.shape)\n\n def create_enqueue_op(self, step=None):\n \"\"\"Create the op to enqueue the recorded host_calls.\n\n Returns:\n A list of enqueue ops, which is empty if there are no host calls.\n \"\"\"\n if not self._names:\n return []\n\n tensors = []\n # TODO(jhseu): Consider deduping tensors.\n for name in self._names:\n tensors.extend(self._tensors[name])\n\n if self._outfeed_every_n_steps > 1 and step is None:\n raise ValueError('If outfeed is requested every n steps, you must pass '\n 'a tensor whose value is the step number within the '\n 'current training loop.')\n with tf.compat.v1.device(tf.compat.v1.tpu.core(0)):\n if self._outfeed_every_n_steps == 1:\n return [tpu_ops.outfeed_enqueue_tuple(tensors)]\n else:\n return [\n tf.compat.v1.cond(\n tf.math.equal(\n tf.math.floormod(step, self._outfeed_every_n_steps),\n 0), lambda: tpu_ops.outfeed_enqueue_tuple(tensors),\n lambda: tf.no_op())\n ]\n\n def create_tpu_hostcall(self):\n \"\"\"Sends the tensors through outfeed and runs the host_fn on CPU.\n\n The tensors are concatenated along dimension 0 to form a global tensor\n across all shards. The concatenated function is passed to the host_fn and\n executed on the first host.\n\n Returns:\n A dictionary mapping name to the return type of the host_call by that\n name.\n\n Raises:\n RuntimeError: If outfeed tensor is scalar.\n \"\"\"\n if not self._names:\n return {}\n\n ret = {}\n # For each i, dequeue_ops[i] is a list containing the tensors from all\n # shards. This list is concatenated later.\n dequeue_ops = []\n tensor_dtypes = []\n tensor_shapes = []\n for name in self._names:\n for _ in self._tensors[name]:\n dequeue_ops.append([])\n for dtype in self._tensor_dtypes[name]:\n tensor_dtypes.append(dtype)\n for shape in self._tensor_shapes[name]:\n tensor_shapes.append(shape)\n\n # Outfeed ops execute on each replica's first logical core. 
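(For illustration, assuming a\n # host_call carrying two tensors (a, b) on a 2-replica system: dequeue_ops is\n # built up as [[a_replica0, a_replica1], [b_replica0, b_replica1]], and each\n # inner list is concatenated along axis 0 before the host_fn runs.)\n #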
Note: we must\n # constrain it such that we have at most one outfeed dequeue and enqueue\n # per replica.\n for i in xrange(self._ctx.num_replicas):\n host_device, ordinal_id = self._ctx.device_for_replica(i)\n with tf.compat.v1.device(host_device):\n outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(\n dtypes=tensor_dtypes,\n shapes=tensor_shapes,\n device_ordinal=ordinal_id)\n for j, item in enumerate(outfeed_tensors):\n dequeue_ops[j].append(item)\n\n # Deconstruct dequeue ops.\n flat_dequeue_ops = []\n for l in dequeue_ops:\n flat_dequeue_ops.extend(l)\n\n dequeue_ops_by_name = {}\n pos = 0\n for name in self._names:\n dequeue_ops_by_name[name] = dequeue_ops[pos:pos +\n len(self._tensors[name])]\n pos += len(self._tensors[name])\n\n def _call_host_fn(fn, *args, **kw):\n context = CatchInvalidHostcallFunctions()\n context.Enter()\n result = fn(*args, **kw)\n context.Exit()\n context.ExitResult(result)\n return result\n\n # It is assumed evaluation always happens on a single-host TPU system. So,\n # place all ops on tpu host if possible.\n #\n # TODO(jhseu): Evaluate whether this is right for summaries.\n with tf.compat.v1.device(\n self._ctx.tpu_host_placement_function(replica_id=0)):\n for name in self._names:\n dequeue_ops = dequeue_ops_by_name[name]\n for i, item in enumerate(dequeue_ops):\n # TODO(xiejw): Make the specification of the outfeed combination\n # function more explicit and well-documented. We may want to give the\n # user the option of concatenating along any axis.\n if (self._ctx.config.tpu_config.per_host_input_for_training is\n tpu_config.InputPipelineConfig.BROADCAST):\n # If the infeed is in BROADCAST mode (each core receiving the same\n # input), then we assume that the cores also produce identical\n # copies of the same output, and we simply take the output from\n # the first core. This mode is used by Mesh-TensorFlow.\n with tf.control_dependencies(dequeue_ops[i]):\n dequeue_ops[i] = tf.identity(dequeue_ops[i][0])\n else:\n if dequeue_ops[i][0].shape.ndims == 0:\n raise RuntimeError(\n 'All tensors outfed from TPU should preserve batch size '\n 'dimension, but got scalar {}'.format(dequeue_ops[i][0]))\n # Assume that the input has been batch-split and that axis 0 of the\n # output tensors represents the batch size. Concatenate along\n # the axis 0 to re-combine the batch.\n dequeue_ops[i] = tf.concat(dequeue_ops[i], axis=0)\n\n if self._tensor_keys[name] is not None:\n # The user-provided eval_metrics[1] is a dict.\n dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))\n try:\n ret[name] = _call_host_fn(self._host_fns[name], **dequeue_ops)\n except TypeError as e:\n tf.compat.v1.logging.warn(\n 'Exception while calling %s: %s. 
It is likely the tensors '\n '(%s[1]) do not match the '\n 'function\\'s arguments', name, e, name)\n raise\n else:\n ret[name] = _call_host_fn(self._host_fns[name], *dequeue_ops)\n\n # force all dequeue operations to be run if not consumed by the host calls\n ret['__force_dequeue'] = tf.group(*flat_dequeue_ops)\n return ret\n\n\nclass _OutfeedHostCallHook(tf.compat.v1.train.SessionRunHook):\n \"\"\"Hook to run host calls when use_tpu=False.\"\"\"\n\n def __init__(self, tensors):\n self._tensors = tensors\n\n def begin(self):\n # We duplicate this code from the TPUInfeedOutfeedSessionHook rather than\n # create a separate hook to guarantee execution order, because summaries\n # need to be initialized before the outfeed thread starts.\n # TODO(jhseu): Make a wrapper hook instead?\n self._init_ops = contrib_summary.summary_writer_initializer_op()\n # Get all the writer resources from the initializer, so we know what to\n # flush.\n self._finalize_ops = []\n for op in self._init_ops:\n self._finalize_ops.append(tf.compat.v2.summary.flush(writer=op.inputs[0]))\n\n def after_create_session(self, session, coord):\n session.run(self._init_ops)\n\n def before_run(self, run_context):\n return tf.compat.v1.train.SessionRunArgs(self._tensors)\n\n def end(self, session):\n session.run(self._finalize_ops)\n\n\nclass _NotSaver(object):\n \"\"\"What to pass instead of a saver object if you don't want saving.\"\"\"\n\n def __init__(self, message):\n self._message = message\n\n def save(self, *args, **kwargs):\n del args, kwargs\n tf.compat.v1.logging.info(self._message)\n\n\nclass ExamplesPerSecondHook(tf.compat.v1.train.StepCounterHook):\n \"\"\"Calculate and report global_step/sec and examples/sec during runtime.\"\"\"\n\n def __init__(self,\n batch_size,\n every_n_steps=100,\n every_n_secs=None,\n output_dir=None,\n summary_writer=None):\n self._batch_size = batch_size\n super(ExamplesPerSecondHook, self).__init__(\n every_n_steps=every_n_steps,\n every_n_secs=every_n_secs,\n output_dir=output_dir,\n summary_writer=summary_writer)\n\n def _log_and_record(self, elapsed_steps, elapsed_time, global_step):\n global_step_per_sec = elapsed_steps / elapsed_time\n examples_per_sec = self._batch_size * global_step_per_sec\n if self._summary_writer is not None:\n global_step_summary = Summary(value=[\n Summary.Value(\n tag='global_step/sec', simple_value=global_step_per_sec)\n ])\n example_summary = Summary(value=[\n Summary.Value(tag='examples/sec', simple_value=examples_per_sec)\n ])\n self._summary_writer.add_summary(global_step_summary, global_step)\n self._summary_writer.add_summary(example_summary, global_step)\n tf.compat.v1.logging.info('global_step/sec: %g', global_step_per_sec)\n tf.compat.v1.logging.info('examples/sec: %g', examples_per_sec)\n\n\nclass InstallSignalHandlerHook(tf.compat.v1.train.SessionRunHook):\n \"\"\"Change SIGINT (CTRL^C) handler to force quit the process.\n\n The default behavior often results in hanging processes.\n The original handler is restored after training/evaluation.\n \"\"\"\n\n def __init__(self):\n self._signal_fn = signal.getsignal(signal.SIGINT)\n\n def before_run(self, run_context):\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n def end(self, session):\n signal.signal(signal.SIGINT, self._signal_fn)\n\n\nclass ExportSavedModelApiVersion(enum.Enum):\n V1 = 1\n V2 = 2\n\n\nclass BatchConfig(\n collections.namedtuple('BatchConfig', [\n 'num_batch_threads', 'max_batch_size', 'batch_timeout_micros',\n 'allowed_batch_sizes', 'max_enqueued_batches'\n ])):\n 
\"\"\"Class to handle config inputs into the batching function.\"\"\"\n\n def __new__(cls,\n num_batch_threads,\n max_batch_size,\n batch_timeout_micros,\n allowed_batch_sizes,\n max_enqueued_batches=100):\n \"\"\"Creates an BatchConfig instance.\n\n Args:\n num_batch_threads: Number of scheduling threads for processing batches of\n work. Determines the number of batches processed in parallel.\n max_batch_size: Batch sizes will never be bigger than this.\n batch_timeout_micros: Maximum number of microseconds to wait before\n outputting an incomplete batch.\n allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,\n does nothing. Otherwise, supplies a list of batch sizes, causing the op\n to pad batches up to one of those sizes. The entries must increase\n monotonically, and the final entry must equal max_batch_size.\n max_enqueued_batches: The maximum depth of the batch queue. Defaults to\n 100.\n\n Returns:\n An BatchConfig instance.\n \"\"\"\n return super(BatchConfig, cls).__new__(\n cls,\n num_batch_threads=num_batch_threads,\n max_batch_size=max_batch_size,\n batch_timeout_micros=batch_timeout_micros,\n allowed_batch_sizes=allowed_batch_sizes,\n max_enqueued_batches=max_enqueued_batches)\n\n\n@estimator_export(v1=['estimator.tpu.TPUEstimator'])\nclass TPUEstimator(estimator_lib.Estimator):\n \"\"\"Estimator with TPU support.\n\n TPUEstimator also supports training on CPU and GPU. You don't need to define\n a separate `tf.estimator.Estimator`.\n\n TPUEstimator handles many of the details of running on TPU devices, such as\n replicating inputs and models for each core, and returning to host\n periodically to run hooks.\n\n TPUEstimator transforms a global batch size in params to a per-shard batch\n size when calling the `input_fn` and `model_fn`. Users should specify\n global batch size in constructor, and then get the batch size for each shard\n in `input_fn` and `model_fn` by `params['batch_size']`.\n\n - For training, `model_fn` gets per-core batch size; `input_fn` may get\n per-core or per-host batch size depending on `per_host_input_for_training`\n in `TPUConfig` (See docstring for TPUConfig for details).\n\n - For evaluation and prediction, `model_fn` gets per-core batch size and\n `input_fn` get per-host batch size.\n\n Evaluation\n ==========\n\n `model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics`\n for TPU evaluation. If eval_on_tpu is False, the evaluation will execute on\n CPU or GPU; in this case the following discussion on TPU evaluation does not\n apply.\n\n `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where\n `tensors` could be a list of any nested structure of `Tensor`s (See\n `TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns\n a dict from metric string name to the result of calling a metric function,\n namely a `(metric_tensor, update_op)` tuple.\n\n One can set `use_tpu` to `False` for testing. All training, evaluation, and\n predict will be executed on CPU. `input_fn` and `model_fn` will receive\n `train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`.\n\n Current limitations:\n --------------------\n\n 1. TPU evaluation only works on a single host (one TPU worker) except\n BROADCAST mode.\n\n 2. `input_fn` for evaluation should **NOT** raise an end-of-input exception\n (`OutOfRangeError` or `StopIteration`). 
And all evaluation steps and all\n batches should have the same size.\n\n Example (MNIST):\n ----------------\n\n ```\n # The metric Fn which runs on CPU.\n def metric_fn(labels, logits):\n predictions = tf.argmax(logits, 1)\n return {\n 'accuracy': tf.compat.v1.metrics.accuracy(\n labels=labels, predictions=predictions),\n }\n\n # Your model Fn which runs on TPU (eval_metrics is a list in this example)\n def model_fn(features, labels, mode, config, params):\n ...\n logits = ...\n\n if mode == tf.estimator.ModeKeys.EVAL:\n return tpu_estimator.TPUEstimatorSpec(\n mode=mode,\n loss=loss,\n eval_metrics=(metric_fn, [labels, logits]))\n\n # or specify the eval_metrics tensors as dict.\n def model_fn(features, labels, mode, config, params):\n ...\n final_layer_output = ...\n\n if mode == tf.estimator.ModeKeys.EVAL:\n return tpu_estimator.TPUEstimatorSpec(\n mode=mode,\n loss=loss,\n eval_metrics=(metric_fn, {\n 'labels': labels,\n 'logits': final_layer_output,\n }))\n ```\n\n Prediction\n ==========\n\n Prediction on TPU is an experimental feature to support large batch inference.\n It is not designed for latency-critical systems. In addition, due to some\n usability issues, for prediction with a small dataset, CPU `.predict`, i.e.,\n creating a new `TPUEstimator` instance with `use_tpu=False`, might be more\n convenient.\n\n Note: In contrast to TPU training/evaluation, the `input_fn` for prediction\n *should* raise an end-of-input exception (`OutOfRangeError` or\n `StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be\n precise, the ops created by `input_fn` produce one batch of the data.\n The `predict()` API processes one batch at a time. When reaching the end of\n the data source, an end-of-input exception should be raised by one of these\n operations. The user usually does not need to do this manually. As long as the\n dataset is not repeated forever, the `tf.data` API will raise an end-of-input\n exception automatically after the last batch has been produced.\n\n Note: Estimator.predict returns a Python generator. Please consume all the\n data from the generator so that TPUEstimator can shut down the TPU system\n properly for the user.\n\n Current limitations:\n --------------------\n 1. TPU prediction only works on a single host (one TPU worker).\n\n 2. `input_fn` must return a `Dataset` instance rather than `features`. 
In\n fact, .train() and .evaluate() also support Dataset as return value.\n\n Example (MNIST):\n ----------------\n ```\n height = 32\n width = 32\n total_examples = 100\n\n def predict_input_fn(params):\n batch_size = params['batch_size']\n\n images = tf.random.uniform(\n [total_examples, height, width, 3], minval=-1, maxval=1)\n\n dataset = tf.data.Dataset.from_tensor_slices(images)\n dataset = dataset.map(lambda images: {'image': images})\n\n dataset = dataset.batch(batch_size)\n return dataset\n\n def model_fn(features, labels, params, mode):\n # Generate predictions, called 'output', from features['image']\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\n 'predictions': output,\n 'is_padding': features['is_padding']\n })\n\n tpu_est = TPUEstimator(\n model_fn=model_fn,\n ...,\n predict_batch_size=16)\n\n # Fully consume the generator so that TPUEstimator can shut down the TPU\n # system.\n for item in tpu_est.predict(input_fn=input_fn):\n # Filter out item if the `is_padding` is 1.\n # Process the 'predictions'\n ```\n\n Exporting\n =========\n\n `export_saved_model` exports 2 metagraphs, one with `saved_model.SERVING`, and\n another with `saved_model.SERVING` and `saved_model.TPU` tags. At serving\n time, these tags are used to select the appropriate metagraph to load.\n\n Before running the graph on TPU, the TPU system needs to be initialized. If\n TensorFlow Serving model-server is used, this is done automatically. If not,\n please use `session.run(tpu.initialize_system())`.\n\n There are two versions of the API: 1 or 2.\n\n In V1, the exported CPU graph is `model_fn` as it is. The exported TPU graph\n wraps `tpu.rewrite()` and `TPUPartitionedCallOp` around `model_fn` so\n `model_fn` is on TPU by default. To place ops on CPU,\n `tpu.outside_compilation(host_call, logits)` can be used.\n\n Example:\n ----------------\n\n ```\n def model_fn(features, labels, mode, config, params):\n ...\n logits = ...\n export_outputs = {\n 'logits': export_output_lib.PredictOutput(\n {'logits': logits})\n }\n\n def host_call(logits):\n class_ids = math_ops.argmax(logits)\n classes = string_ops.as_string(class_ids)\n export_outputs['classes'] =\n export_output_lib.ClassificationOutput(classes=classes)\n\n tpu.outside_compilation(host_call, logits)\n\n ...\n ```\n\n In V2, `export_saved_model()` sets the `params['use_tpu']` flag to let the user\n know if the code is exporting to TPU (or not). When `params['use_tpu']` is\n `True`, users need to call `tpu.rewrite()`, `TPUPartitionedCallOp` and/or\n `batch_function()`.\n
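\n For V2, a minimal sketch (illustrative only; `computation` and the feature\n key `image` below are placeholders, not part of this API) of a `model_fn`\n that branches on `params['use_tpu']`:\n\n ```\n def model_fn(features, labels, mode, config, params):\n\n def computation(images):\n return tf.compat.v1.layers.dense(images, 10)\n\n if params['use_tpu']:\n # Wrap the core computation for TPU when exporting with the V2 API.\n logits = tf.compat.v1.tpu.rewrite(computation, [features['image']])\n else:\n logits = computation(features['image'])\n ...\n ```\n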
\n TIP: V2 is recommended as it is more flexible (eg: batching, etc).\n \"\"\"\n\n def __init__(self,\n model_fn=None,\n model_dir=None,\n config=None,\n params=None,\n use_tpu=True,\n train_batch_size=None,\n eval_batch_size=None,\n predict_batch_size=None,\n batch_axis=None,\n eval_on_tpu=True,\n export_to_tpu=True,\n export_to_cpu=True,\n warm_start_from=None,\n embedding_config_spec=None,\n export_saved_model_api_version=ExportSavedModelApiVersion.V1):\n \"\"\"Constructs a `TPUEstimator` instance.\n\n Args:\n model_fn: Model function as required by `Estimator` which returns\n EstimatorSpec or TPUEstimatorSpec. `training_hooks`, `evaluation_hooks`,\n and `prediction_hooks` must not capture any TPU Tensor inside the\n model_fn.\n model_dir: Directory to save model parameters, graph, etc. This can\n also be used to load checkpoints from the directory into an estimator to\n continue training a previously saved model. If `None`, the model_dir in\n `config` will be used if set. If both are set, they must be the same. If\n both are `None`, a temporary directory will be used.\n config: A `tpu_config.RunConfig` configuration object. Cannot be `None`.\n params: An optional `dict` of hyperparameters that will be passed into\n `input_fn` and `model_fn`. Keys are names of parameters, values are\n basic python types. There are reserved keys for `TPUEstimator`,\n including 'batch_size'.\n use_tpu: A bool indicating whether TPU support is enabled. Currently,\n TPU training and evaluation respect this bit, but eval_on_tpu can\n override execution of eval. See below.\n train_batch_size: An int representing the global training batch size.\n TPUEstimator transforms this global batch size to a per-shard batch\n size, as params['batch_size'], when calling `input_fn` and `model_fn`.\n Cannot be `None` if `use_tpu` is `True`. Must be divisible by total\n number of replicas.\n eval_batch_size: An int representing evaluation batch size. Must be\n divisible by total number of replicas.\n predict_batch_size: An int representing the prediction batch size. Must be\n divisible by total number of replicas.\n batch_axis: A python tuple of int values describing how each tensor\n produced by the Estimator `input_fn` should be split across the TPU\n compute shards. For example, if your input_fn produced (images, labels)\n where the images tensor is in `HWCN` format, your shard dimensions would\n be [3, 0], where 3 corresponds to the `N` dimension of your images\n Tensor, and 0 corresponds to the dimension along which to split the\n labels to match up with the corresponding images. If None is supplied,\n and per_host_input_for_training is True, batches will be sharded based\n on the major dimension. If tpu_config.per_host_input_for_training is\n False or `PER_HOST_V2`, batch_axis is ignored.\n eval_on_tpu: If False, evaluation runs on CPU or GPU. In this case, the\n model_fn must return `EstimatorSpec` when called with `mode` as `EVAL`.\n export_to_tpu: If True, `export_saved_model()` exports a metagraph for\n serving on TPU. Note that unsupported export modes such as EVAL will be\n ignored. For those modes, only a CPU model will be exported. Currently,\n export_to_tpu only supports PREDICT.\n export_to_cpu: If True, `export_saved_model()` exports a metagraph for\n serving on CPU.\n warm_start_from: Optional string filepath to a checkpoint or SavedModel to\n warm-start from, or a `tf.estimator.WarmStartSettings` object to fully\n configure warm-starting. If the string filepath is provided instead of\n a `WarmStartSettings`, then all variables are warm-started, and it is\n assumed that vocabularies and Tensor names are unchanged.\n embedding_config_spec: Optional EmbeddingConfigSpec instance to support\n using TPU embedding.\n export_saved_model_api_version: an integer: 1 or 2. 1 corresponds to V1,\n 2 corresponds to V2. (Defaults to V1). With\n V1, `export_saved_model()` adds rewrite() and TPUPartitionedCallOp() for\n the user; in V2, the user is expected to add rewrite(),\n TPUPartitionedCallOp(), etc. in their model_fn.\n\n Raises:\n ValueError: `params` has reserved keys already.\n \"\"\"\n
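# Illustrative sketch (not from the original code): a typical construction,\n # assuming `run_config` is an existing `tpu_config.RunConfig` and\n # `my_model_fn` is a user-defined model_fn:\n #\n # estimator = TPUEstimator(\n # model_fn=my_model_fn,\n # config=run_config,\n # use_tpu=True,\n # train_batch_size=1024, # global batch; divisible by the replica count\n # eval_batch_size=1024,\n # params={'learning_rate': 1e-3})\n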
if config is None or not isinstance(config, tpu_config.RunConfig):\n raise ValueError(\n '`config` must be provided with type `tpu_config.RunConfig`')\n\n if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):\n raise ValueError('{} are reserved keys but existed in params {}.'.format(\n _RESERVED_PARAMS_KEYS, params))\n\n if use_tpu:\n # Perform some very basic validations. More validations will be found in\n # _InternalTPUContext.\n if train_batch_size is None:\n raise ValueError('`train_batch_size` cannot be `None`')\n util_lib.check_positive_integer(train_batch_size, 'train_batch_size')\n\n if (config.tpu_config.per_host_input_for_training is\n tpu_config.InputPipelineConfig.PER_SHARD_V1 and\n config.tpu_config.num_cores_per_replica):\n raise ValueError(\n 'Model parallelism only supports per host input for training. '\n 'Please adjust TPURunconfig.per_host_input_for_training.')\n\n if eval_batch_size is not None:\n util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')\n\n if predict_batch_size is not None:\n util_lib.check_positive_integer(predict_batch_size,\n 'predict_batch_size')\n\n if embedding_config_spec:\n if (config.tpu_config.per_host_input_for_training not in (\n tpu_config.InputPipelineConfig.PER_HOST_V1,\n tpu_config.InputPipelineConfig.PER_HOST_V2)):\n raise ValueError('Only PER_HOST_V1 and PER_HOST_V2 are supported when '\n 'using TPU Embedding; got {}.'.format(\n config.tpu_config.per_host_input_for_training))\n self._embedding_from_feature_columns = (\n embedding_config_spec.feature_columns is not None)\n\n if (not (use_tpu and eval_on_tpu) and embedding_config_spec and\n embedding_config_spec.partition_strategy == 'mod'):\n raise ValueError('Mod sharding of embedding tables not supported on '\n 'CPU.')\n _tpu_estimator_gauge.get_cell().set(True)\n # Verifies the model_fn signature according to the Estimator framework.\n estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access\n # We cannot store config and params in this constructor as parent\n # constructor might change them, such as assigning a temp dir for\n # config.model_dir.\n model_function = self._augment_model_fn(model_fn, batch_axis)\n\n # Overwrite log_step_count_steps to disable TensorLoggingHook and\n # StepCounterHook from being created in Estimator. TPUEstimator already\n # added equivalent hooks in _augment_model_fn above.\n self._log_every_n_steps = config.log_step_count_steps\n config = config.replace(log_step_count_steps=None)\n\n # Pass non-None params, as the wrapped model_fn expects it.\n params = params or {}\n super(TPUEstimator, self).__init__(\n model_fn=model_function,\n model_dir=model_dir,\n config=config,\n params=params,\n warm_start_from=warm_start_from)\n self._iterations_per_training_loop = util_lib.parse_iterations_per_loop(\n self._config.tpu_config.iterations_per_loop)\n # In the absence of an explicit `log_every_n_secs` config, if the\n # `iterations_per_loop` value is specified as time in seconds, enable\n # logging every n secs based on the `iterations_per_loop` value. 
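(Worked example for the 'count' branch handled below, added for\n # illustration: with log_step_count_steps=100 and iterations_per_loop=20,\n # each session.run() covers 20 global steps, so logging is rescaled to every\n # ceil(100 / 20) = 5 loop iterations, i.e. roughly every 100 global steps as\n # originally requested.)\n #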
A trade-off\n # avoiding API change on the current release.\n # TODO(henrytan): add `log_every_n_secs` to RunConfig.\n if self._iterations_per_training_loop.unit == 'seconds':\n self._log_every_n_secs = self._iterations_per_training_loop.value\n self._log_every_n_steps = None\n elif self._iterations_per_training_loop.unit == 'count':\n if self._log_every_n_steps is not None:\n # Each session.run() lasts for iterations_per_loop. We can't log\n # in-between a session.run(), and we can only log after the\n # `iterations_per_loop` steps, so we can only approximate. If a user\n # requests to log every N steps, we actually want to roughly log every\n # N / `iterations_per_loop` steps to match the original intention.\n self._log_every_n_steps = (\n int(\n math.ceil(\n float(self._log_every_n_steps) /\n self._iterations_per_training_loop.value)))\n self._log_every_n_secs = None\n else:\n assert False, ('Invalid TPUConfig `iterations_per_loop` value. '\n 'Indicates a bug in `iterations_per_loop` '\n 'parsing.')\n\n # All properties passed to _InternalTPUContext are immutable.\n # pylint: disable=protected-access\n self._ctx = tpu_context._get_tpu_context(self._config, train_batch_size,\n eval_batch_size,\n predict_batch_size, use_tpu,\n eval_on_tpu, embedding_config_spec)\n\n self._export_to_cpu = export_to_cpu\n self._export_to_tpu = export_to_tpu\n\n if not (isinstance(export_saved_model_api_version,\n ExportSavedModelApiVersion)\n or export_saved_model_api_version == 1\n or export_saved_model_api_version == 2):\n raise ValueError('export_saved_model_api_version should be 1 or 2; '\n 'got {}.'.format(\n export_saved_model_api_version))\n self._export_saved_model_api_version = export_saved_model_api_version\n self._is_input_fn_invoked = None\n\n self._rendezvous = {}\n\n def _add_meta_graph_for_mode(self,\n builder,\n input_receiver_fn_map,\n checkpoint_path,\n save_variables=True,\n mode=model_fn_lib.ModeKeys.PREDICT,\n export_tags=None,\n check_variables=True,\n strip_default_attrs=True):\n if self._export_to_tpu and mode != model_fn_lib.ModeKeys.PREDICT:\n tf.compat.v1.logging.warn(\n 'TPUEstimator only handles mode PREDICT for exporting '\n 'when `export_to_tpu` is `True`; Mode {} will be ignored '\n 'for TPU.'.format(mode))\n\n if not self._export_to_cpu and not self._export_to_tpu:\n raise ValueError('One of export_to_cpu and export_to_tpu must be true.')\n\n if self._export_to_cpu:\n (super(TPUEstimator, self)._add_meta_graph_for_mode(\n builder,\n input_receiver_fn_map,\n checkpoint_path,\n save_variables,\n mode=mode,\n export_tags=export_tags,\n check_variables=check_variables,\n strip_default_attrs=strip_default_attrs))\n\n if self._export_to_tpu and mode == model_fn_lib.ModeKeys.PREDICT:\n input_receiver_fn_map = {\n _INFERENCE_ON_TPU_MODE: input_receiver_fn_map[mode]\n }\n export_tags = [tf.saved_model.SERVING, tf.saved_model.TPU]\n mode = _INFERENCE_ON_TPU_MODE\n\n # See b/110052256 for why `check_variables` is `False`.\n if not self._export_to_cpu:\n check_variables = save_variables = True\n else:\n check_variables = save_variables = False\n (super(TPUEstimator, self)._add_meta_graph_for_mode(\n builder,\n input_receiver_fn_map,\n checkpoint_path,\n save_variables=save_variables,\n mode=mode,\n export_tags=export_tags,\n check_variables=check_variables,\n strip_default_attrs=strip_default_attrs))\n\n def _call_model_fn(self, features, labels, mode, config):\n if mode == _INFERENCE_ON_TPU_MODE:\n context = tpu._TPUInferenceContext('tpu_inference', check_ops=False)\n try:\n 
context.Enter()\n if (\n (self._export_saved_model_api_version ==\n ExportSavedModelApiVersion.V1)\n or self._export_saved_model_api_version == 1):\n result = self._call_model_fn_for_inference(features, labels, mode,\n config)\n else:\n result = super(TPUEstimator,\n self)._call_model_fn(features, labels, mode, config)\n finally:\n context.Exit()\n return result\n else:\n return super(TPUEstimator, self)._call_model_fn(features, labels, mode,\n config)\n\n def _call_model_fn_for_inference(self, features, labels, mode, config):\n \"\"\"Wraps `_call_model_fn` for `export_saved_model`.\"\"\"\n if mode != _INFERENCE_ON_TPU_MODE:\n raise ValueError('mode must be {}; '\n 'got {}.'.format(_INFERENCE_ON_TPU_MODE, mode))\n return model_fn_inference_on_tpu(\n self._model_fn,\n features,\n labels,\n config,\n self._params,\n batch_config=None)\n\n def _create_global_step(self, graph):\n \"\"\"Creates a global step suitable for TPUs.\n\n Args:\n graph: The graph in which to create the global step.\n\n Returns:\n A global step `Tensor`.\n\n Raises:\n ValueError: if the global step tensor is already defined.\n \"\"\"\n return _create_global_step(graph)\n\n def _convert_train_steps_to_hooks(self, steps, max_steps):\n with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx:\n if ctx.is_running_on_cpu():\n return super(TPUEstimator,\n self)._convert_train_steps_to_hooks(steps, max_steps)\n\n # On TPU.\n if steps is None and max_steps is None:\n raise ValueError(\n 'For TPU training, one of `steps` or `max_steps` must be set. '\n 'Cannot be both `None`.')\n\n # Estimator.train has explicit positiveness check.\n if steps is not None:\n util_lib.check_positive_integer(steps, 'Train steps')\n if max_steps is not None:\n util_lib.check_positive_integer(max_steps, 'Train max_steps')\n\n return [\n _TPUStopAtStepHook(self._iterations_per_training_loop, steps, max_steps)\n ]\n\n def _convert_eval_steps_to_hooks(self, steps):\n with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx:\n if ctx.is_running_on_cpu():\n return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)\n\n if steps is None:\n raise ValueError('Evaluate `steps` must be set on TPU. 
Cannot be `None`.')\n\n util_lib.check_positive_integer(steps, 'Eval steps')\n\n return [\n evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access\n num_evals=steps),\n _SetEvalIterationsHook(steps)\n ]\n\n def _call_input_fn(self, input_fn, mode, input_context=None):\n \"\"\"Calls the input function.\n\n Args:\n input_fn: The input function.\n mode: ModeKeys.\n input_context: Optional instance of `tf.distribute.InputContext`.\n\n Returns:\n In TPU mode, returns an input_fn to be called later in model_fn.\n Otherwise, calls the input_fn and returns either features or\n (features, labels).\n\n Raises:\n ValueError: if input_fn takes invalid arguments or does not have `params`.\n \"\"\"\n input_fn_args = function_utils.fn_args(input_fn)\n config = self.config # a deep copy.\n kwargs = {}\n if 'params' in input_fn_args:\n kwargs['params'] = self.params # a deep copy.\n else:\n raise ValueError('input_fn ({}) does not include params argument, '\n 'required by TPUEstimator to pass batch size as '\n 'params[\"batch_size\"]'.format(input_fn))\n if 'config' in input_fn_args:\n kwargs['config'] = config\n\n if 'mode' in input_fn_args:\n kwargs['mode'] = mode\n\n if 'input_context' in input_fn_args:\n kwargs['input_context'] = input_context\n\n # Records the fact input_fn has been invoked.\n self._is_input_fn_invoked = True\n\n with self._ctx.with_mode(mode) as ctx:\n if (ctx.is_running_on_cpu() and\n ctx.is_input_slice_broadcast_to_all_cores()):\n raise ValueError('Invalid TPUConfig `eval_training_input_configuration`'\n ' value. SLICED mode only works on use_tpu = True.')\n # Setting the batch size in params first. This helps the user to have the\n # same input_fn for use_tpu=True/False.\n batch_size_for_input_fn = ctx.batch_size_for_input_fn\n if batch_size_for_input_fn is not None:\n _add_item_to_params(kwargs['params'], _BATCH_SIZE_KEY,\n batch_size_for_input_fn)\n\n # For export_saved_model, input_fn is never passed to Estimator. So,\n # `is_export_mode` must be False.\n if ctx.is_running_on_cpu(is_export_mode=False):\n with tf.compat.v1.device('/device:CPU:0'):\n return input_fn(**kwargs)\n\n # For TPU computation, input_fn should be invoked in a tf.while_loop for\n # performance. While constructing the tf.while_loop, the structure of\n # inputs returned by the `input_fn` needs to be recorded. The structure\n # includes whether features or labels is dict or single Tensor, dict keys,\n # tensor shapes, and dtypes. The recorded structure is used to create the\n # infeed dequeue ops, which must be wrapped and passed as a Fn, called\n # inside the TPU computation, as the TPU computation is wrapped inside a\n # tf.while_loop also. So, we either pass input_fn to model_fn or pass\n # dequeue_fn to model_fn. Here, `input_fn` is passed directly as\n # `features` in `model_fn` signature.\n def _input_fn(ctx):\n _add_item_to_params(kwargs['params'], _CTX_KEY, ctx)\n return input_fn(**kwargs)\n\n return _input_fn\n\n def _validate_features_in_predict_input(self, result):\n \"\"\"Skip the validation.\n\n For TPUEstimator, we do not need to check the result type. `_InputPipeline`\n has a stronger check. 
The parent class's check generates a confusing warning message.\n\n Args:\n result: `features` returned by input_fn.\n \"\"\"\n pass\n\n def train(self,\n input_fn,\n hooks=None,\n steps=None,\n max_steps=None,\n saving_listeners=None):\n rendezvous = error_handling.ErrorRendezvous(num_sources=3)\n self._rendezvous[model_fn_lib.ModeKeys.TRAIN] = rendezvous\n try:\n return super(TPUEstimator, self).train(\n input_fn=input_fn,\n hooks=hooks,\n steps=steps,\n max_steps=max_steps,\n saving_listeners=saving_listeners)\n except Exception: # pylint: disable=broad-except\n rendezvous.record_error('training_loop', sys.exc_info())\n finally:\n rendezvous.record_done('training_loop')\n rendezvous.raise_errors()\n\n def evaluate(self,\n input_fn,\n steps=None,\n hooks=None,\n checkpoint_path=None,\n name=None):\n rendezvous = error_handling.ErrorRendezvous(num_sources=3)\n self._rendezvous[model_fn_lib.ModeKeys.EVAL] = rendezvous\n try:\n return super(TPUEstimator, self).evaluate(\n input_fn,\n steps=steps,\n hooks=hooks,\n checkpoint_path=checkpoint_path,\n name=name)\n except Exception: # pylint: disable=broad-except\n rendezvous.record_error('evaluation_loop', sys.exc_info())\n finally:\n rendezvous.record_done('evaluation_loop')\n rendezvous.raise_errors()\n\n def predict(self,\n input_fn,\n predict_keys=None,\n hooks=None,\n checkpoint_path=None,\n yield_single_examples=True):\n rendezvous = error_handling.ErrorRendezvous(num_sources=3)\n self._rendezvous[model_fn_lib.ModeKeys.PREDICT] = rendezvous\n try:\n for result in super(TPUEstimator, self).predict(\n input_fn=input_fn,\n predict_keys=predict_keys,\n hooks=hooks,\n checkpoint_path=checkpoint_path,\n yield_single_examples=yield_single_examples):\n yield result\n except Exception: # pylint: disable=broad-except\n rendezvous.record_error('prediction_loop', sys.exc_info())\n finally:\n rendezvous.record_done('prediction_loop')\n rendezvous.raise_errors()\n\n def _augment_model_fn(self, model_fn, batch_axis):\n \"\"\"Returns a new model_fn, which wraps the TPU support.\"\"\"\n\n def _model_fn(features, labels, mode, config, params):\n \"\"\"An Estimator `model_fn` for TPUEstimator.\"\"\"\n\n # `input_fn` is called in `train()`, `evaluate()`, and `predict()`,\n # but not in `export_saved_model()`.\n if self._is_input_fn_invoked:\n is_export_mode = False\n else:\n is_export_mode = True\n\n # Clear the bit.\n self._is_input_fn_invoked = None\n\n if is_export_mode:\n if mode == _INFERENCE_ON_TPU_MODE:\n _add_item_to_params(params, _USE_TPU_KEY, True)\n mode = model_fn_lib.ModeKeys.PREDICT\n else:\n _add_item_to_params(params, _USE_TPU_KEY, False)\n\n with self._ctx.with_mode(mode) as ctx:\n model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)\n\n # examples_hook is added to training_hooks for both CPU and TPU\n # execution.\n if (self._log_every_n_steps is not None or\n self._log_every_n_secs is not None):\n examples_hook = ExamplesPerSecondHook(\n ctx.global_batch_size,\n # pylint:disable=g-long-ternary\n output_dir=(self.model_dir\n if not config or config.save_summary_steps else None),\n # pylint:enable=g-long-ternary\n every_n_steps=self._log_every_n_steps,\n every_n_secs=self._log_every_n_secs)\n\n if ctx.is_running_on_cpu(is_export_mode=is_export_mode):\n tf.compat.v1.logging.info('Running %s on CPU/GPU', mode)\n estimator_spec = model_fn_wrapper.call_without_tpu(\n features, labels, is_export_mode=is_export_mode)\n if (self._log_every_n_steps is not None or\n 
self._log_every_n_secs is not None):\n estimator_spec = estimator_spec._replace(\n training_hooks=estimator_spec.training_hooks + (examples_hook,))\n return estimator_spec\n\n assert labels is None, '`labels` passed to `model_fn` must be `None`.'\n # TPUEstimator._call_input_fn passes `input_fn` as features to here.\n assert callable(features), '`input_fn` is not callable.'\n input_fn = features\n\n tpu_init_ops = []\n if ctx.embedding_config and mode == model_fn_lib.ModeKeys.TRAIN:\n dummy_table_variables, dummy_table_variables_init = (\n tpu_embedding_gradient.create_dummy_table_variables(\n ctx.embedding_config.tpu_embedding))\n ctx.embedding_config.dummy_table_variables = dummy_table_variables\n tpu_init_ops.append(dummy_table_variables_init)\n\n input_holders = _InputPipeline(input_fn, batch_axis, ctx)\n enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (\n input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())\n\n graph = tf.compat.v1.get_default_graph()\n for enqueue_op in enqueue_ops:\n if isinstance(enqueue_op, list):\n graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)\n else:\n graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)\n\n if mode == model_fn_lib.ModeKeys.TRAIN:\n compile_op, loss, host_call, scaffold_fn, training_hooks = (\n _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))\n has_saver_hook = training_hooks and any(\n isinstance(hook, tf.compat.v1.train.CheckpointSaverHook)\n for hook in training_hooks)\n if ctx.embedding_config:\n g = tf.compat.v1.get_default_graph()\n table_to_config_dict = (\n ctx.embedding_config.tpu_embedding.table_to_config_dict)\n optimization_parameters = (\n ctx.embedding_config.tpu_embedding.optimization_parameters)\n if self._embedding_from_feature_columns:\n embedding_variable_name_by_table, slot_variable_names_by_table = (\n _tpu_estimator_embedding.get_full_variable_names(\n g, table_to_config_dict, optimization_parameters))\n else:\n embedding_variable_name_by_table = None\n slot_variable_names_by_table = None\n embedding_variables_and_ops = (\n ctx.embedding_config.tpu_embedding.create_variables_and_ops(\n embedding_variable_name_by_table,\n slot_variable_names_by_table))\n tpu_init_ops.extend(embedding_variables_and_ops.load_ops())\n # scaffold_fn must be called after variables for TPU embedding has\n # been created on CPU, as user might reinitialize those from some\n # checkpoint within scaffold_fn.\n scaffold = _get_scaffold(scaffold_fn)\n\n host_ops = host_call.create_tpu_hostcall()\n\n shutdown_hooks = []\n shutdown_mode = os.environ.get('TF_TPU_GRACEFUL_SHUTDOWN_MODE',\n 'reset_computation')\n if shutdown_mode:\n if shutdown_mode == 'shutdown_worker':\n finalizer_hooks = [\n session_support.ShutdownLameWorkers(),\n ]\n elif shutdown_mode == 'shutdown_all_workers':\n finalizer_hooks = [\n session_support.ShutdownAllWorkers(),\n ]\n elif shutdown_mode == 'reset_computation':\n finalizer_hooks = [\n session_support.ResetComputation(),\n ]\n elif not shutdown_mode:\n finalizer_hooks = []\n else:\n raise ValueError('Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE \"%s\"' %\n shutdown_mode)\n\n if finalizer_hooks:\n if has_saver_hook:\n saver = _NotSaver(\n 'No save on shutdown when there are user-defined '\n 'CheckpointSaverHooks')\n else:\n saver = None # Yes automatic save on shutdown.\n shutdown_hooks.append(\n session_support.GracefulShutdownHook(\n checkpoint_prefix=self.model_dir + '/model.ckpt',\n on_shutdown_hooks=finalizer_hooks,\n saver=saver))\n\n with tf.control_dependencies([loss]):\n 
global_step = tf.identity(tf.compat.v1.train.get_global_step())\n hooks = input_hooks + shutdown_hooks\n\n if ctx.feed_hook is not None:\n tf.compat.v1.logging.info(\n 'Use user implemented tpu infeed outfeed session hook class.')\n infeed_outfeed_session_hook_class = ctx.feed_hook\n else:\n infeed_outfeed_session_hook_class = TPUInfeedOutfeedSessionHook\n\n hooks.extend([\n infeed_outfeed_session_hook_class(\n ctx,\n enqueue_ops,\n host_ops,\n tpu_compile_op=compile_op,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator),\n rendezvous=self._rendezvous[mode],\n master=self._config.master,\n session_config=self._session_config,\n tpu_init_ops=tpu_init_ops,\n outfeed_every_n_steps=self._config.tpu_config\n .experimental_host_call_every_n_steps),\n InstallSignalHandlerHook()\n ])\n if _check_add_preemption_hook(self._config.cluster):\n hooks.extend(\n [preempted_hook.CloudTPUPreemptedHook(self._config.cluster)])\n if (self._log_every_n_steps is not None or\n self._log_every_n_secs is not None):\n if self._iterations_per_training_loop.unit == 'count':\n examples_hook._set_steps_per_run( # pylint: disable=protected-access\n self._iterations_per_training_loop.value)\n hooks.append(\n tf.compat.v1.train.LoggingTensorHook(\n {\n 'loss': tf.identity(loss),\n 'step': global_step,\n },\n every_n_iter=self._log_every_n_steps,\n every_n_secs=self._log_every_n_secs))\n hooks.append(examples_hook)\n\n if training_hooks:\n hooks.extend(training_hooks)\n\n chief_hooks = []\n if (not has_saver_hook and\n (self._config.save_checkpoints_secs or\n self._config.save_checkpoints_steps)):\n checkpoint_hook = tf.compat.v1.train.CheckpointSaverHook(\n self.model_dir,\n save_secs=self._config.save_checkpoints_secs,\n save_steps=self._config.save_checkpoints_steps,\n scaffold=scaffold,\n save_graph_def=self._config.checkpoint_save_graph_def)\n if self._iterations_per_training_loop.unit == 'count':\n checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access\n self._iterations_per_training_loop.value)\n else:\n # When estimating iterations_per_loop, set steps_per_run to an\n # arbitrarily high number to force checking the global step on\n # every call.\n # TODO(henrytan): refactor SecondOrStepTimer to do this more\n # explicitly.\n checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access\n 100000)\n chief_hooks.append(checkpoint_hook)\n else:\n tf.compat.v1.logging.info('Bypassing TPUEstimator hook')\n\n tf.compat.v1.summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)\n with tf.control_dependencies([loss]):\n update_ops = _sync_variables_ops(ctx)\n if ctx.embedding_config:\n update_ops.extend(embedding_variables_and_ops.retrieve_ops())\n\n # Validate the TPU training graph to catch basic errors\n _validate_tpu_training_graph(ctx)\n\n train_op = tf.group(*update_ops)\n graph.add_to_collection(_TPU_TRAIN_OP, train_op)\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=loss,\n training_chief_hooks=chief_hooks,\n training_hooks=hooks,\n train_op=train_op,\n scaffold=scaffold)\n\n if mode == model_fn_lib.ModeKeys.EVAL:\n compile_op, total_loss, host_calls, scaffold_fn, eval_hooks = (\n _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))\n if ctx.embedding_config:\n g = tf.compat.v1.get_default_graph()\n table_to_config_dict = (\n ctx.embedding_config.tpu_embedding.table_to_config_dict)\n if self._embedding_from_feature_columns:\n embedding_variable_name_by_table, _ = (\n _tpu_estimator_embedding.get_full_variable_names(\n g, table_to_config_dict))\n else:\n 
embedding_variable_name_by_table = None\n embedding_variables_and_ops = (\n ctx.embedding_config.tpu_embedding.create_variables_and_ops(\n embedding_variable_name_by_table))\n tpu_init_ops.extend(embedding_variables_and_ops.load_ops())\n # scaffold_fn must be called after variables for TPU embedding has\n # been created on CPU, as user might reinitialize those from some\n # checkpoint within scaffold_fn.\n scaffold = _get_scaffold(scaffold_fn)\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n mean_loss = tf.compat.v1.div(\n total_loss,\n tf.cast(iterations_per_loop_var, dtype=total_loss.dtype))\n\n with tf.control_dependencies([mean_loss]):\n # After TPU evaluation computation is done (the mean_loss tensor),\n # reads all variables back from TPU and updates the eval step\n # counter properly\n internal_ops_to_run = _sync_variables_ops(ctx)\n internal_ops_to_run.append(\n _increase_eval_step_op(iterations_per_loop_var))\n\n host_call_ret = host_calls.create_tpu_hostcall()\n eval_metric_ops = {}\n eval_update_ops = []\n\n eval_metrics = host_call_ret.get('eval_metrics', {})\n if eval_metrics:\n # Creates a dummy metric update_op for all metrics. Estimator\n # expects all metrics in `eval_metric_ops` have update_op and calls\n # them one by one. The real metric update_ops are invoked in a\n # separated thread. So, here give Estimator the dummy op for all\n # metrics.\n with tf.control_dependencies(internal_ops_to_run):\n dummy_update_op = tf.no_op()\n\n for k, v in eval_metrics.items():\n eval_metric_ops[k] = (v[0], dummy_update_op)\n eval_update_ops.append(v[1])\n else:\n # If no eval metrics are passed, create an identity node for the\n # loss and add `internal_ops_to_run` to its dependencies. So\n # `internal_ops_to_run` can be executed.\n with tf.control_dependencies(internal_ops_to_run):\n mean_loss = tf.identity(mean_loss)\n\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n eval_update_ops + host_ops,\n tpu_compile_op=compile_op,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator),\n rendezvous=self._rendezvous[mode],\n master=self._config.evaluation_master,\n session_config=self._session_config,\n tpu_init_ops=tpu_init_ops)\n ] + input_hooks\n\n if _check_add_preemption_hook(self._config.cluster):\n hooks.extend(\n [preempted_hook.CloudTPUPreemptedHook(self._config.cluster)])\n\n if eval_hooks:\n hooks.extend(eval_hooks)\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=mean_loss,\n evaluation_hooks=hooks,\n eval_metric_ops=eval_metric_ops,\n scaffold=scaffold)\n\n # Predict\n assert mode == model_fn_lib.ModeKeys.PREDICT\n\n (compile_op, dummy_predict_op, host_calls, scaffold_fn,\n prediction_hooks) = _predict_on_tpu_system(ctx, model_fn_wrapper,\n dequeue_fn)\n scaffold = _get_scaffold(scaffold_fn)\n with tf.control_dependencies([dummy_predict_op]):\n internal_ops_to_run = _sync_variables_ops(ctx)\n with tf.control_dependencies(internal_ops_to_run):\n dummy_predict_op = tf.no_op()\n\n # In train and evaluation, the main TPU program is passed to monitored\n # training session to run. Infeed enqueue and outfeed dequeue are\n # executed in side threads. This is not the configuration for\n # prediction mode.\n #\n # For prediction, the Estimator executes the EstimatorSpec.predictions\n # directly and yield the element (via generator) to call site. 
So, the\n # outfeed based prediction must be passed to MonitoredSession directly.\n # Other parts of the TPU execution are organized as follows.\n #\n # 1. All outfeed based Tensors must be grouped with predictions Tensors\n # to form a single invocation. This avoids the issue where we might\n # trigger multiple outfeeds incorrectly. To achieve this, `host_call` is\n # placed in control_dependencies of `stopping_signals`, and\n # `stopping_signals` is passed into _StoppingPredictHook, which sets\n # the `stopping_signals` as SessionRunArgs. MonitoredSession merges\n # all SessionRunArgs with the fetch in session.run together.\n #\n # 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)\n # are grouped together. They will be launched once and only once in\n # side threads and they quit naturally according to the SAME stopping\n # condition.\n enqueue_ops.append(dummy_predict_op)\n\n host_call_ret = host_calls.create_tpu_hostcall()\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n\n predictions = host_call_ret['predictions']\n _verify_cross_hosts_transfer_size(\n predictions,\n message=(\n 'The estimated size for TPUEstimatorSpec.predictions is too '\n 'large.'))\n signals = host_call_ret['signals']\n\n with tf.control_dependencies(host_ops):\n host_ops = [] # Empty, we do not need it anymore.\n scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(\n signals)\n predictions = _PaddingSignals.slice_tensor_or_dict(\n predictions, signals)\n\n hooks = [\n _StoppingPredictHook(scalar_stopping_signal),\n TPUInfeedOutfeedSessionHookForPrediction(\n ctx,\n enqueue_ops,\n host_ops,\n rendezvous=self._rendezvous[mode],\n tpu_compile_op=compile_op,\n master=self._config.master,\n session_config=self._session_config),\n ] + input_hooks\n\n if prediction_hooks:\n hooks.extend(prediction_hooks)\n\n return model_fn_lib.EstimatorSpec(\n mode,\n prediction_hooks=hooks,\n predictions=predictions,\n scaffold=scaffold)\n\n return _model_fn\n\n\ndef _check_add_preemption_hook(cluster):\n return (tpu_cluster_resolver.is_running_in_gce() and cluster and isinstance(\n cluster, tf.distribute.cluster_resolver.TPUClusterResolver) and\n cluster._cloud_tpu_client.api_available())\n\n\ndef _export_output_to_tensors(export_output):\n \"\"\"Get a list of `Tensors` used in `export_output`.\n\n Args:\n export_output: an `ExportOutput` object such as `ClassificationOutput`,\n `RegressionOutput`, or `PredictOutput`.\n\n Returns:\n a list of tensors used in export_output.\n\n Raises:\n ValueError: if `export_output` is not one of `ClassificationOutput`,\n `RegressionOutput`, or `PredictOutput`.\n \"\"\"\n if isinstance(export_output, export_output_lib.ClassificationOutput):\n return [export_output.scores, export_output.classes]\n elif isinstance(export_output, export_output_lib.RegressionOutput):\n return [export_output.value]\n elif isinstance(export_output, export_output_lib.PredictOutput):\n return list(export_output.outputs.values())\n else:\n raise ValueError(\n '`export_output` must have type `ClassificationOutput`, '\n '`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))\n
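\n\n# Illustrative note (hedged; `scores_t` and `classes_t` are placeholder\n# tensors): _export_output_to_tensors above and\n# _clone_export_output_with_tensors below form a round trip. Given\n# out = export_output_lib.ClassificationOutput(scores=scores_t,\n# classes=classes_t)\n# _export_output_to_tensors(out) returns [scores_t, classes_t], and\n# _clone_export_output_with_tensors(out, [new_scores, new_classes]) rebuilds a\n# ClassificationOutput around the replacement tensors, in the same order.\n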
\n\ndef _clone_export_output_with_tensors(export_output, tensors):\n \"\"\"Clones `export_output` but with new `tensors`.\n\n Args:\n export_output: an `ExportOutput` object such as `ClassificationOutput`,\n `RegressionOutput`, or `PredictOutput`.\n tensors: a list of `Tensors` used to construct a new `export_output`.\n\n Returns:\n An `ExportOutput` of the same type as `export_output`, built from the new\n `tensors`.\n\n Raises:\n ValueError: if `export_output` is not one of `ClassificationOutput`,\n `RegressionOutput`, or `PredictOutput`.\n \"\"\"\n if isinstance(export_output, export_output_lib.ClassificationOutput):\n if len(tensors) != 2:\n raise ValueError('tensors must be of length 2; '\n 'got {}.'.format(len(tensors)))\n return export_output_lib.ClassificationOutput(*tensors)\n elif isinstance(export_output, export_output_lib.RegressionOutput):\n if len(tensors) != 1:\n raise ValueError('tensors must be of length 1; '\n 'got {}'.format(len(tensors)))\n return export_output_lib.RegressionOutput(*tensors)\n elif isinstance(export_output, export_output_lib.PredictOutput):\n return export_output_lib.PredictOutput(\n dict(zip(export_output.outputs.keys(), tensors)))\n else:\n raise ValueError(\n '`export_output` must have type `ClassificationOutput`, '\n '`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))\n\n\ndef _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):\n \"\"\"Executes `model_fn_wrapper` multiple times on all TPU shards.\"\"\"\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n\n (single_tpu_eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks\n ) = model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn)\n\n @tpu_function.on_device_training_loop\n def multi_tpu_eval_steps_on_single_shard(replica_id):\n # `tpu.split_compile_and_shard()` splits and passes input for each\n # replica as an array. As so, correctly reshape the input to be a\n # scalar.\n replica_id = tf.reshape(replica_id, [])\n with tpu_context._TPUEstimatorReplicaContext(replica_id): # pylint: disable=protected-access\n return training_loop.repeat(iterations_per_loop_var, single_tpu_eval_step,\n [_ZERO_LOSS])\n\n # Add input that represents id for each replica in sync so that\n # _TPUEstimatorReplicaContext can be correctly entered during\n # replicated computation.\n replica_id_inputs = []\n replica_id_inputs.append([tf.constant(i) for i in range(ctx.num_replicas)])\n\n (\n compile_op,\n loss,\n ) = tpu.split_compile_and_shard(\n multi_tpu_eval_steps_on_single_shard,\n inputs=replica_id_inputs,\n num_shards=ctx.num_replicas,\n outputs_from_all_shards=False,\n device_assignment=ctx.device_assignment)\n\n loss = loss[0]\n return (compile_op, loss, host_calls, captured_scaffold_fn,\n captured_eval_hooks.get())\n\n\ndef _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):\n \"\"\"Executes `model_fn_wrapper` multiple times on all TPU shards.\"\"\"\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n\n (single_tpu_train_step, host_call, captured_scaffold_fn,\n captured_training_hooks) = (\n model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))\n\n @tpu_function.on_device_training_loop\n def multi_tpu_train_steps_on_single_shard(replica_id):\n # `tpu.split_compile_and_shard()` splits and passes input for each\n # replica as an array. 
As so, correctly reshape the input to be a\n # scalar.\n replica_id = tf.reshape(replica_id, [])\n with tpu_context._TPUEstimatorReplicaContext(replica_id): # pylint: disable=protected-access\n outputs = training_loop.while_loop(\n lambda i, loss: i < iterations_per_loop_var,\n lambda i, loss: [i + 1, single_tpu_train_step(i)],\n inputs=[0, _INITIAL_LOSS])\n return outputs[1:]\n\n # Add input that represents id for each replica in sync so that\n # _TPUEstimatorReplicaContext can be correctly entered during\n # replicated computation.\n replica_id_inputs = []\n replica_id_inputs.append([tf.constant(i) for i in range(ctx.num_replicas)])\n\n (compile_op, loss) = tpu.split_compile_and_shard(\n multi_tpu_train_steps_on_single_shard,\n inputs=replica_id_inputs,\n num_shards=ctx.num_replicas,\n outputs_from_all_shards=False,\n device_assignment=ctx.device_assignment)\n\n loss = loss[0]\n return (compile_op, loss, host_call, captured_scaffold_fn,\n captured_training_hooks.get())\n\n\ndef _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):\n \"\"\"Executes `model_fn_wrapper` multiple times on all TPU shards.\"\"\"\n (single_tpu_predict_step, host_calls, captured_scaffold_fn,\n captured_predict_hooks\n ) = model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn)\n\n @tpu_function.on_device_training_loop\n def multi_tpu_predict_steps_on_single_shard(replica_id):\n # `tpu.split_compile_and_shard()` splits and passes input for each\n # replica as an array. As so, correctly reshape the input to be a\n # scalar.\n replica_id = tf.reshape(replica_id, [])\n with tpu_context._TPUEstimatorReplicaContext(replica_id): # pylint: disable=protected-access\n\n def cond(scalar_stopping_signal):\n return tf.math.logical_not(\n _StopSignals.should_stop(scalar_stopping_signal))\n\n inputs = [_StopSignals.NON_STOPPING_SIGNAL]\n outputs = training_loop.while_loop(\n cond, single_tpu_predict_step, inputs=inputs, name=b'loop')\n return outputs\n\n # Add input that represents id for each replica in sync so that\n # _TPUEstimatorReplicaContext can be correctly entered during\n # replicated computation.\n replica_id_inputs = []\n replica_id_inputs.append([tf.constant(i) for i in range(ctx.num_replicas)])\n (\n compile_op,\n dummy_predict_op,\n ) = tpu.split_compile_and_shard(\n multi_tpu_predict_steps_on_single_shard,\n inputs=replica_id_inputs,\n num_shards=ctx.num_replicas,\n outputs_from_all_shards=False,\n device_assignment=ctx.device_assignment)\n\n dummy_predict_op = dummy_predict_op[0]\n return (compile_op, dummy_predict_op, host_calls, captured_scaffold_fn,\n captured_predict_hooks.get())\n\n\ndef _wrap_computation_in_while_loop(device, op_fn):\n \"\"\"Wraps the ops generated by `op_fn` in tf.while_loop.\"\"\"\n\n def computation(i):\n with tf.control_dependencies(op_fn()):\n return i + 1\n\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n # By setting parallel_iterations=1, the parallel execution in while_loop is\n # basically turned off.\n with tf.compat.v1.device(device):\n iterations = tf.identity(iterations_per_loop_var)\n return tf.compat.v1.while_loop(\n lambda i: i < iterations,\n computation, [tf.constant(0)],\n parallel_iterations=1)\n\n\ndef _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):\n \"\"\"Wraps the ops generated by `op_fn` in tf.while_loop.\"\"\"\n\n def cond(scalar_stopping_signal):\n return tf.math.logical_not(_StopSignals.should_stop(scalar_stopping_signal))\n\n def computation(unused_scalar_stopping_signal):\n return_value = op_fn()\n 
execute_ops = return_value['ops']\n    signals = return_value['signals']\n    with tf.control_dependencies(execute_ops):\n      return _StopSignals.as_scalar_stopping_signal(signals)\n\n  # By setting parallel_iterations=1, the parallel execution in while_loop is\n  # basically turned off.\n  with tf.compat.v1.device(device):\n    return tf.compat.v1.while_loop(\n        cond,\n        computation, [_StopSignals.NON_STOPPING_SIGNAL],\n        parallel_iterations=1)\n\n\ndef _validate_tpu_training_graph(ctx):\n  \"\"\"Validate graph before running distributed training.\n\n  Args:\n    ctx: A `_InternalTPUContext` instance with mode.\n\n  Raises:\n    ValueError: If the graph seems invalid for running on device\n  \"\"\"\n  if control_flow_util.ENABLE_CONTROL_FLOW_V2:\n    return  # b/124241278\n\n  operations = tf.compat.v1.get_default_graph().get_operations()\n\n  # Check if there is at least one CrossReplicaSum operation in the graph\n  # This should be introduced by using the CrossShardOptimizer wrapper\n  cross_replica_sum_ops = [\n      o for o in operations if o.type == _CROSS_REPLICA_SUM_OP\n  ]\n  if not cross_replica_sum_ops and ctx.num_replicas > 1:\n    raise ValueError(\n        'CrossShardOptimizer must be used for model training on TPUs.')\n\n\nclass _CapturedObject(object):\n  \"\"\"A placeholder to capture an object.\n\n  This is useful when we need to capture a Python object in the Tensorflow\n  control flow body function and use it outside the control flow.\n  \"\"\"\n\n  def __init__(self):\n    self._object = None\n    self._captured = False\n\n  def capture(self, o):\n    if self._captured:\n      raise RuntimeError(\n          'InternalError: Object can capture only once. Please file bug.')\n\n    self._captured = True\n    self._object = o\n\n  def get(self):\n    if not self._captured:\n      raise RuntimeError(\n          'InternalError: Object is not captured properly before `get`. 
'\n 'Please file bug.')\n return self._object\n\n\ndef _get_scaffold(captured_scaffold_fn):\n \"\"\"Retrieves the Scaffold from `captured_scaffold_fn`.\"\"\"\n with _CapturingContext(message='Inside scaffold_fn'):\n scaffold_fn = captured_scaffold_fn.get()\n if scaffold_fn:\n scaffold = scaffold_fn()\n if scaffold is None:\n raise ValueError(\n 'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')\n else:\n scaffold = None\n\n if scaffold:\n wrapped_finalize = scaffold.finalize\n\n def _finalize():\n with _CapturingContext('Inside Scaffold.finalize'):\n wrapped_finalize()\n\n scaffold.finalize = _finalize\n return scaffold\n\n\nclass _CapturingContext(control_flow_ops.ControlFlowContext):\n \"\"\"Tracks references to Tensors defined in TPU replication.\"\"\"\n\n def __init__(self, message):\n control_flow_ops.ControlFlowContext.__init__(self)\n self._message = message\n\n def to_control_flow_context_def(self, context_def, export_scope=None):\n # pylint: disable=useless-super-delegation\n # NOTE(slebedev): the method is required by `ControlFlowContext`.\n super(_CapturingContext,\n self).to_control_flow_context_def(context_def, export_scope)\n\n def AddOp(self, op): # pylint: disable=invalid-name\n for c in op.inputs:\n if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr: # pylint: disable=protected-access\n raise ValueError('{}: Op {} depends on TPU computation {}, '\n 'which is not allowed.'.format(self._message, op, c))\n\n def AddValue(self, value):\n self.AddOp(value.op)\n return value\n\n def __enter__(self):\n # pylint: disable=protected-access\n self._g = tf.compat.v1.get_default_graph()\n self._old = self._g._get_control_flow_context()\n self._g._set_control_flow_context(self)\n # pylint: enable=protected-access\n\n def __exit__(self, _, __, ___): # pylint: disable=invalid-name\n self._g._set_control_flow_context(self._old) # pylint: disable=protected-access\n\n\nclass _Inputs(object):\n \"\"\"A data structure representing the input_fn returned values.\n\n This also supports the returned value from input_fn as `Dataset`.\n \"\"\"\n\n def __init__(self, features=None, labels=None, dataset=None, signals=None):\n if dataset is not None and (features is not None or labels is not None or\n signals is not None):\n raise RuntimeError('Internal Error: Either (features and labels) or '\n 'dataset should be provided, not both. 
Please file '\n 'bug')\n\n self._features = features\n self._labels = labels\n self._signals = signals\n\n self._dataset = dataset\n self._iterator = None\n\n @staticmethod\n def from_input_fn(return_values):\n \"\"\"Returns an `_Inputs` instance according to `input_fn` return value.\"\"\"\n if isinstance(return_values, tf.compat.v2.data.Dataset):\n dataset = return_values\n return _Inputs(dataset=dataset)\n\n features, labels = _Inputs._parse_inputs(return_values)\n return _Inputs(features, labels)\n\n @staticmethod\n def _parse_inputs(return_values):\n if isinstance(return_values, tuple):\n features, labels = return_values\n else:\n features, labels = return_values, None\n return features, labels\n\n @property\n def is_dataset(self):\n \"\"\"Returns True if the return value from input_fn is Dataset.\"\"\"\n return self._dataset is not None\n\n def dataset_initializer(self):\n \"\"\"Returns the dataset's initializer.\n\n The initializer must be run before calling `features_and_labels`.\n \"\"\"\n self._iterator = tf.compat.v1.data.make_initializable_iterator(\n self._dataset)\n return self._iterator.initializer\n\n def features_and_labels(self):\n \"\"\"Gets `features` and `labels`.\"\"\"\n if self.is_dataset:\n if self._iterator is None:\n raise RuntimeError('Internal error: Must run dataset_initializer '\n 'before calling features_and_labels(). Please file '\n 'a bug!')\n return _Inputs._parse_inputs(self._iterator.get_next())\n\n return (self._features, self._labels)\n\n def signals(self):\n return self._signals\n\n @property\n def dataset(self):\n return self._dataset\n\n\nclass _InputsWithStoppingSignals(_Inputs):\n \"\"\"Inputs with `_StopSignals` inserted into the dataset.\"\"\"\n\n def __init__(self,\n dataset,\n batch_size,\n add_padding=False,\n num_invocations_per_step=1):\n\n assert dataset is not None\n user_provided_dataset = dataset.map(\n _InputsWithStoppingSignals.insert_stopping_signal(\n stop=False, batch_size=batch_size, add_padding=add_padding))\n if num_invocations_per_step == 1:\n final_batch_dataset = dataset.take(1).map(\n _InputsWithStoppingSignals.insert_stopping_signal(\n stop=True, batch_size=batch_size, add_padding=add_padding))\n else:\n # We append (2 * num_invocations_per_step - 1) batches for exhausting the\n # user_provided_dataset and stop properly.\n # For example, if num_invocations_per_step is 2, we append 3 additional\n # padding batches: b1, b2, b3.\n # If user_provided_dataset contains two batches: a1, a2\n # Step 1: [a1, a2]\n # Step 2: [b1, b2] -> STOP\n # If user_provided_dataset contains three batches: a1, a2, a3.\n # The training loops:\n # Step 1: [a1, a2]\n # Step 2: [a3, b1]\n # Step 3: [b2, b3] -> STOP.\n final_batch_dataset = dataset.take(1).map(\n _InputsWithStoppingSignals.insert_stopping_signal(\n stop=True, batch_size=batch_size, add_padding=add_padding))\n final_batch_dataset = final_batch_dataset.repeat(\n 2 * num_invocations_per_step - 1)\n\n def _set_mask(data_dict):\n signals = data_dict['signals']\n signals['padding_mask'] = tf.compat.v1.ones_like(\n signals['padding_mask'])\n data_dict['signals'] = signals\n return data_dict\n\n # Mask out the extra batch.\n final_batch_dataset = final_batch_dataset.map(_set_mask)\n\n dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2)\n\n super(_InputsWithStoppingSignals, self).__init__(dataset=dataset)\n self._current_inputs = None\n\n def features_and_labels(self):\n if self._current_inputs is not None:\n raise RuntimeError(\n 'Internal Error: The previous inputs 
have not been properly '\n          'consumed. First call features_and_labels, then call signals.')\n\n    inputs_with_signals = self._iterator.get_next()\n    features = inputs_with_signals['features']\n    labels = inputs_with_signals.get('labels')\n\n    self._current_inputs = inputs_with_signals\n    return features, labels\n\n  def signals(self):\n    \"\"\"Returns the `Signals` from `_Inputs`.\"\"\"\n    if self._current_inputs is None:\n      raise RuntimeError(\n          'Internal Error: The current inputs have not been properly '\n          'generated. First call features_and_labels, then call signals.')\n    signals = self._current_inputs['signals']\n    self._current_inputs = None\n    return signals\n\n  @staticmethod\n  def insert_stopping_signal(stop, batch_size, add_padding=False):\n    \"\"\"Inserts stopping_signal into dataset via _map_fn.\n\n    Here we change the data structure in the dataset, such that the return value\n    is a dictionary now and `features`, `labels`, and `signals` are three\n    distinguished keys in that dict. This provides a better structure, which\n    eases the process of decomposing the inputs (see `features_and_labels`).\n\n    Args:\n      stop: bool, state of current stopping signals.\n      batch_size: int, batch size.\n      add_padding: bool, whether to pad the tensor to full batch size.\n\n    Returns:\n      A map_fn passed to dataset.map API.\n    \"\"\"\n\n    def _map_fn(*args):\n      \"\"\"The map fn to insert signals.\"\"\"\n      if len(args) == 1:\n        # Unpack the single Tensor/dict argument as features. This is required\n        # when the input_fn returns no labels.\n        args = args[0]\n      features, labels = _Inputs._parse_inputs(args)\n      new_input_dict = {}\n\n      if add_padding:\n        padding_mask, features, labels = (\n            _PaddingSignals.pad_features_and_labels(features, labels,\n                                                    batch_size))\n\n        new_input_dict['features'] = features\n        if labels is not None:\n          new_input_dict['labels'] = labels\n\n      else:\n        new_input_dict['features'] = features\n        if labels is not None:\n          new_input_dict['labels'] = labels\n        padding_mask = None\n\n      new_input_dict['signals'] = _StopSignals(\n          stop=stop, batch_size=batch_size,\n          padding_mask=padding_mask).as_dict()\n\n      return new_input_dict\n\n    return _map_fn\n\n\nclass _StopSignals(object):\n  \"\"\"Signals class holding all logic to handle TPU stopping condition.\"\"\"\n\n  NON_STOPPING_SIGNAL = False\n  STOPPING_SIGNAL = True\n\n  def __init__(self, stop, batch_size, padding_mask=None):\n    self._stop = stop\n    self._batch_size = batch_size\n    self._padding_mask = padding_mask\n\n  def as_dict(self):\n    \"\"\"Returns the signals as Python dict.\"\"\"\n    shape = [self._batch_size, 1]\n    dtype = tf.dtypes.bool\n\n    if self._stop:\n      stopping = tf.ones(shape=shape, dtype=dtype)\n    else:\n      stopping = tf.zeros(shape=shape, dtype=dtype)\n\n    signals = {'stopping': stopping}\n    if self._padding_mask is not None:\n      signals['padding_mask'] = self._padding_mask\n    return signals\n\n  @staticmethod\n  def as_scalar_stopping_signal(signals):\n    return tf.identity(signals['stopping'][0][0])\n\n  @staticmethod\n  def should_stop(scalar_stopping_signal):\n    \"\"\"Detects whether scalar_stopping_signal indicates stopping.\"\"\"\n    if isinstance(scalar_stopping_signal, tf.Tensor):\n      # STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF\n      # way to express the bool check whether scalar_stopping_signal is True.\n      return tf.math.logical_and(scalar_stopping_signal,\n                                 _StopSignals.STOPPING_SIGNAL)\n    else:\n      # For the non-Tensor case, it is used in SessionRunHook. So, we cannot modify\n      # the graph anymore. 
Here, we use pure Python.\n return bool(scalar_stopping_signal)\n\n\nclass _PaddingSignals(object):\n \"\"\"Signals class holding all logic to handle padding.\"\"\"\n\n @staticmethod\n def pad_features_and_labels(features, labels, batch_size):\n \"\"\"Pads out the batch dimension of features and labels.\"\"\"\n real_batch_size = tf.compat.v1.shape(\n _PaddingSignals._find_any_tensor(features))[0]\n\n batch_size_tensor = tf.constant(batch_size, tf.dtypes.int32)\n\n check_greater = tf.compat.v1.debugging.assert_greater_equal(\n batch_size_tensor,\n real_batch_size,\n data=(batch_size_tensor, real_batch_size),\n message='The real batch size should not be greater than batch_size.')\n\n with tf.control_dependencies([check_greater]):\n missing_count = batch_size_tensor - real_batch_size\n\n def pad_single_tensor(tensor):\n \"\"\"Pads out the batch dimension of a tensor to the complete batch_size.\"\"\"\n rank = len(tensor.shape)\n assert rank > 0\n padding = tf.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))\n padded_shape = (batch_size,) + tuple(tensor.shape[1:])\n padded_tensor = tf.compat.v1.pad(tensor, padding)\n padded_tensor.set_shape(padded_shape)\n return padded_tensor\n\n def nest_pad(tensor_or_dict):\n return tf.nest.map_structure(pad_single_tensor, tensor_or_dict)\n\n features = nest_pad(features)\n if labels is not None:\n labels = nest_pad(labels)\n\n padding_mask = _PaddingSignals._padding_mask(real_batch_size, missing_count,\n batch_size)\n\n return padding_mask, features, labels\n\n @staticmethod\n def slice_tensor_or_dict(tensor_or_dict, signals):\n \"\"\"Slice the real Tensors according to padding mask in signals.\"\"\"\n\n padding_mask = signals['padding_mask']\n batch_size = tf.compat.v1.shape(padding_mask)[0]\n\n def verify_batch_size(tensor):\n check_batch_size = tf.math.equal(batch_size, tensor.shape[0])\n with tf.control_dependencies([check_batch_size]):\n return tf.identity(tensor)\n\n def slice_single_tensor(tensor):\n rank = len(tensor.shape)\n assert rank > 0\n real_batch_size = batch_size - tf.math.reduce_sum(padding_mask)\n return verify_batch_size(tensor)[0:real_batch_size]\n\n # As we split the Tensors to all TPU cores and concat them back, it is\n # important to ensure the real data is placed before padded ones, i.e.,\n # order is preserved. 
By that, the sliced padding mask should have all 0's.\n    # If this assertion fails, the slice logic here does not hold.\n    sliced_padding_mask = slice_single_tensor(padding_mask)\n    assert_padding_mask = tf.math.equal(\n        tf.math.reduce_sum(sliced_padding_mask), 0)\n\n    with tf.control_dependencies([assert_padding_mask]):\n      should_stop = _StopSignals.should_stop(\n          _StopSignals.as_scalar_stopping_signal(signals))\n\n    is_full_batch = tf.math.equal(tf.math.reduce_sum(padding_mask), 0)\n\n    def slice_fn(tensor):\n      # If the current batch is full batch or part of stopping signals, we do\n      # not need to slice to save performance.\n      return tf.compat.v1.cond(\n          tf.math.logical_or(should_stop, is_full_batch),\n          (lambda: verify_batch_size(tensor)),\n          (lambda: slice_single_tensor(tensor)))\n\n    return tf.nest.map_structure(slice_fn, tensor_or_dict)\n\n  @staticmethod\n  def _find_any_tensor(batch_features):\n    tensors = [\n        x for x in tf.nest.flatten(batch_features) if isinstance(x, tf.Tensor)\n    ]\n    if not tensors:\n      raise ValueError('Cannot find any Tensor in features dict.')\n    return tensors[0]\n\n  @staticmethod\n  def _padding_mask(real_batch_size, missing_count, batch_size):\n    padding_mask = tf.concat([\n        tf.zeros((real_batch_size,), dtype=tf.dtypes.int32),\n        tf.ones((missing_count,), dtype=tf.dtypes.int32)\n    ],\n                             axis=0)\n    padding_mask.set_shape((batch_size,))\n    return padding_mask\n\n\ndef _verify_cross_hosts_transfer_size(tensor_dict, message):\n  total_size = 0\n  tensor_structure = {}\n  for key, tensor in tensor_dict.items():\n    shape = tensor.shape\n    size = np.product(shape) * tensor.dtype.size\n    tensor_structure[key] = shape\n    total_size += size\n  if total_size >= _ONE_GIGABYTE:\n    raise ValueError(\n        '{} The transfer size is larger than the protobuf limit. Please '\n        'consider using Tensors with smaller shapes or reducing the batch '\n        'size. Given:\\n'\n        '{}'.format(\n            message, '\\n'.join([\n                ' -- Key: {}, Shape: {}'.format(k, v)\n                for k, v in tensor_structure.items()\n            ])))\n\n\ndef _add_item_to_params(params, key, value):\n  \"\"\"Adds a new item into `params`.\"\"\"\n  if hasattr(params, 'set_hparam'):\n    # For HParams, we need to use special API.\n    if key in params:\n      params.set_hparam(key, value)\n    else:\n      params.add_hparam(key, value)\n  else:\n    # Now params is a Python dict.\n    params[key] = value\n\n\ndef export_estimator_savedmodel(estimator,\n                                export_dir_base,\n                                serving_input_receiver_fn,\n                                assets_extra=None,\n                                as_text=False,\n                                checkpoint_path=None):\n  \"\"\"Export `Estimator` trained model for TPU inference.\n\n  Args:\n    estimator: `Estimator` with which model has been trained.\n    export_dir_base: A string containing a directory in which to create\n      timestamped subdirectories containing exported SavedModels.\n    serving_input_receiver_fn: A function that takes no argument and returns a\n      `ServingInputReceiver` or `TensorServingInputReceiver`.\n    assets_extra: A dict specifying how to populate the assets.extra directory\n      within the exported SavedModel, or `None` if no extra assets are needed.\n    as_text: whether to write the SavedModel proto in text format.\n    checkpoint_path: The checkpoint path to export. 
If `None` (the default),\n      the most recent checkpoint found within the model directory is chosen.\n\n  Returns:\n    The string path to the exported directory.\n  \"\"\"\n  # `TPUEstimator` requires `tpu_config.RunConfig`, so we cannot use\n  # `estimator.config`.\n  config = tpu_config.RunConfig(model_dir=estimator.model_dir)\n  est = TPUEstimator(\n      estimator._model_fn,  # pylint: disable=protected-access\n      config=config,\n      params=estimator.params,\n      use_tpu=True,\n      train_batch_size=2048,  # Does not matter.\n      eval_batch_size=2048,  # Does not matter.\n  )\n  return est.export_saved_model(export_dir_base, serving_input_receiver_fn,\n                                assets_extra, as_text, checkpoint_path)\n\n\ndef model_fn_inference_on_tpu(model_fn,\n                              features,\n                              labels=None,\n                              config=None,\n                              params=None,\n                              batch_config=None):\n  \"\"\"Convenience wrapper for export_saved_model API v2 for a model_fn.\n  WARNING: THIS METHOD IS DEPRECATED AND NOT PART OF THE APIS.\n\n  Make sure to set\n  `export_saved_model_api_version=tpu_estimator.ExportSavedModelApiVersion.V2`\n  when initializing TPUEstimator (default API version is V1). This is because\n  1) `tpu.rewrite` (or `tpu.compile`) shouldn't be called in a nested way\n  (otherwise validation will throw error like\n  \"NotImplementedError: tpu_shard_context cannot be nested.\")\n  2) When using V1 API, Estimator calls `tpu.rewrite` so\n  using `model_fn_inference_on_tpu` will trigger a nested call.\n  When using V2 API, users of Estimator need to call `tpu.rewrite` (which\n  the wrapper does).\n\n  It attempts to execute the entire model function on the TPU for prediction.\n  Note that this does not support features which are SparseTensors. If you have\n  SparseTensor features, consider partitioning your model function further and\n  using inference_on_tpu.\n\n  Args:\n    model_fn: the model_fn for which we want to run inference on TPU.\n    features: a tensor or dict of tensors, serves as the feature inputs to the\n      model.\n    labels: a tensor or dict of tensors, serves as the labels inputs to the\n      model.\n    config: auxiliary config to the Estimator.\n    params: hparams that we want to pass to the model_fn.\n    batch_config: a named tuple to wrap the inference batching configuration\n      inputs.\n\n  Returns:\n    An EstimatorSpec containing the outputs in export_outputs and predictions.\n  \"\"\"\n  computation, capture = _build_computation_for_inference(\n      model_fn, labels, config, params)\n  tensors = call_computation(features, computation, batch_config=batch_config)\n  estimator_spec, export_outputs_dict, predictions_dict, none_indices = (\n      capture.get())\n  predictions_list = tensors[:len(predictions_dict)]\n  export_outputs_list_without_none = tensors[len(predictions_dict):]\n\n  # Reinsert `None`s which we've taken out in\n  # `_build_computation_for_inference()`.\n  export_outputs_list = []\n  while none_indices or export_outputs_list_without_none:\n    if none_indices and none_indices[0] == len(export_outputs_list):\n      export_outputs_list.append(None)\n      none_indices.pop(0)\n    else:\n      export_outputs_list.append(export_outputs_list_without_none.pop(0))\n\n  # Reconstruct `export_outputs` with updated tensors.\n  new_export_outputs_dict = tf.nest.pack_sequence_as(export_outputs_dict,\n                                                     export_outputs_list)\n  export_outputs = estimator_spec.export_outputs\n  new_export_outputs = collections.OrderedDict(\n      (k, _clone_export_output_with_tensors(export_outputs[k], v))\n      for k, v in six.iteritems(new_export_outputs_dict))\n  # Reconstruct `predictions` with updated tensors.\n  new_predictions = tf.nest.pack_sequence_as(predictions_dict, 
predictions_list)\n  if (len(new_predictions) == 1 and\n      _KEY_WHEN_PREDICTIONS_IS_A_TENSOR in new_predictions):\n    new_predictions = new_predictions[_KEY_WHEN_PREDICTIONS_IS_A_TENSOR]\n\n  return estimator_spec._replace(\n      export_outputs=new_export_outputs, predictions=new_predictions)\n\n\ndef _build_computation_for_inference(model_fn, labels, config, params):\n  \"\"\"Builds the computation that calls the model_fn for inference.\"\"\"\n  capture = _CapturedObject()\n\n  def computation(computation_input):\n    \"\"\"Computation to be passed to `TPUPartitionedCall()`.\"\"\"\n    tpu_computation, tpu_capture = _build_tpu_computation_for_inference(\n        model_fn, computation_input, labels, config, params)\n\n    tensors_on_cpu = tf.compat.v1.tpu.rewrite(tpu_computation)\n    tpu.prune_unconnected_ops_from_xla(tf.compat.v1.get_default_graph())\n\n    (estimator_spec, export_outputs_dict, export_outputs_list,\n     predictions_dict) = (\n         tpu_capture.get())\n    predictions_list = tensors_on_cpu[:len(predictions_dict)]\n    export_outputs_tpu_on_cpu_list = tensors_on_cpu[len(predictions_dict):]\n\n    # Reconstruct tensors used in export_outputs, with TPU tensors replaced\n    # with their CPU counterpart returned from `rewrite_for_inference()`.\n    # `function.Defun()` does not like `None`s in return values, so we leave\n    # `None`s out but record their positions for later reconstruction.\n    export_outputs_list_without_none = []\n    none_indices = []\n    for i, t in enumerate(export_outputs_list):\n      if t is None:\n        none_indices.append(i)\n      else:\n        export_outputs_list_without_none.append(\n            export_outputs_tpu_on_cpu_list.pop(0))\n\n    capture.capture(\n        (estimator_spec, export_outputs_dict, predictions_dict, none_indices))\n    return predictions_list + export_outputs_list_without_none\n\n  return computation, capture\n\n\ndef _build_tpu_computation_for_inference(model_fn, features, labels, config,\n                                         params):\n  \"\"\"Builds the TPU computation for inference on TPU.\"\"\"\n  capture = _CapturedObject()\n\n  def computation():\n    \"\"\"Compute tpu tensors used in export_outputs.\n\n    Passed to rewrite_for_inference so that model_fn will be called under\n    the rewriting contexts. 
Only tpu tensors are returned, but export_outputs\n    and scaffold are captured.\n\n    Returns:\n      A list of Tensors used in export_outputs and not marked for\n      outside_compilation.\n    \"\"\"\n    # We should only call model fn once and it should be inside `computation`\n    # so that building the graph will happen under `rewrite_for_inference`.\n\n    model_fn_args = function_utils.fn_args(model_fn)\n    kwargs = {}\n    # Makes deep copy with `config` and `params` in case user mutates them.\n    if 'labels' in model_fn_args:\n      kwargs['labels'] = labels\n    if 'mode' in model_fn_args:\n      kwargs['mode'] = model_fn_lib.ModeKeys.PREDICT\n    if 'config' in model_fn_args:\n      kwargs['config'] = config\n    if 'params' in model_fn_args:\n      kwargs['params'] = params\n    estimator_spec = model_fn(features, **kwargs)\n\n    # We pick the TPU tensors out from `export_output` and later return them\n    # from `computation` for rewriting.\n    export_outputs_dict = collections.OrderedDict(\n        (k, _export_output_to_tensors(v))\n        for k, v in six.iteritems(estimator_spec.export_outputs))\n    export_outputs_list = tf.nest.flatten(export_outputs_dict)\n    export_outputs_tpu_list = [t for t in export_outputs_list if t is not None]\n\n    if isinstance(estimator_spec.predictions, dict):\n      predictions_dict = collections.OrderedDict(\n          (k, v) for k, v in six.iteritems(estimator_spec.predictions))\n    else:\n      predictions_dict = {\n          _KEY_WHEN_PREDICTIONS_IS_A_TENSOR: estimator_spec.predictions\n      }\n    predictions_list = tf.nest.flatten(predictions_dict)\n\n    # We cannot return everything we want through the return values, so\n    # capture the rest here for later use.\n    capture.capture((estimator_spec, export_outputs_dict, export_outputs_list,\n                     predictions_dict))\n    return predictions_list + export_outputs_tpu_list\n\n  return computation, capture\n\n\ndef inference_on_tpu(computation,\n                     inputs_to_tpu,\n                     num_batch_threads,\n                     max_batch_size,\n                     batch_timeout_micros,\n                     allowed_batch_sizes=None,\n                     max_enqueued_batches=100):\n  \"\"\"Convenient wrapper for export_saved_model API v2 to wrap TPU computation.\n\n  WARNING: THIS METHOD IS DEPRECATED AND NOT PART OF THE APIS.\n\n  Make sure to set\n  `export_saved_model_api_version=tpu_estimator.ExportSavedModelApiVersion.V2`\n  when initializing TPUEstimator (default API version is V1). This is because\n  1) `tpu.rewrite` (or `tpu.compile`) shouldn't be called in a nested way\n  (otherwise validation will throw error like\n  \"NotImplementedError: tpu_shard_context cannot be nested.\")\n  2) When using V1 API, Estimator calls `tpu.rewrite` so\n  using `model_fn_inference_on_tpu` will trigger a nested call.\n  When using V2 API, users of Estimator need to call `tpu.rewrite` (which\n  the wrapper does).\n\n  It puts the computation on TPU, adds batching around it, and round-robins the\n  computation between TPU cores.\n\n  See tpu_estimator_test.py for an example.\n\n  Args:\n    computation: computation to be put on TPU, which takes inputs_to_tpu as\n      arguments.\n    inputs_to_tpu: a list of tensors as input to computation.\n    num_batch_threads: Number of scheduling threads for processing batches of\n      work. Determines the number of batches processed in parallel.\n    max_batch_size: Batch sizes will never be bigger than this. If None or 0,\n      no batching will be done.\n    batch_timeout_micros: Maximum number of microseconds to wait before\n      outputting an incomplete batch.\n    allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,\n      does nothing. Otherwise, supplies a list of batch sizes, causing the op to\n      pad batches up to one of those sizes. 
The entries must increase\n monotonically, and the final entry must equal max_batch_size.\n max_enqueued_batches: The maximum depth of the batch queue. Defaults to 100.\n\n Returns:\n The unbatched computation output Tensors.\n \"\"\"\n\n def _tpu_call(args):\n \"\"\"Function to either call or feed into BatchFunction.\"\"\"\n\n @function.Defun(capture_resource_var_by_value=False)\n def tpu_computation():\n \"\"\"Function to feed into the TPUPartitionedCallOp.\"\"\"\n tensors_on_cpu = tf.compat.v1.tpu.rewrite(computation, args)\n tpu.prune_unconnected_ops_from_xla(tf.compat.v1.get_default_graph())\n return tensors_on_cpu\n\n return tpu_functional.TPUPartitionedCall(\n args=tpu_computation.captured_inputs,\n device_ordinal=tpu_ops.tpu_ordinal_selector(),\n Tout=[o.type for o in tpu_computation.definition.signature.output_arg],\n f=tpu_computation)\n\n if not max_batch_size:\n return _tpu_call(inputs_to_tpu)\n\n @tf.nondifferentiable_batch_function(num_batch_threads, max_batch_size,\n batch_timeout_micros,\n allowed_batch_sizes,\n max_enqueued_batches)\n def batched_tpu_computation(*args):\n \"\"\"Function to feed into the BatchOp.\"\"\"\n return _tpu_call(args)\n\n return batched_tpu_computation(*inputs_to_tpu)\n" ]
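The file above implements two input-pipeline tricks that are easy to miss in the bulk of the code. First, `_InputsWithStoppingSignals` appends a sentinel batch whose `stopping` signal is all `True`, so the device-side loop can halt without relying on `OutOfRangeError`. Below is a minimal eager-mode sketch of that idea, not the estimator's actual wiring; the `with_stop_signal` helper, the dataset contents, and the batch size are invented for illustration:

```python
import tensorflow as tf

def with_stop_signal(dataset, batch_size):
    """Append one sentinel batch whose 'stopping' flag is all True."""
    def tag(stop):
        def _map_fn(features):
            fill = tf.ones if stop else tf.zeros
            return {'features': features,
                    'stopping': fill([batch_size, 1], tf.bool)}
        return _map_fn

    body = dataset.map(tag(stop=False))
    # Reuse a real batch for the sentinel (take(1)) so shapes/dtypes match,
    # the same trick the estimator code uses.
    sentinel = dataset.take(1).map(tag(stop=True))
    return body.concatenate(sentinel)

ds = tf.data.Dataset.from_tensor_slices(tf.range(8.0)).batch(4)
for batch in with_stop_signal(ds, batch_size=4):
    if bool(batch['stopping'][0][0]):   # mirrors _StopSignals.should_stop
        break
    print(batch['features'])
```

Second, `_PaddingSignals.pad_features_and_labels` pads a short final batch up to the fixed TPU batch size and records the padded rows in an integer mask, so the real rows can be sliced back out after the computation. A self-contained sketch of the same stack/pad/mask pattern; `pad_batch` and the sample tensor are made up for the example:

```python
import tensorflow as tf

def pad_batch(tensor, batch_size):
    """Pad the batch dimension out to batch_size; mask is 0=real, 1=padding."""
    real = tf.shape(tensor)[0]
    missing = batch_size - real
    # Pad only the leading (batch) dimension; real rows stay first.
    paddings = tf.stack([[0, missing]] + [[0, 0]] * (len(tensor.shape) - 1))
    padded = tf.pad(tensor, paddings)
    mask = tf.concat([tf.zeros((real,), tf.int32),
                      tf.ones((missing,), tf.int32)], axis=0)
    return padded, mask

x = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])  # real batch of 3
padded, mask = pad_batch(x, batch_size=4)
real_rows = tf.shape(mask)[0] - tf.reduce_sum(mask)    # == 3
print(padded[:real_rows])                              # original rows back
```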
[ [ "tensorflow.python.tpu.ops.tpu_ops.outfeed_dequeue_tuple", "tensorflow.math.equal", "tensorflow.python.data.util.nest.flatten_up_to", "tensorflow.python.training.evaluation._StopAfterNEvalsHook", "tensorflow.identity", "tensorflow.compat.v1.logging.info", "tensorflow.python.tpu.training_loop.repeat", "tensorflow.python.tpu.tpu_feed.InfeedQueue", "tensorflow.compat.v1.pad", "tensorflow.compat.v1.tpu.core", "tensorflow.cast", "tensorflow.math.floormod", "tensorflow.core.protobuf.tpu.compilation_result_pb2.CompilationResultProto", "tensorflow.compat.v1.initializers.zeros", "tensorflow.python.data.util.nest.pack_sequence_as", "tensorflow.compat.v1.train.get_global_step", "tensorflow.python.ops.control_flow_ops.ControlFlowContext.__init__", "tensorflow.ones", "tensorflow.python.distribute.cluster_resolver.tpu_cluster_resolver.is_running_in_gce", "tensorflow.python.data.util.nest.flatten", "tensorflow.python.tpu.session_support.start_worker_watchdog", "tensorflow.compat.v2.summary.flush", "tensorflow.compat.v1.tpu.shutdown_system", "tensorflow.compat.v1.train.SessionRunArgs", "tensorflow.math.logical_and", "tensorflow.constant", "tensorflow.errors.OutOfRangeError", "tensorflow.python.tpu.tensor_tracer.TensorTracer", "tensorflow.compat.v1.logging.warn", "tensorflow.python.tpu.session_support.GracefulShutdownHook", "tensorflow.core.framework.summary_pb2.Summary.Value", "tensorflow.python.tpu.tpu.split_compile_and_shard", "tensorflow.python.tpu.tpu_embedding_gradient.create_dummy_table_variables", "tensorflow.compat.v1.logging.log_first_n", "tensorflow.compat.v1.while_loop", "tensorflow.compat.v1.variable_scope", "numpy.product", "tensorflow.python.tpu.session_support.ResetComputation", "tensorflow.python.tpu.tpu_embedding_gradient.hook_dummy_table_variables_to_activations", "tensorflow.python.tpu.session_support.ShutdownLameWorkers", "tensorflow.math.logical_or", "tensorflow.nondifferentiable_batch_function", "tensorflow.python.platform.tf_logging.warn", "tensorflow.nest.flatten", "tensorflow.split", "tensorflow.compat.v1.tpu.initialize_system", "tensorflow.compat.v1.device", "tensorflow.compat.v1.get_default_graph", "tensorflow.python.training.evaluation._get_or_create_eval_step", "tensorflow.compat.v1.Session", "tensorflow.compat.v2.__internal__.monitoring.BoolGauge", "tensorflow.python.tpu.session_support.ShutdownAllWorkers", "tensorflow.python.ops.summary_ops_v2.summary_writer_initializer_op", "tensorflow.compat.v1.trainable_variables", "tensorflow.control_dependencies", "tensorflow.math.reduce_sum", "tensorflow.compat.v1.ones_like", "tensorflow.compat.v1.summary.scalar", "tensorflow.python.util.tf_export.estimator_export", "tensorflow.compat.v1.data.make_initializable_iterator", "tensorflow.math.add", "tensorflow.python.tpu.training_loop.while_loop", "tensorflow.reshape", "tensorflow.no_op", "tensorflow.compat.v1.logging.debug", "tensorflow.compat.v1.RunOptions", "tensorflow.nest.pack_sequence_as", "tensorflow.concat", "tensorflow.python.tpu.ops.tpu_ops.tpu_ordinal_selector", "tensorflow.compat.v1.shape", "tensorflow.python.util.function_utils.fn_args", "tensorflow.python.tpu.tpu_embedding_gradient.get_gradients_through_dummy_table_variables", "tensorflow.Graph", "tensorflow.python.tpu.tensor_tracer.TensorTracer.is_enabled", "tensorflow.compat.v1.tpu.rewrite", "tensorflow.stack", "tensorflow.nest.map_structure", "tensorflow.python.tpu.preempted_hook.CloudTPUPreemptedHook", "tensorflow.compat.v1.train.CheckpointSaverHook", "tensorflow.group", "tensorflow.zeros", 
"tensorflow.python.tpu.ops.tpu_ops.outfeed_enqueue_tuple", "tensorflow.python.framework.ops.name_scope", "tensorflow.compat.v1.debugging.assert_greater_equal", "tensorflow.python.framework.function.Defun", "tensorflow.python.util.tf_inspect.getfullargspec", "tensorflow.python.tpu.tpu._TPUInferenceContext" ] ]
eliemichel/ReACORN
[ "74501551ecb387352271674efb2ed6240d234df6" ]
[ "pluto_gen_stats.py" ]
[ "# This file is part of ReACORN, a reimplementation by Élie Michel of the ACORN\n# paper by Martel et al. published at SIGGRAPH 2021.\n#\n# Copyright (c) 2021 -- Télécom Paris (Élie Michel <[email protected]>)\n# \n# The MIT license:\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the “Software”), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# The Software is provided “as is”, without warranty of any kind, express or\n# implied, including but not limited to the warranties of merchantability,\n# fitness for a particular purpose and non-infringement. In no event shall the\n# authors or copyright holders be liable for any claim, damages or other\n# liability, whether in an action of contract, tort or otherwise, arising\n# from, out of or in connection with the software or the use or other dealings\n# in the Software.\n\nimport torch\nimport os\nfrom argparse import Namespace\nfrom matplotlib.image import imread, imsave\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\nimport acorn_eval_image\n\ntarget_image_filename = \"data/pluto.png\"\ncheckpoint_dir = \"checkpoints/pluto\"\noutput_dir = \"outputs/pluto\"\ndevice = \"cuda\" # 'cpu' or 'cuda'\n\nquadtree_output_dir = os.path.join(output_dir, \"quadtree\")\nimage_output_dir = os.path.join(output_dir, \"outputs\")\ndifference_output_dir = os.path.join(output_dir, \"difference\")\nloss_output_dir = os.path.join(output_dir, \"loss_plot\")\npsnr_output_dir = os.path.join(output_dir, \"psnr_plot\")\nos.makedirs(quadtree_output_dir, exist_ok=True)\nos.makedirs(image_output_dir, exist_ok=True)\nos.makedirs(difference_output_dir, exist_ok=True)\nos.makedirs(loss_output_dir, exist_ok=True)\nos.makedirs(psnr_output_dir, exist_ok=True)\n\ndef main():\n    gen_quadtree_images()\n    gen_output_images()\n    measure_differences()\n    gen_loss_plots()\n    gen_psnr_plots()\n\ndef gen_quadtree_images():\n    print(\"Generating quadtree images...\")\n    for checkpoint in os.listdir(checkpoint_dir):\n        name, _ = os.path.splitext(checkpoint)\n\n        output_file = os.path.join(quadtree_output_dir, name + \".png\")\n        if os.path.exists(output_file):\n            continue\n        \n        acorn_eval_image.main(Namespace(\n            checkpoint = os.path.join(checkpoint_dir, checkpoint),\n            image = output_file,\n            resolution = \"2048x2048\",\n            draw_quadtree = True,\n            draw_quadtree_only = True,\n            device = device,\n        ))\n\ndef gen_output_images():\n    print(\"Generating full res output images...\")\n    for checkpoint in os.listdir(checkpoint_dir):\n        name, _ = os.path.splitext(checkpoint)\n\n        output_file = os.path.join(image_output_dir, name + \".png\")\n        if os.path.exists(output_file):\n            continue\n        \n        acorn_eval_image.main(Namespace(\n            checkpoint = os.path.join(checkpoint_dir, checkpoint),\n            image = output_file,\n            resolution = \"4096x4096\",\n            draw_quadtree = False,\n            draw_quadtree_only = False,\n            device = device,\n        ))\n\ndef measure_differences():\n    print(\"Measuring difference to ground truth...\")\n    target_image = imread(target_image_filename)\n    if target_image.dtype == np.uint8:\n        target_image = target_image.astype(float) / 255.\n\n    for 
output_image_filename in os.listdir(image_output_dir):\n name, _ = os.path.splitext(output_image_filename)\n\n diff_filename = os.path.join(difference_output_dir, name + \".png\")\n psnr_filename = os.path.join(difference_output_dir, name + \".txt\")\n if os.path.exists(diff_filename):\n continue\n\n print(output_image_filename)\n output_image = imread(os.path.join(image_output_dir, output_image_filename))[:,:,:3]\n mse = np.power(output_image - target_image, 2).mean()\n psnr = 20 * np.log10(1 / np.sqrt(mse))\n with open(psnr_filename, 'w') as f:\n f.write(f\"psnr={psnr}\")\n print(f\"psnr={psnr}\")\n\n diff_image = np.ones_like(output_image)\n diff = np.abs(output_image - target_image).mean(axis=-1)\n diff_image[:,:,0] = 1\n diff_image[:,:,1] = (1 - diff).clip(0, 1)\n diff_image[:,:,2] = (1 - diff).clip(0, 1)\n imsave(diff_filename, diff_image)\n\ndef gen_loss_plots(size=(1152,256)):\n print(\"Generating loss plots...\")\n last_checkpoint = os.path.join(checkpoint_dir, os.listdir(checkpoint_dir)[-1])\n max_epochs = int(last_checkpoint.split('.')[-2])\n\n for checkpoint_filename in os.listdir(checkpoint_dir):\n name, _ = os.path.splitext(checkpoint_filename)\n\n output_file = os.path.join(loss_output_dir, name + \".png\")\n if os.path.exists(output_file):\n continue\n\n print(name)\n checkpoint = torch.load(os.path.join(checkpoint_dir, checkpoint_filename))\n loss_log = checkpoint['loss_log']\n\n dpi = 96\n fig, ax = plt.subplots()\n fig.set_size_inches(size[0]/dpi, size[1]/dpi)\n fig.patch.set_visible(False)\n #ax.axis('off')\n\n ax.plot(loss_log)\n ax.set_xlim(-max_epochs*.01, max_epochs*1.01)\n ax.set_ylim(-0.005, 0.18)\n fig.savefig(output_file, transparent=True, dpi=dpi)\n plt.close(fig)\n\ndef gen_psnr_plots(size=(550,256)):\n print(\"Generating PSNR plots...\")\n last_checkpoint = os.path.join(checkpoint_dir, os.listdir(checkpoint_dir)[-1])\n max_epochs = int(last_checkpoint.split('.')[-2])\n\n psnr_log = []\n epochs = []\n\n for i, filename in enumerate(os.listdir(difference_output_dir)):\n name, ext = os.path.splitext(filename)\n if ext != '.txt':\n continue\n\n output_file = os.path.join(psnr_output_dir, name + \".png\")\n if os.path.exists(output_file):\n continue\n\n print(name)\n with open(os.path.join(difference_output_dir, filename)) as f:\n psnr = float(f.read().split(\"=\")[-1])\n psnr_log.append(psnr)\n epochs.append(50 * i)\n\n dpi = 96\n fig, ax = plt.subplots()\n fig.set_size_inches(size[0]/dpi, size[1]/dpi)\n fig.patch.set_visible(False)\n #ax.axis('off')\n ax.get_xaxis().set_ticks([])\n\n ax.plot(epochs, psnr_log)\n ax.set_xlim(-max_epochs*.01, max_epochs*1.01)\n ax.set_ylim(0, 30)\n fig.savefig(output_file, transparent=True, dpi=dpi)\n plt.close(fig)\n\nmain()\n" ]
[ [ "matplotlib.image.imsave", "numpy.ones_like", "matplotlib.pyplot.subplots", "numpy.abs", "numpy.power", "matplotlib.pyplot.close", "numpy.sqrt", "matplotlib.image.imread" ] ]
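For reference, `measure_differences` in the script above derives PSNR from the mean squared error of images normalised to [0, 1]. A tiny standalone version of the same calculation (the arrays here are invented):

```python
import numpy as np

def psnr(output_image, target_image, peak=1.0):
    """Peak signal-to-noise ratio in dB for images scaled to [0, peak]."""
    mse = np.power(output_image - target_image, 2).mean()
    return 20 * np.log10(peak / np.sqrt(mse))

target = np.zeros((4, 4, 3))
output = target + 0.01        # uniform error of 0.01
print(psnr(output, target))   # 40.0 dB, since -20*log10(0.01) == 40
```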
hassenmorad/CA-Migration
[ "762434b3a013f2488c382dbdc3d2dc7b7f91c572" ]
[ "Scripts/Census/Census_5yr_CA_top50_mig_counties_0917.py" ]
[ "# Top 50 in/out migration counties for 5-yr estimates, each year b/w 05-09 to 13-17\nimport pandas as pd\nimport numpy as np\n\ncensus5yr = pd.read_csv('ca_counties_mig_5yr_0917.csv')\nca0917 = census5yr[((census5yr.County1FIPS > 6000) & (census5yr.County1FIPS < 7000)) & (census5yr.County2FIPS < 60000) & (census5yr.State2Name != 'California')].sort_values('MovedOut', ascending=False)\n\nfor year in ca0917.Year.sort_values().unique():\n print(year)\n df = ca0917[ca0917.Year == year]\n ca_out_in = pd.DataFrame()\n counter = 0\n mig_types = ['MovedOut', 'MovedIn']\n for mig in mig_types:\n series = df.groupby('County2FIPS')[mig].sum() # Calculating total CA outmig figures for each non-CA county\n ca_mig = pd.DataFrame({'FIPS':series.index, mig:series.values})[1:] # Removing first row (Int'l migration)\n counties = []\n states = []\n \n # Adding County,State col (for DataWrapper map coordinates)\n for fips in ca_mig.FIPS.unique():\n counties.append(df.County2Name[df.County2FIPS == fips].iloc[0])\n states.append(df.State2Name[df.County2FIPS == fips].iloc[0])\n\n ca_mig['County_Name'] = counties\n ca_mig['State_Name'] = states\n ca_mig['County_State'] = ca_mig.County_Name + ', ' + ca_mig.State_Name\n ca_mig = ca_mig.drop(['County_Name', 'State_Name'], axis=1)\n\n if counter == 0:\n ca_out_in = ca_mig.copy()\n elif counter == 1:\n ca_out_in = ca_out_in.merge(ca_mig, on=['FIPS', 'County_State'])\n ca_out_in = ca_out_in.rename({'MovedOut':'Outmig', 'MovedIn':'Inmig'}, axis=1)\n ca_out_in['Net_Mig'] = ca_out_in.Inmig - ca_out_in.Outmig\n ca_out_in = ca_out_in.sort_values('Net_Mig')\n counter += 1\n\n top50_out_in = pd.concat([ca_out_in.iloc[:50], ca_out_in.iloc[-50:]])\n top50_out_in['Mig_Abs'] = top50_out_in.Net_Mig.abs()\n top50_out_in['Type'] = ['Net Out']*50 + ['Net In']*50\n top50_out_in['More'] = [c.split(',')[0] for c in top50_out_in.County_State[:50].values] + list(np.full(50, 'California'))\n top50_out_in['Year'] = np.full(len(top50_out_in), year)\n top50_out_in.to_csv('Census_5yr_CA_top50_mig_counties_' + str(year) + '.csv', index=False)" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "pandas.concat", "numpy.full" ] ]
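The heart of the script above is a pair of `groupby` sums, one per flow direction, joined on the county key to get net migration. A stripped-down sketch with toy data; the column names follow the script, but the values are invented and the `pd.concat` join stands in for the script's `merge` on `['FIPS', 'County_State']`:

```python
import pandas as pd

df = pd.DataFrame({
    'County2FIPS': [4013, 4013, 32003],
    'MovedOut':    [500, 250, 300],
    'MovedIn':     [100, 150, 400],
})

# Total flows per destination county, then net = in - out.
out_tot = df.groupby('County2FIPS')['MovedOut'].sum().rename('Outmig')
in_tot = df.groupby('County2FIPS')['MovedIn'].sum().rename('Inmig')
net = pd.concat([out_tot, in_tot], axis=1).reset_index()
net['Net_Mig'] = net.Inmig - net.Outmig
print(net.sort_values('Net_Mig'))
```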
Xero64/pyvlm
[ "f373ac826cc65281245a69979eb28786fbf67bd0" ]
[ "pyvlm/classes/latticesurface.py" ]
[ "from math import sqrt\nfrom pygeom.matrix3d import zero_matrix_vector\nfrom matplotlib.pyplot import figure\nfrom .latticesheet import LatticeSheet\nfrom .latticepanel import LatticePanel\n\nclass LatticeSurface(object):\n name = None\n scts = None\n shts = None\n cspc = None\n xspace = None\n strps = None\n pnts = None\n pnls = None\n area = None\n sgrp = None\n def __init__(self, name: str, scts: list, mirror: bool, funcs: list):\n self.name = name\n self.scts = scts\n self.mirror = mirror\n self.funcs = funcs\n self.update()\n def update(self):\n if self.mirror and self.scts[0].pnt.y == 0.0:\n numsct = len(self.scts)\n newscts = []\n for i in range(numsct-1):\n sct = self.scts[numsct-1-i]\n msct = sct.return_mirror()\n newscts.append(msct)\n for sct in self.scts:\n newscts.append(sct)\n self.scts = newscts\n elif self.mirror and self.scts[0].pnt.y != 0.0:\n print(f'Warning: Cannot mirror {self.name}.')\n self.mirror = False\n def set_chord_distribution(self, cspc: list):\n from pyvlm.tools import normalise_spacing\n self.cspc = normalise_spacing(cspc)\n def set_chord_equal_distribution(self, cnum: int):\n from pyvlm.tools import equal_spacing\n csp = equal_spacing(4*cnum)\n self.cspc = [tuple(csp[i*4:i*4+5]) for i in range(cnum)]\n def set_chord_cosine_distribution(self, cnum: int):\n from pyvlm.tools import full_cosine_spacing\n if cnum > 1:\n csp = full_cosine_spacing(4*cnum+2)\n csp = [0.0]+csp[2:-2]+[1.0]\n self.cspc = [tuple(csp[i*4:i*4+5]) for i in range(cnum)]\n else:\n self.set_chord_equal_distribution(cnum)\n def mesh(self, lsid: int, lpid: int):\n from pygeom.geom3d import Vector\n from numpy.matlib import empty\n nums = len(self.scts)\n self.shts = []\n for i in range(nums-1):\n a, b = i, i+1\n scta = self.scts[a]\n sctb = self.scts[b]\n self.shts.append(LatticeSheet(scta, sctb))\n self.strps = []\n for sht in self.shts:\n lsid = sht.mesh_strips(lsid)\n self.strps += sht.strps\n pnts = [strp.pnt1 for strp in self.strps]\n pnts.append(self.strps[-1].pnt2)\n crds = [strp.crd1 for strp in self.strps]\n crds.append(self.strps[-1].crd2)\n lenb = len(pnts)\n lenc = len(self.cspc)\n self.pnts = empty((lenb, lenc+1), dtype=Vector)\n for i in range(lenb):\n minx = pnts[i].x\n y = pnts[i].y\n z = pnts[i].z\n c = crds[i]\n cd = self.cspc[0][0]\n x = minx+cd*c\n self.pnts[i, 0] = Vector(x, y, z)\n for j in range(1, lenc+1):\n cd = self.cspc[j-1][-1]\n x = minx+cd*c\n self.pnts[i, j] = Vector(x, y, z)\n self.pnls = empty((lenb-1, lenc), dtype=LatticePanel)\n for i, strp in enumerate(self.strps):\n for j in range(lenc):\n pnts = [\n self.pnts[i, j],\n self.pnts[i+1, j],\n self.pnts[i, j+1],\n self.pnts[i+1, j+1]\n ]\n cspc = self.cspc[j]\n pnl = LatticePanel(lpid, pnts, cspc, strp)\n self.pnls[i, j] = pnl\n lpid += 1\n if self.mirror:\n self.sgrp = [[], []]\n numstrp = len(self.strps)\n hlfstrp = int(numstrp/2)\n for i in range(hlfstrp):\n lstrp = self.strps[numstrp-1-i]\n mstrp = self.strps[i]\n self.sgrp[0].append(lstrp.lsid)\n self.sgrp[1].append(mstrp.lsid)\n else:\n self.sgrp = [[]]\n numstrp = len(self.strps)\n for i in range(numstrp):\n lstrp = self.strps[numstrp-1-i]\n self.sgrp[0].append(lstrp.lsid)\n bpos = [0.0]\n for sht in self.shts:\n sht.inherit_panels()\n sht.set_control_panels()\n bpos.append(bpos[-1]+sht.width)\n if self.mirror:\n numsht = len(self.shts)\n wmir = bpos[int(numsht/2)]\n for i in range(len(bpos)):\n bpos[i] = bpos[i]-wmir\n for i, sct in enumerate(self.scts):\n sct.bpos = bpos[i]\n for sht in self.shts:\n sht.set_strip_bpos()\n bmax = max(bpos)\n for func 
in self.funcs:\n            func.set_spline(bmax)\n            var = func.var\n            if var == 'twist':\n                var = '_ang'\n            if self.mirror:\n                for i in range(hlfstrp):\n                    strp = self.strps[numstrp-1-i]\n                    mstrp = self.strps[i]\n                    bpos = strp.bpos\n                    val = func.interpolate(bpos)\n                    strp.__dict__[var] = val\n                    mstrp.__dict__[var] = val\n            else:\n                for strp in self.strps:\n                    bpos = strp.bpos\n                    val = func.interpolate(bpos)\n                    strp.__dict__[var] = val\n        self.area = 0.0\n        for sht in self.shts:\n            if not sht.noload:\n                self.area += sht.area\n        return lsid, lpid\n    def point_xyz(self):\n        from numpy.matlib import zeros\n        x = zeros(self.pnts.shape)\n        y = zeros(self.pnts.shape)\n        z = zeros(self.pnts.shape)\n        for i in range(self.pnts.shape[0]):\n            for j in range(self.pnts.shape[1]):\n                x[i, j] = self.pnts[i, j].x\n                y[i, j] = self.pnts[i, j].y\n                z[i, j] = self.pnts[i, j].z\n        return x, y, z\n    def return_panels(self):\n        pnls = []\n        shp = self.pnls.shape\n        for i in range(shp[0]):\n            for j in range(shp[1]):\n                pnls.append(self.pnls[i, j])\n        return pnls\n    def plot_surface(self, ax=None):\n        if ax is None:\n            fig = figure(figsize=(12, 8))\n            ax = fig.gca(projection='3d')\n            ax.grid(True)\n        x, y, z = self.point_xyz()\n        ax.plot_surface(x, y, z, label=self.name)\n        return ax\n    @property\n    def strpb(self):\n        return [strp.bpos for strp in self.strps]\n    @property\n    def strpy(self):\n        return [strp.pnti.y for strp in self.strps]\n    @property\n    def strpz(self):\n        return [strp.pnti.z for strp in self.strps]\n    @property\n    def strpi(self):\n        return [strp.lsid for strp in self.strps]\n    @property\n    def lstrpi(self):\n        return self.sgrp[0]\n    @property\n    def mstrpi(self):\n        return self.sgrp[1]\n    @property\n    def pnli(self):\n        lpids = []\n        for i in range(self.pnls.shape[0]):\n            for j in range(self.pnls.shape[1]):\n                lpids.append(self.pnls[i, j].lpid)\n        return lpids\n    def vortex_line_points(self, indp: int, nump: int):\n        nums = len(self.strps)\n        num = nums*nump+1\n        rpt = zero_matrix_vector((num, 1))\n        j = 0\n        for strp in self.strps:\n            pnl = strp.pnls[indp]\n            for i in range(nump):\n                pnt = pnl.pnta+i/nump*pnl.leni\n                rpt[j, 0] = pnt\n                j += 1\n        rpt[j, 0] = pnl.pntb\n        return rpt\n    def __repr__(self):\n        return '<LatticeSurface {:s}>'.format(self.name)\n\ndef latticesurface_from_json(surfdata: dict, display: bool=False):\n    from .latticesection import latticesection_from_json\n    name = surfdata['name']\n    if 'mirror' in surfdata:\n        mirror = surfdata['mirror']\n    else:\n        mirror = False\n    if display: print(f'Loading Surface: {name:s}')\n    # Read Section Variables\n    scts = []\n    for sectdata in surfdata['sections']:\n        sct = latticesection_from_json(sectdata)\n        scts.append(sct)\n    # Linear Interpolate Missing Variables\n    x, y, z, c, a = [], [], [], [], []\n    for sct in scts:\n        x.append(sct.pnt.x)\n        y.append(sct.pnt.y)\n        z.append(sct.pnt.z)\n        c.append(sct.chord)\n        a.append(sct.twist)\n    if None in y:\n        if None in z:\n            raise ValueError('Either y or z must be provided for every section.')\n        else:\n            y = linear_interpolate_none(z, y)\n    else:\n        z = linear_interpolate_none(y, z)\n    lenscts = len(scts)\n    b = [0.0]\n    for i in range(lenscts-1):\n        bi = b[i]+sqrt((y[i+1]-y[i])**2+(z[i+1]-z[i])**2)\n        b.append(bi)\n    x = linear_interpolate_none(b, x)\n    c = linear_interpolate_none(b, c)\n    a = linear_interpolate_none(b, a)\n    for i, sct in enumerate(scts):\n        sct.pnt.x = x[i]\n        sct.pnt.y = y[i]\n        sct.pnt.z = z[i]\n        sct.chord = c[i]\n        sct.twist = a[i]\n    # Read in Function Data\n    funcs = []\n    if 'functions' in surfdata:\n        for funcdata in surfdata['functions']:\n            func = surffunc_from_json(funcdata)\n            funcs.append(func)\n    # Entire Surface Position\n    xpos, ypos, zpos = 0.0, 0.0, 
0.0\n if 'xpos' in surfdata:\n xpos = surfdata['xpos']\n if 'ypos' in surfdata:\n ypos = surfdata['ypos']\n if 'zpos' in surfdata:\n zpos = surfdata['zpos']\n twist = 0.0\n if 'twist' in surfdata:\n twist = surfdata['twist']\n if 'ruled' in surfdata:\n ruled = surfdata['ruled']\n else:\n ruled = False\n for sct in scts:\n sct.offset_position(xpos, ypos, zpos)\n sct.offset_twist(twist)\n sct.ruled = ruled\n surf = LatticeSurface(name, scts, mirror, funcs)\n if 'cnum' in surfdata:\n cnum = surfdata['cnum']\n cspc = 'cosine'\n if 'cspc' in surfdata:\n cspc = surfdata['cspc'].lower()\n if cspc == 'equal':\n surf.set_chord_equal_distribution(cnum)\n elif cspc in ('cosine', 'full-cosine'):\n surf.set_chord_cosine_distribution(cnum)\n return surf\n\ndef linear_interpolate_none(x: list, y: list):\n for i, yi in enumerate(y):\n if yi is None:\n for j in range(i, -1, -1):\n if y[j] is not None:\n a = j\n break\n for j in range(i, len(y)):\n if y[j] is not None:\n b = j\n break\n xa, xb = x[a], x[b]\n ya, yb = y[a], y[b]\n y[i] = (yb-ya)/(xb-xa)*(x[i]-xa)+ya\n return y\n\nclass SurfaceFunction(object):\n var = None\n dist = None\n interp = None\n values = None\n spline = None\n def __init__(self, var: str, spacing: str, interp: str, values: list):\n self.var = var\n self.spacing = spacing\n self.interp = interp\n self.values = values\n def set_spline(self, bmax: float):\n if self.spacing == 'equal':\n num = len(self.values)\n from pyvlm.tools import equal_spacing\n nspc = equal_spacing(num-1)\n spc = [bmax*nspci for nspci in nspc]\n if self.interp == 'linear':\n from pygeom.geom1d import LinearSpline\n self.spline = LinearSpline(spc, self.values)\n elif self.interp == 'cubic':\n from pygeom.geom1d import CubicSpline\n self.spline = CubicSpline(spc, self.values)\n def interpolate(self, b: float):\n return self.spline.single_interpolate_spline(b)\n\ndef surffunc_from_json(funcdata: dict):\n var = funcdata[\"variable\"]\n if \"spacing\" in funcdata:\n spacing = funcdata[\"spacing\"]\n else:\n spacing = \"equal\"\n if \"interp\" in funcdata:\n interp = funcdata[\"interp\"]\n else:\n interp = \"linear\"\n values = funcdata[\"values\"]\n return SurfaceFunction(var, spacing, interp, values)\n" ]
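`set_chord_cosine_distribution` above delegates to `pyvlm.tools.full_cosine_spacing`, whose source is not included in this entry. Cosine spacing in vortex-lattice meshing conventionally clusters points toward both ends of the interval via s_i = (1 - cos(i*pi/n)) / 2; the sketch below shows that standard formula, which may differ in detail from pyvlm's actual implementation:

```python
from math import cos, pi

def cosine_spacing(n):
    """Standard cosine spacing on [0, 1]; points cluster near both ends."""
    return [(1.0 - cos(i * pi / n)) / 2.0 for i in range(n + 1)]

print(cosine_spacing(4))  # [0.0, 0.1464..., 0.5, 0.8535..., 1.0]
```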
[ [ "matplotlib.pyplot.figure", "numpy.matlib.empty", "numpy.matlib.zeros" ] ]
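`linear_interpolate_none` in `latticesurface.py` fills missing (`None`) section properties by interpolating linearly between the nearest defined neighbours. A condensed reimplementation with invented chord values to show the expected behaviour (the original scans backward and forward for the bracketing non-`None` entries; this version is equivalent for interior gaps):

```python
def linear_interpolate_none(x, y):
    """Replace interior None entries of y by linear interpolation over x."""
    for i, yi in enumerate(y):
        if yi is None:
            a = max(j for j in range(i) if y[j] is not None)
            b = min(j for j in range(i + 1, len(y)) if y[j] is not None)
            y[i] = (y[b] - y[a]) / (x[b] - x[a]) * (x[i] - x[a]) + y[a]
    return y

x = [0.0, 1.0, 2.0, 4.0]
c = [2.0, None, None, 0.5]            # chord known only at the ends
print(linear_interpolate_none(x, c))  # [2.0, 1.625, 1.25, 0.5]
```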
pysat/pysat
[ "4d12a09ea585b88d54560413e03cae9289113718" ]
[ "pysat/_files.py" ]
[ "#!/usr/bin/env python\n# Full license can be found in License.md\n# Full author list can be found in .zenodo.json file\n# DOI:10.5281/zenodo.1199703\n# ----------------------------------------------------------------------------\n\nimport copy\nimport datetime as dt\nfrom functools import partial\nimport numpy as np\nimport os\nimport weakref\n\nimport pandas as pds\n\nimport pysat # Needed to access pysat.params across reimports\nfrom pysat.utils import files as futils\nfrom pysat.utils.time import filter_datetime_input\nfrom pysat.instruments.methods import general\n\nlogger = pysat.logger\n\n\nclass Files(object):\n \"\"\"Maintains collection of files and associated methods.\n\n Parameters\n ----------\n inst : pysat.Instrument\n Instrument object\n directory_format : str or NoneType\n Directory naming structure in string format. Variables such as\n platform, name, tag, and inst_id will be filled in as needed using\n python string formatting. The default directory structure would be\n expressed as '{platform}/{name}/{tag}/{inst_id}'. If None, the default\n directory structure is used (default=None)\n update_files : boolean\n If True, immediately query filesystem for instrument files and\n store (default=False)\n file_format : str or NoneType\n File naming structure in string format. Variables such as year,\n month, day, and inst_id will be filled in as needed using python string\n formatting. The default file format structure is supplied in the\n instrument list_files routine. (default=None)\n write_to_disk : boolean\n If true, the list of Instrument files will be written to disk.\n (default=True)\n ignore_empty_files : boolean\n If True, the list of files found will be checked to ensure the\n filesizes are greater than zero. Empty files are removed from the\n stored list of files. (default=False)\n\n Attributes\n ----------\n home_path : str\n Path to the pysat information directory.\n data_path : str\n Path to the top-level directory containing instrument files,\n selected from data_paths.\n data_paths: list of str\n Available paths that pysat will use when looking for files. The\n class uses the first directory with relevant data, stored in data_path.\n files : pds.Series\n Series of data files, indexed by file start time\n inst_info : dict\n Contains pysat.Instrument parameters 'platform', 'name', 'tag',\n and 'inst_id', identifying the source of the files.\n list_files_creator : functools.partial or NoneType\n Experimental feature for Instruments that internally generate data\n and thus don't have a defined supported date range.\n list_files_rtn : method\n Method used to locate relevant files on the local system. 
Provided\n by associated pysat.Instrument object.\n multi_file_day : boolean\n Flag copied from associated pysat.Instrument object that indicates\n when data for day n may be found in files for days n-1, or n+1\n start_date : datetime or NoneType\n Date of first file, used as default start bound for instrument\n object, or None if no files are loaded.\n stop_date : datetime or NoneType\n Date of last file, used as default stop bound for instrument\n object, or None if no files are loaded.\n stored_file_name : str\n Name of the hidden file containing the list of archived data files\n for this instrument.\n sub_dir_path : str\n `directory_format` string formatted for the local system.\n\n\n Note\n ----\n Interfaces with the `list_files` method for a given instrument\n support module to create an ordered collection of files in time,\n used primarily by the pysat.Instrument object to identify files\n to be loaded. The Files class mediates access to the files by\n datetime and contains helper methods for determining the presence of\n new files and filtering out empty files.\n\n User should generally use the interface provided by a pysat.Instrument\n instance. Exceptions are the classmethod from_os, provided to assist\n in generating the appropriate output for an instrument routine.\n\n Examples\n --------\n ::\n\n # convenient file access\n inst = pysat.Instrument(platform=platform, name=name, tag=tag,\n inst_id=inst_id)\n # first file\n inst.files[0]\n\n # files from start up to stop (exclusive on stop)\n start = dt.datetime(2009,1,1)\n stop = dt.datetime(2009,1,3)\n print(inst.files[start:stop])\n\n # files for date\n print(inst.files[start])\n\n # files by slicing\n print(inst.files[0:4])\n\n # get a list of new files\n # new files are those that weren't present the last time\n # a given instrument's file list was stored\n new_files = inst.files.get_new()\n\n # search pysat appropriate directory for instrument files and\n # update Files instance.\n inst.files.refresh()\n\n \"\"\"\n\n # -----------------------------------------------------------------------\n # Define the magic methods\n\n def __init__(self, inst, directory_format=None, update_files=False,\n file_format=None, write_to_disk=True,\n ignore_empty_files=False):\n\n # Set the hidden variables\n self.update_files = update_files\n\n # Location of directory to store file information in\n self.home_path = os.path.join(pysat.pysat_dir, 'instruments')\n\n # Assign base default dates and an empty list of files\n self.start_date = None\n self.stop_date = None\n self.files = pds.Series(None, dtype='object')\n\n # Grab Instrument info\n self.inst_info = {'platform': inst.platform, 'name': inst.name,\n 'tag': inst.tag, 'inst_id': inst.inst_id,\n 'inst_module': inst.inst_module,\n 'inst': weakref.proxy(inst)}\n\n self.multi_file_day = inst.multi_file_day\n\n # Begin with presumption that the list_files_rtn is a typical\n # function that returns a Series of filenames. 
Some generated\n # data sets employ a function that creates filenames on-the-fly.\n self.list_files_creator = None\n\n # Set the location of stored files\n self.stored_file_name = '_'.join((self.inst_info['platform'],\n self.inst_info['name'],\n self.inst_info['tag'],\n self.inst_info['inst_id'],\n 'stored_file_info.txt'))\n\n # Set the path for sub-directories under pysat data path\n if directory_format is None:\n # Assign stored template if user doesn't provide one.\n directory_format = pysat.params['directory_format']\n self.directory_format = directory_format\n\n # Set the user-specified file format\n self.file_format = file_format\n\n # Construct the subdirectory path\n self.sub_dir_path = os.path.normpath(\n self.directory_format.format(**self.inst_info))\n\n # Ensure we have at least one path for pysat data directory\n if len(pysat.params['data_dirs']) == 0:\n raise NameError(\" \".join((\"pysat's `data_dirs` hasn't been set.\",\n \"Please set a top-level directory\",\n \"path to store data using\",\n \"`pysat.params['data_dirs'] = path`\")))\n\n # Get list of potential data directory paths from pysat. Construct\n # possible locations for data. Ensure path always ends with directory\n # separator.\n self.data_paths = [os.path.join(pdir, self.sub_dir_path)\n for pdir in pysat.params['data_dirs']]\n self.data_paths = [os.path.join(os.path.normpath(pdir), '')\n for pdir in self.data_paths]\n\n # Only one of the above paths will actually be used when loading data.\n # The actual value of data_path is determined in refresh().\n # If there are files present, then that path is stored along with a\n # list of found files in ~/.pysat. This stored info is retrieved by\n # _load. We start here with the first directory for cases where there\n # are no files.\n self.data_path = self.data_paths[0]\n\n # Set the preference of writing the file list to disk or not\n self.write_to_disk = write_to_disk\n if not self.write_to_disk:\n # Use blank memory rather than loading from disk\n self._previous_file_list = pds.Series([], dtype='a')\n self._current_file_list = pds.Series([], dtype='a')\n\n # Set the preference to ignore or include empty files\n self.ignore_empty_files = ignore_empty_files\n\n if self.inst_info['platform'] != '':\n # Only load filenames if this is associated with a real\n # pysat.Instrument instance, not pysat.Instrument().\n if self.update_files:\n # Refresh filenames as directed by user\n self.refresh()\n else:\n # Load stored file info\n file_info = self._load()\n if file_info.empty:\n # Didn't find stored information. 
Search local system.\n # If list_files_rtn returns a dict to create\n # filenames as needed that is handled in refresh.\n self.refresh()\n else:\n # Attach the data loaded\n self._attach_files(file_info)\n return\n\n def __repr__(self):\n \"\"\" Representation of the class and its current state\n \"\"\"\n inst_repr = self.inst_info['inst'].__repr__()\n\n out_str = \"\".join([\"pysat.Files(\", inst_repr, \", directory_format=\",\n \"'{:}'\".format(self.directory_format),\n \", update_files=\",\n \"{:}, file_format=\".format(self.update_files),\n \"{:}, \".format(self.file_format.__repr__()),\n \"write_to_disk={:}, \".format(self.write_to_disk),\n \"ignore_empty_files=\",\n \"{:})\".format(self.ignore_empty_files)])\n\n return out_str\n\n def __str__(self):\n \"\"\" Description of the class and its contents\n \"\"\"\n\n num_files = len(self.files)\n output_str = 'Local File Statistics\\n'\n output_str += '---------------------\\n'\n output_str += 'Number of files: {:d}\\n'.format(num_files)\n\n if num_files > 0:\n output_str += 'Date Range: '\n output_str += self.files.index[0].strftime('%d %B %Y')\n output_str += ' --- '\n output_str += self.files.index[-1].strftime('%d %B %Y')\n\n return output_str\n\n def __eq__(self, other):\n \"\"\"Perform equality check\n\n Parameters\n ----------\n other : any\n Other object to compare for equality\n\n Returns\n -------\n bool\n True if objects are identical, False if they are not\n\n \"\"\"\n # Check if the other object has the same type\n if not isinstance(other, self.__class__):\n return False\n\n # If the type is the same then check everything that is attached to\n # the Files object. Includes attributes, methods, variables, etc.\n checks = []\n key_check = []\n for key in self.__dict__.keys():\n key_check.append(key)\n # Confirm each object has the same keys\n if key in other.__dict__.keys():\n # Define default comparison.\n if key not in ['files', '_previous_file_list',\n '_current_file_list', 'inst_info']:\n test = np.all(self.__dict__[key] == other.__dict__[key])\n checks.append(test)\n\n else:\n if key not in ['inst_info']:\n # Comparing one of the stored pandas Series\n try:\n # Comparison only works for identically-labeled\n # series.\n check = np.all(self.__dict__[key]\n == other.__dict__[key])\n checks.append(check)\n except ValueError:\n # If there is an error they aren't the same.\n return False\n\n elif key == 'inst_info':\n ichecks = []\n for ii_key in self.inst_info.keys():\n if ii_key != 'inst':\n # Standard attribute check\n ichecks.append(self.inst_info[ii_key]\n == other.inst_info[ii_key])\n\n else:\n # Don't want a recursive check on 'inst', which\n # contains Files. 
If the string representations\n # are the same we consider them the same.\n try:\n oinst = other.inst_info[ii_key]\n ichecks.append(str(self.inst_info[ii_key])\n == str(oinst))\n except AttributeError:\n # If one object is missing a required key\n return False\n checks.append(np.all(ichecks))\n\n else:\n # other did not have a key that self did\n return False\n\n # Confirm that Files object `other` doesn't have extra terms\n for key in other.__dict__.keys():\n if key not in self.__dict__.keys():\n return False\n\n test_data = np.all(checks)\n\n return test_data\n\n def __getitem__(self, key):\n \"\"\" Retrieve items from the files attribute\n\n Parameters\n ----------\n key : int, list, slice, dt.datetime\n Key for locating files from a pandas Series indexed by time\n\n Returns\n -------\n out : pds.Series\n Subset of the files as a Series\n\n Raises\n ------\n IndexError\n If data is outside of file bounds\n\n Note\n ----\n Slicing by filename is label-based and inclusive of the end point,\n while slicing by date or integer index uses a normal exclusive\n end point\n\n \"\"\"\n if self.list_files_creator is not None:\n # Return filename generated on demand\n out = self.list_files_creator(key)\n\n elif isinstance(key, slice):\n try:\n try:\n # Assume key is integer (including list or slice)\n out = self.files.iloc[key]\n except TypeError:\n # The key must be something else, use alternative access\n out = self.files.loc[key]\n except IndexError as err:\n raise IndexError(''.join((str(err), '\\n',\n 'Date requested outside file ',\n 'bounds.')))\n\n if isinstance(key.start, dt.datetime):\n # Enforce exclusive slicing on datetime\n if len(out) > 1:\n if out.index[-1] >= key.stop:\n out = out[:-1]\n elif len(out) == 1:\n if out.index[0] >= key.stop:\n out = pds.Series([], dtype='a')\n else:\n try:\n # Assume key is integer (including list or slice)\n out = self.files.iloc[key]\n except TypeError:\n # The key must be something else, use alternative access\n out = self.files.loc[key]\n\n return out\n\n # -----------------------------------------------------------------------\n # Define the hidden methods\n\n def _filter_empty_files(self, path):\n \"\"\"Update the file list (self.files) with empty files removed\n\n Parameters\n ----------\n path : str\n Path to top-level directory containing files\n\n \"\"\"\n\n keep_index = []\n for i, fname in enumerate(self.files):\n # Create full path for each file\n full_fname = os.path.join(path, fname)\n\n # Ensure the file exists\n if os.path.isfile(full_fname):\n # Check for size\n if os.path.getsize(full_fname) > 0:\n # Store if not empty\n keep_index.append(i)\n\n # Remove filenames as needed\n dropped_num = len(self.files.index) - len(keep_index)\n if dropped_num > 0:\n logger.warning(' '.join(('Removing {:d}'.format(dropped_num),\n 'empty files from Instrument list.')))\n self.files = self.files.iloc[keep_index]\n\n return\n\n def _attach_files(self, files_info):\n \"\"\"Attaches stored file lists to self.files\n\n Parameters\n ----------\n files_info : pds.Series\n Stored file information, filenames indexed by datetime\n\n Note\n ----\n Updates the file list (files), start_date, and stop_date attributes\n of the Files class object.\n\n \"\"\"\n\n if not files_info.empty:\n # Attach data\n self.files = files_info\n\n # Ensure times are unique.\n self._ensure_unique_file_datetimes()\n\n # Filter for empty files.\n if self.ignore_empty_files:\n self._filter_empty_files(path=self.data_path)\n\n # Extract date information from first and last files\n if not self.files.empty:\n self.start_date = filter_datetime_input(self.files.index[0])\n self.stop_date = filter_datetime_input(self.files.index[-1])\n else:\n # No files found\n self.start_date = None\n self.stop_date = None\n else:\n # No files found\n self.start_date = None\n self.stop_date = None\n\n # Convert to object type if Series is empty. This allows for\n # `==` equality checks with strings\n self.files = files_info.astype(np.dtype('O'))\n\n return\n\n def _ensure_unique_file_datetimes(self):\n \"\"\"Update the file list (self.files) to ensure uniqueness\"\"\"\n\n # Check if files are unique.\n unique_files = len(self.files.index.unique()) == len(self.files)\n\n if not self.multi_file_day and not unique_files:\n # Give user feedback about the issue\n estr = ''.join(['Duplicate datetimes in stored filename ',\n 'information.\\nKeeping one of each ',\n 'of the duplicates, dropping the rest. ',\n 'Please ensure the file datetimes ',\n 'are unique at the microsecond level.'])\n logger.warning(estr)\n ind = self.files.index.duplicated()\n logger.warning(self.files.index[ind].unique())\n\n # Downselect to unique file datetimes\n idx = np.unique(self.files.index, return_index=True)\n self.files = self.files.iloc[idx[1]]\n\n return\n\n def _store(self):\n \"\"\"Store currently loaded filelist for instrument onto filesystem\n \"\"\"\n\n stored_name = self.stored_file_name\n\n # Check if current file data is different from stored file list. If so,\n # move file list to previous file list, store current to file. If not,\n # do nothing\n stored_files = self._load(update_path=False)\n if len(stored_files) != len(self.files):\n # The number of items is different, things are new\n new_flag = True\n else:\n # The number of items is the same, check specifically for equality\n if stored_files.eq(self.files).all():\n new_flag = False\n else:\n # Stored and new data are not equal, there are new files\n new_flag = True\n\n if new_flag:\n if self.write_to_disk:\n # Save the previous data in a backup file\n prev_name = os.path.join(self.home_path, 'archive', stored_name)\n stored_files.to_csv(prev_name,\n date_format='%Y-%m-%d %H:%M:%S.%f',\n header=[self.data_path])\n\n # Overwrite the old reference file with the new file info\n self.files.to_csv(os.path.join(self.home_path, stored_name),\n date_format='%Y-%m-%d %H:%M:%S.%f',\n header=[self.data_path])\n else:\n # Update the hidden File attributes\n self._previous_file_list = stored_files\n self._current_file_list = self.files.copy()\n\n return\n\n def _load(self, prev_version=False, update_path=True):\n \"\"\"Load stored filelist\n\n Parameters\n ----------\n prev_version : boolean\n If True, load the previous version of the file list.\n (default=False)\n update_path : boolean\n If True, the path written to stored info will be\n assigned to self.data_path. (default=True)\n\n Returns\n -------\n pandas.Series\n File path names, indexed by datetime. Series is empty if no\n files are found.\n\n \"\"\"\n\n fname = self.stored_file_name\n if prev_version:\n # Archived file list storage filename\n fname = os.path.join(self.home_path, 'archive', fname)\n else:\n # Current file list storage filename\n fname = os.path.join(self.home_path, fname)\n\n if os.path.isfile(fname) and (os.path.getsize(fname) > 0):\n if self.write_to_disk:\n # Load data stored on the local drive.\n loaded = pds.read_csv(fname, index_col=0, parse_dates=True,\n squeeze=True, header=0)\n if update_path:\n # Store the data_path from the .csv onto Files\n self.data_path = loaded.name\n\n # Ensure the name of returned Series is None for consistency\n loaded.name = None\n\n return loaded\n else:\n # Grab content from memory rather than local disk.\n if prev_version:\n return self._previous_file_list\n else:\n return self._current_file_list\n else:\n # Storage file not present.\n return pds.Series([], dtype='a')\n\n def _remove_data_dir_path(self, file_series=None):\n \"\"\"Remove the data directory path from filenames\n\n Parameters\n ----------\n file_series : pds.Series or NoneType\n Series of filenames (potentially with file paths)\n (default=None)\n\n Returns\n -------\n pds.Series or NoneType\n If `file_series` is a Series, removes the data path from the\n filename, if present. Returns None if `file_series` is None.\n\n \"\"\"\n out = None\n if file_series is not None:\n # Ensure there is a directory divider at the end of the path\n split_str = os.path.join(self.data_path, '')\n\n # Remove the data path from all filenames in the Series\n out = file_series.apply(lambda x: x.split(split_str)[-1])\n\n return out\n\n # -----------------------------------------------------------------------\n # Define the public methods and properties\n\n def copy(self):\n \"\"\"Provide a deep copy of object\n\n Returns\n -------\n Files class instance\n Copy of self\n\n \"\"\"\n # The copy module does not copy modules. Treat self.inst_info\n # differently since it possibly contains a python module, plus\n # it also contains a weakref back to Instrument. 
Because the Instrument\n # reference contains another Files object, it could cause the creation\n # of an infinite, recursive copy.\n saved_info = self.inst_info\n self.inst_info = None\n\n # Copy everything but the problematic info\n files_copy = copy.deepcopy(self)\n\n # Restore the saved information, then copy over items that can be copied\n self.inst_info = saved_info\n files_copy.inst_info = {}\n for key in saved_info.keys():\n if key not in ['inst', 'inst_module']:\n files_copy.inst_info[key] = copy.deepcopy(self.inst_info[key])\n\n # Can't copy the weak reference\n files_copy.inst_info['inst'] = self.inst_info['inst']\n\n # Can't copy the module\n files_copy.inst_info['inst_module'] = self.inst_info['inst_module']\n return files_copy\n\n def refresh(self):\n \"\"\"Update list of files, if there are changes.\n\n Note\n ----\n Calls underlying list_files_rtn for the particular science instrument.\n Typically, these routines search in the pysat provided path,\n pysat_data_dir/platform/name/tag/inst_id, where pysat_data_dir is set by\n pysat.utils.set_data_dir(path=path).\n\n \"\"\"\n\n # Let interested users know what pysat is searching for\n info_str = '{platform} {name} {tag} {inst_id}'.format(\n **self.inst_info)\n info_str = \" \".join((\"pysat is searching for\", info_str, \"files.\"))\n info_str = \" \".join(info_str.split()) # Remove duplicate whitespace\n logger.info(info_str)\n\n # Check all potential directory locations for files.\n # Stop as soon as we find some.\n for path in self.data_paths:\n list_files_rtn = self.inst_info['inst']._list_files_rtn\n kwarg_inputs = self.inst_info['inst'].kwargs['list_files']\n new_files = list_files_rtn(tag=self.inst_info['tag'],\n inst_id=self.inst_info['inst_id'],\n data_path=path,\n format_str=self.file_format,\n **kwarg_inputs)\n\n # Check if list_files_rtn is actually returning filenames or a\n # dict to be passed to the filename creator function.\n if isinstance(new_files, dict):\n self.list_files_creator = partial(general.filename_creator,\n **new_files)\n\n # Instrument iteration methods require a date range.\n self.start_date = filter_datetime_input(new_files['start_date'])\n self.stop_date = filter_datetime_input(new_files['stop_date'])\n\n # To really support iteration, we may need to create a generator\n # function that'll create a fake list of files as needed.\n # It would have to function in place of self.files. Is\n # there truly a point to this?\n return\n\n # Ensure the name of returned Series is None for consistency\n new_files.name = None\n\n # If we find some files, this is the one directory we store.\n # If I don't remove the directory paths then loading by filename\n # becomes more of a challenge. Plus, more memory to store, more\n # difficult for a human to parse when browsing a list, etc. 
The\n # approach here provides for most of the potential functionality\n # of multiple directories while still leaving the 'single' directory\n # focus and features of the original pysat intact.\n if not new_files.empty:\n self.data_path = path\n new_files = self._remove_data_dir_path(new_files)\n break\n\n # Feedback on the number of files located\n logger.info('Found {:d} local files.'.format(len(new_files)))\n\n if not new_files.empty:\n # Sort files to ensure they are in order\n new_files = new_files.sort_index()\n elif pysat.params['warn_empty_file_list']:\n # Warn the user if no files were found and the pysat parameter is set\n pstrs = \"\\n\".join(self.data_paths)\n estr = \"\".join((\"Unable to find any files that match the supplied \",\n \"template: \", self.file_format, \"\\n\",\n \"In the following directories: \\n\", pstrs))\n logger.warning(estr)\n\n # Attach Series of files to the class object\n self._attach_files(new_files)\n\n # Store to disk, if enabled for this class\n self._store()\n return\n\n def set_top_level_directory(self, path):\n \"\"\"Sets top-level data directory.\n\n Sets a valid self.data_path using provided top-level directory\n path and the associated pysat subdirectories derived from the\n directory_format attribute as stored in self.sub_dir_path.\n\n Parameters\n ----------\n path : str\n Top-level path to use when looking for files. Must be in\n pysat.params['data_dirs']\n\n Note\n ----\n If there are Instrument files on the system under a top-level\n directory other than `path`, then, under certain conditions,\n self.data_path may be later updated by the object to point back\n to the directory with files.\n\n \"\"\"\n\n if path not in pysat.params['data_dirs']:\n estr = \"Supplied path not in `pysat.params['data_dirs']`\"\n raise ValueError(estr)\n else:\n self.data_path = os.path.join(path, self.sub_dir_path)\n\n return\n\n def get_new(self):\n \"\"\"List new files since last recorded file state.\n\n Returns\n -------\n pandas.Series\n A datetime-indexed Series of all new filenames since the last known\n change to the files.\n\n Note\n ----\n pysat stores filenames in the user_home/.pysat directory. Filenames are\n stored if there is a change and either update_files is True at\n instrument object level or files.refresh() is called.\n\n \"\"\"\n\n # Refresh file series\n self.refresh()\n\n # Load current and previous set of files\n new_file_series = self._load(update_path=False)\n old_file_series = self._load(prev_version=True, update_path=False)\n\n # Select files that are in the new series and not the old series\n new_files = new_file_series[~new_file_series.isin(old_file_series)]\n\n return new_files\n\n def get_index(self, fname):\n \"\"\"Return index for a given filename.\n\n Parameters\n ----------\n fname : string\n Filename for the desired time index\n\n Note\n ----\n If fname not found in the file information already attached\n to the instrument.files instance, then a files.refresh() call\n is made.\n\n \"\"\"\n\n idx, = np.where(fname == self.files)\n if len(idx) == 0:\n # Filename not in index, try reloading files from disk\n self.refresh()\n idx, = np.where(fname == np.array(self.files))\n\n if len(idx) == 0:\n raise ValueError(' '.join(('Could not find \"{:}\"'.format(fname),\n 'in available file list. Valid',\n 'Example:', self.files.iloc[0])))\n\n # Return a scalar rather than array - otherwise introduces array to\n # index warnings.\n return idx[0]\n\n def get_file_array(self, start, stop):\n \"\"\"Return a list of filenames between and including start and stop.\n\n Parameters\n ----------\n start: array_like or single string\n Filenames for the start of the returned file list\n stop: array_like or single string\n Filenames marking the inclusive end of the returned file list\n\n Returns\n -------\n files : list\n A list of filenames between and including start and stop times\n over all intervals.\n\n Note\n ----\n `start` and `stop` must be of the same type: both array-like or both\n strings\n\n \"\"\"\n\n # Selection is treated differently if start/stop are iterable or not\n # so we convert them to a list as needed for consistency.\n starts = pysat.utils.listify(start)\n stops = pysat.utils.listify(stop)\n\n files = []\n for (sta, stp) in zip(starts, stops):\n id1 = self.get_index(sta)\n id2 = self.get_index(stp)\n files.extend(self.files.iloc[id1:(id2 + 1)])\n\n return files\n\n @classmethod\n def from_os(cls, data_path=None, format_str=None,\n two_digit_year_break=None, delimiter=None):\n \"\"\"\n Produces a list of files and formats it for the Files class.\n\n Parameters\n ----------\n data_path : string\n Top-level directory to search for files. This directory\n is provided by pysat to the instrument_module.list_files\n functions as data_path.\n format_str : string with python format codes\n Provides the naming pattern of the instrument files and the\n locations of date information so an ordered list may be produced.\n Supports 'year', 'month', 'day', 'hour', 'minute', 'second',\n 'version', 'revision', and 'cycle'\n Ex: 'cnofs_cindi_ivm_500ms_{year:4d}{month:02d}{day:02d}_v01.cdf'\n two_digit_year_break : int or None\n If filenames only store two digits for the year, then\n '1900' will be added for years >= two_digit_year_break\n and '2000' will be added for years < two_digit_year_break.\n If None, then four-digit years are assumed. (default=None)\n delimiter : string or NoneType\n Delimiter string upon which files will be split (e.g., '.'). If\n None, filenames will be parsed presuming a fixed width format.\n (default=None)\n\n Returns\n -------\n pds.Series\n A Series of filenames indexed by time. See\n `pysat.utils.files.process_parsed_filenames` for details.\n\n Note\n ----\n Requires a fixed-width or delimited filename\n\n Does not produce a Files instance, but the proper output from\n instrument_module.list_files method.\n\n The '?' may be used to indicate a set number of spaces for a variable\n part of the name that need not be extracted.\n 'cnofs_cindi_ivm_500ms_{year:4d}{month:02d}{day:02d}_v??.cdf'\n\n The 'day' format keyword may be used to specify either day of month\n (if month is included) or day of year.\n\n \"\"\"\n\n if data_path is None:\n raise ValueError(\" \".join((\"Must supply instrument directory path\",\n \"(data_path)\")))\n\n # Parse format string to figure out which search string should be used\n # to identify files in the filesystem. Different option required if\n # filename is delimited\n wildcard = False if delimiter is None else True\n search_dict = futils.construct_searchstring_from_format(\n format_str, wildcard=wildcard)\n search_str = search_dict['search_string']\n\n # Perform the local file search\n files = futils.search_local_system_formatted_filename(data_path,\n search_str)\n\n # Use the file list to extract the information. 
Pull data from the\n # areas identified by format_str\n if delimiter is None:\n stored = futils.parse_fixed_width_filenames(files, format_str)\n else:\n stored = futils.parse_delimited_filenames(files, format_str,\n delimiter)\n\n # Process the parsed filenames and return a properly formatted Series\n return futils.process_parsed_filenames(stored, two_digit_year_break)\n" ]
[ [ "pandas.Series", "pandas.read_csv", "numpy.dtype", "numpy.all", "numpy.array", "numpy.where", "numpy.unique" ] ]
betterenvi/QA-rank
[ "9e709e5fd85212145c98a3bf3cd5007eb76e1ffc" ]
[ "tmp.py" ]
[ "import sys, os, collections, copy\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom pandas import DataFrame, Series\r\n\r\ndata_fn = 'data/WikiQA-train.tsv'\r\nX = pd.read_csv(data_fn, sep='\\t', header=0, dtype=str, skiprows=None, na_values='?', keep_default_na=False)\r\n" ]
[ [ "pandas.read_csv" ] ]
ys2899/DCAR
[ "154cf46fd45dec8639efb6aeb348b25db32c497b" ]
[ "data/model/dcrnn_model.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nimport pdb\n\nfrom tensorflow.contrib import legacy_seq2seq\nfrom lib.metrics import masked_mae_loss\nfrom model.dcrnn_cell import DCGRUCell\n\n\nclass DCRNNARModel(object):\n def __init__(self, is_training, batch_size, scaler, adj_mx, **model_kwargs):\n # Scaler for data normalization.\n self._scaler = scaler\n\n # Train and loss\n self._loss = None\n self._mae = None\n self._train_op = None\n\n max_diffusion_step = int(model_kwargs.get('max_diffusion_step', 2))\n cl_decay_steps = int(model_kwargs.get('cl_decay_steps', 1000))\n filter_type = model_kwargs.get('filter_type', 'laplacian')\n horizon = int(model_kwargs.get('horizon', 1))\n max_grad_norm = float(model_kwargs.get('max_grad_norm', 5.0))\n num_nodes = int(model_kwargs.get('num_nodes', 1))\n num_rnn_layers = int(model_kwargs.get('num_rnn_layers', 1))\n rnn_units = int(model_kwargs.get('rnn_units'))\n seq_len = int(model_kwargs.get('seq_len'))\n use_curriculum_learning = bool(model_kwargs.get('use_curriculum_learning', False))\n input_dim = int(model_kwargs.get('input_dim', 1))\n\n # output_dim = int(model_kwargs.get('output_dim', 1))\n # Input (batch_size, timesteps, num_sensor, input_dim)\n\n self._inputs = tf.placeholder(tf.float32, shape=(batch_size, seq_len, num_nodes, input_dim), name='inputs')\n # Labels: (batch_size, timesteps, num_sensor, input_dim), same format with input except the temporal dimension.\n\n self._labels = tf.placeholder(tf.float32, shape=(batch_size, horizon, num_nodes, input_dim), name='labels')\n self.train_inputs = tf.concat((self._inputs, self._labels), axis=1)\n self._targets = tf.slice(self.train_inputs, [0, 0, 0, 0], [batch_size, horizon+seq_len-1, num_nodes, input_dim], name='targets')\n\n cell = DCGRUCell(rnn_units, adj_mx, max_diffusion_step=max_diffusion_step, num_nodes=num_nodes,\n filter_type=filter_type)\n\n # We temporarily change the num_proj from output_dim to input_dim.\n cell_with_projection = DCGRUCell(rnn_units, adj_mx, max_diffusion_step=max_diffusion_step, num_nodes=num_nodes,\n num_proj=input_dim, filter_type=filter_type)\n\n decoding_cells = [cell] * (num_rnn_layers - 1) + [cell_with_projection]\n decoding_cells = tf.contrib.rnn.MultiRNNCell(decoding_cells, state_is_tuple=True)\n\n global_step = tf.train.get_or_create_global_step()\n\n with tf.variable_scope('DCRNN_SEQ'):\n\n train_inputs = tf.unstack(self.train_inputs, axis=1)\n\n def _loop_function(prev, i):\n # To do: the probability of using the previous is increasing when going towards the\n # end of the sequence.\n if is_training:\n # Return either the model's prediction or the previous ground truth in training.\n if use_curriculum_learning:\n c = tf.random_uniform((), minval=0.0, maxval=1.0)\n threshold = self._compute_sampling_threshold(global_step, cl_decay_steps)\n if i<seq_len:\n result = train_inputs[i]\n else:\n result = tf.cond(tf.less(c, threshold), lambda: train_inputs[i], lambda: prev)\n else:\n result = train_inputs[i]\n else:\n ## Return the prediction of the model in testing.\n if i < seq_len:\n result = train_inputs[i]\n else:\n result = prev\n return result\n\n initial_state = (tf.zeros(shape=(64, 13248)), tf.zeros(shape=(64, 13248)))\n state = initial_state\n outputs = []\n prev = None\n\n for i, inp in enumerate(train_inputs):\n with tf.variable_scope(\"loop_function\", reuse=True):\n if prev is not None:\n inp = _loop_function(prev, i)\n if i > 0:\n ## To Do: need to 
check the variable scope.\n tf.get_variable_scope().reuse_variables()\n\n output, state = decoding_cells(inp, state)\n output = tf.reshape(output, (batch_size, num_nodes, 2))\n outputs.append(output)\n prev = output\n\n outputs = tf.stack(outputs[:-1], axis=1)\n self._outputs = tf.reshape(outputs, (batch_size, horizon + seq_len - 1, num_nodes, input_dim), name='outputs')\n self._merged = tf.summary.merge_all()\n\n @staticmethod\n def _compute_sampling_threshold(global_step, k):\n \"\"\"\n Computes the sampling probability for scheduled sampling using inverse sigmoid.\n :param global_step:\n :param k:\n :return:\n \"\"\"\n return tf.cast(k / (k + tf.exp(global_step / k)), tf.float32)\n\n @property\n def inputs(self):\n return self._inputs\n\n @property\n def labels(self):\n return self._labels\n\n @property\n def targets(self):\n return self._targets\n\n @property\n def loss(self):\n return self._loss\n\n @property\n def mae(self):\n return self._mae\n\n @property\n def merged(self):\n return self._merged\n\n @property\n def outputs(self):\n return self._outputs\n\n\n# class DCRNNModel(object):\n# def __init__(self, is_training, batch_size, scaler, adj_mx, **model_kwargs):\n# # Scaler for data normalization.\n# self._scaler = scaler\n#\n# # Train and loss\n# self._loss = None\n# self._mae = None\n# self._train_op = None\n#\n# max_diffusion_step = int(model_kwargs.get('max_diffusion_step', 2))\n# cl_decay_steps = int(model_kwargs.get('cl_decay_steps', 1000))\n# filter_type = model_kwargs.get('filter_type', 'laplacian')\n# horizon = int(model_kwargs.get('horizon', 1))\n# max_grad_norm = float(model_kwargs.get('max_grad_norm', 5.0))\n# num_nodes = int(model_kwargs.get('num_nodes', 1))\n# num_rnn_layers = int(model_kwargs.get('num_rnn_layers', 1))\n# rnn_units = int(model_kwargs.get('rnn_units'))\n# seq_len = int(model_kwargs.get('seq_len'))\n# use_curriculum_learning = bool(model_kwargs.get('use_curriculum_learning', False))\n# input_dim = int(model_kwargs.get('input_dim', 1))\n# output_dim = int(model_kwargs.get('output_dim', 1))\n#\n# # Input (batch_size, timesteps, num_sensor, input_dim)\n# self._inputs = tf.placeholder(tf.float32, shape=(batch_size, seq_len, num_nodes, input_dim), name='inputs')\n# # Labels: (batch_size, timesteps, num_sensor, input_dim), same format with input except the temporal dimension.\n# self._labels = tf.placeholder(tf.float32, shape=(batch_size, horizon, num_nodes, input_dim), name='labels')\n#\n# GO_SYMBOL = tf.zeros(shape=(batch_size, num_nodes * input_dim))\n#\n# cell = DCGRUCell(rnn_units, adj_mx, max_diffusion_step=max_diffusion_step, num_nodes=num_nodes,\n# filter_type=filter_type)\n# cell_with_projection = DCGRUCell(rnn_units, adj_mx, max_diffusion_step=max_diffusion_step, num_nodes=num_nodes,\n# num_proj=output_dim, filter_type=filter_type)\n# encoding_cells = [cell] * num_rnn_layers\n# decoding_cells = [cell] * (num_rnn_layers - 1) + [cell_with_projection]\n# encoding_cells = tf.contrib.rnn.MultiRNNCell(encoding_cells, state_is_tuple=True)\n# decoding_cells = tf.contrib.rnn.MultiRNNCell(decoding_cells, state_is_tuple=True)\n#\n# global_step = tf.train.get_or_create_global_step()\n# # Outputs: (batch_size, timesteps, num_nodes, output_dim)\n# with tf.variable_scope('DCRNN_SEQ'):\n# inputs = tf.unstack(tf.reshape(self._inputs, (batch_size, seq_len, num_nodes * input_dim)), axis=1)\n# labels = tf.unstack(\n# tf.reshape(self._labels[..., :output_dim], (batch_size, horizon, num_nodes * output_dim)), axis=1)\n# labels.insert(0, GO_SYMBOL)\n#\n# def 
_loop_function(prev, i):\n# if is_training:\n# # Return either the model's prediction or the previous ground truth in training.\n# if use_curriculum_learning:\n# c = tf.random_uniform((), minval=0, maxval=1.)\n# threshold = self._compute_sampling_threshold(global_step, cl_decay_steps)\n# result = tf.cond(tf.less(c, threshold), lambda: labels[i], lambda: prev)\n# else:\n# result = labels[i]\n# else:\n# # Return the prediction of the model in testing.\n# result = prev\n# return result\n#\n# _, enc_state = tf.contrib.rnn.static_rnn(encoding_cells, inputs, dtype=tf.float32)\n# outputs, final_state = legacy_seq2seq.rnn_decoder(labels, enc_state, decoding_cells,\n# loop_function=_loop_function)\n#\n#\n# # Project the output to output_dim.\n# outputs = tf.stack(outputs[:-1], axis=1)\n# self._outputs = tf.reshape(outputs, (batch_size, horizon, num_nodes, output_dim), name='outputs')\n# self._merged = tf.summary.merge_all()\n#\n# @staticmethod\n# def _compute_sampling_threshold(global_step, k):\n# \"\"\"\n# Computes the sampling probability for scheduled sampling using inverse sigmoid.\n# :param global_step:\n# :param k:\n# :return:\n# \"\"\"\n# return tf.cast(k / (k + tf.exp(global_step / k)), tf.float32)\n#\n# @property\n# def inputs(self):\n# return self._inputs\n#\n# @property\n# def labels(self):\n# return self._labels\n#\n# @property\n# def loss(self):\n# return self._loss\n#\n# @property\n# def mae(self):\n# return self._mae\n#\n# @property\n# def merged(self):\n# return self._merged\n#\n# @property\n# def outputs(self):\n# return self._outputs\n" ]
[ [ "tensorflow.placeholder", "tensorflow.zeros", "tensorflow.stack", "tensorflow.reshape", "tensorflow.summary.merge_all", "tensorflow.unstack", "tensorflow.contrib.rnn.MultiRNNCell", "tensorflow.variable_scope", "tensorflow.random_uniform", "tensorflow.exp", "tensorflow.get_variable_scope", "tensorflow.less", "tensorflow.slice", "tensorflow.train.get_or_create_global_step", "tensorflow.concat" ] ]
wang-chen/graph-action-recognition
[ "319a5287c3fb58f233a8b56ed70f5be94703aa61" ]
[ "models/mlp.py" ]
[ "#!/usr/bin/env python3\n\nimport torch\nimport torch.nn as nn\n\n\nclass MLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.feat1 = nn.Sequential(nn.Flatten(), nn.Linear(50*5*5, 32*5*5), nn.ReLU())\n self.feat2 = nn.Sequential(nn.Linear(32*5*5, 32*12), nn.ReLU())\n self.linear = nn.Sequential(nn.Linear(32*12, 13))\n\n def forward(self, x):\n x = self.feat1(x)\n x = self.feat2(x)\n return self.linear(x)\n" ]
[ [ "torch.nn.ReLU", "torch.nn.Linear", "torch.nn.Flatten" ] ]
jkhenning/ignite
[ "2485fd42c6ef4d3e97fd606a52f8c6e5d940357e" ]
[ "tests/ignite/distributed/utils/test_native.py" ]
[ "import os\n\nimport pytest\nimport torch\nimport torch.distributed as dist\n\nimport ignite.distributed as idist\nfrom ignite.distributed.utils import has_native_dist_support\nfrom tests.ignite.distributed.utils import (\n _test_distrib_all_gather,\n _test_distrib_all_reduce,\n _test_distrib_barrier,\n _test_distrib_broadcast,\n _test_distrib_config,\n _test_distrib_one_rank_only,\n _test_distrib_one_rank_only_with_engine,\n _test_sync,\n)\n\n\[email protected]\[email protected](not has_native_dist_support, reason=\"Skip if no native dist support\")\ndef test_native_distrib_single_node_launch_tool_gloo(local_rank, world_size):\n import os\n from datetime import timedelta\n\n timeout = timedelta(seconds=20)\n rank = local_rank\n os.environ[\"RANK\"] = f\"{rank}\"\n\n idist.initialize(\"gloo\", timeout=timeout)\n _test_distrib_config(local_rank, \"gloo\", world_size, \"cpu\", rank)\n idist.finalize()\n\n\[email protected]\[email protected](not has_native_dist_support, reason=\"Skip if no native dist support\")\[email protected](torch.cuda.device_count() < 1, reason=\"Skip if no GPU\")\ndef test_native_distrib_single_node_launch_tool_nccl(local_rank, world_size):\n import os\n\n rank = local_rank\n os.environ[\"RANK\"] = f\"{rank}\"\n\n idist.initialize(\"nccl\")\n _test_distrib_config(local_rank, \"nccl\", world_size, \"cuda\", rank)\n idist.finalize()\n\n\[email protected]\[email protected](not has_native_dist_support, reason=\"Skip if no native dist support\")\[email protected](\"WORLD_SIZE\" in os.environ, reason=\"Skip if launched as multiproc\")\ndef test_native_distrib_single_node_spawn_gloo():\n\n from datetime import timedelta\n\n timeout = timedelta(seconds=20)\n\n world_size = 4\n\n idist.spawn(\n \"gloo\", _test_distrib_config, args=(\"gloo\", world_size, \"cpu\"), nproc_per_node=world_size, timeout=timeout\n )\n\n\[email protected]\[email protected](not has_native_dist_support, reason=\"Skip if no native dist support\")\[email protected](\"WORLD_SIZE\" in os.environ, reason=\"Skip if launched as multiproc\")\[email protected](torch.cuda.device_count() < 1, reason=\"Skip if no GPU\")\ndef test_native_distrib_single_node_spawn_nccl():\n world_size = torch.cuda.device_count()\n\n idist.spawn(\"nccl\", _test_distrib_config, args=(\"nccl\", world_size, \"cuda\"), nproc_per_node=world_size)\n\n\[email protected]\[email protected](not has_native_dist_support, reason=\"Skip if no native dist support\")\ndef test_sync_as_native_gloo(distributed_context_single_node_gloo):\n from ignite.distributed.comp_models.native import _NativeDistModel\n\n _test_sync(_NativeDistModel)\n\n\[email protected]\[email protected](not has_native_dist_support, reason=\"Skip if no native dist support\")\[email protected](torch.cuda.device_count() < 1, reason=\"Skip if no GPU\")\ndef test_sync_as_native_nccl(distributed_context_single_node_nccl):\n from ignite.distributed.comp_models.native import _NativeDistModel\n\n _test_sync(_NativeDistModel)\n\n\ndef _test_idist_methods_in_native_context(backend, device, local_rank):\n # We explicitly set _model as _SerialModel\n # then call idist.* methods and check that they give correct values\n from ignite.distributed.utils import _set_model, _SerialModel\n\n _set_model(_SerialModel())\n\n ws = dist.get_world_size()\n rank = dist.get_rank()\n _test_distrib_config(local_rank, backend=backend, ws=ws, true_device=device, rank=rank)\n\n\[email protected]\[email protected](not has_native_dist_support, reason=\"Skip if no native dist support\")\ndef 
test_idist_methods_in_native_gloo_context(distributed_context_single_node_gloo):\n local_rank = distributed_context_single_node_gloo[\"local_rank\"]\n _test_idist_methods_in_native_context(\"gloo\", \"cpu\", local_rank)\n\n\[email protected]\[email protected](not has_native_dist_support, reason=\"Skip if no native dist support\")\[email protected](torch.cuda.device_count() < 1, reason=\"Skip if no GPU\")\ndef test_idist_methods_in_native_nccl_context(distributed_context_single_node_nccl):\n local_rank = distributed_context_single_node_nccl[\"local_rank\"]\n _test_idist_methods_in_native_context(\"nccl\", \"cuda\", local_rank)\n\n\ndef _test_idist_methods_in_native_context_set_local_rank(backend, device, local_rank):\n # We explicitly set _model as _SerialModel\n # then call idist.* methods and check that they give correct values\n from ignite.distributed.utils import _set_model, _SerialModel\n\n _set_model(_SerialModel())\n\n lrank = int(os.environ[\"LOCAL_RANK\"])\n del os.environ[\"LOCAL_RANK\"]\n\n ws = dist.get_world_size()\n rank = dist.get_rank()\n\n idist.set_local_rank(local_rank)\n\n _test_distrib_config(local_rank=local_rank, backend=backend, ws=ws, true_device=device, rank=rank)\n\n os.environ[\"LOCAL_RANK\"] = str(lrank)\n\n\[email protected]\[email protected](not has_native_dist_support, reason=\"Skip if no native dist support\")\ndef test_idist_methods_in_native_gloo_context_set_local_rank(distributed_context_single_node_gloo):\n local_rank = distributed_context_single_node_gloo[\"local_rank\"]\n _test_idist_methods_in_native_context_set_local_rank(\"gloo\", \"cpu\", local_rank)\n\n\[email protected]\[email protected](not has_native_dist_support, reason=\"Skip if no native dist support\")\[email protected](torch.cuda.device_count() < 1, reason=\"Skip if no GPU\")\ndef test_idist_methods_in_native_nccl_context_set_local_rank(distributed_context_single_node_nccl):\n local_rank = distributed_context_single_node_nccl[\"local_rank\"]\n _test_idist_methods_in_native_context_set_local_rank(\"nccl\", \"cuda\", local_rank)\n\n\[email protected]\[email protected](not has_native_dist_support, reason=\"Skip if no native dist support\")\[email protected](torch.cuda.device_count() < 1, reason=\"Skip if no GPU\")\ndef test_idist_all_reduce_nccl(distributed_context_single_node_nccl):\n\n device = f\"cuda:{distributed_context_single_node_nccl['local_rank']}\"\n _test_distrib_all_reduce(device)\n\n\[email protected]\[email protected](not has_native_dist_support, reason=\"Skip if no native dist support\")\ndef test_idist_all_reduce_gloo(distributed_context_single_node_gloo):\n\n device = \"cpu\"\n _test_distrib_all_reduce(device)\n\n\[email protected]\[email protected](not has_native_dist_support, reason=\"Skip if no native dist support\")\[email protected](torch.cuda.device_count() < 1, reason=\"Skip if no GPU\")\ndef test_idist_all_gather_nccl(distributed_context_single_node_nccl):\n\n device = f\"cuda:{distributed_context_single_node_nccl['local_rank']}\"\n _test_distrib_all_gather(device)\n\n\[email protected]\[email protected](not has_native_dist_support, reason=\"Skip if no native dist support\")\ndef test_idist_all_gather_gloo(distributed_context_single_node_gloo):\n\n device = \"cpu\"\n _test_distrib_all_gather(device)\n\n\[email protected]\[email protected](not has_native_dist_support, reason=\"Skip if no native dist support\")\[email protected](torch.cuda.device_count() < 1, reason=\"Skip if no GPU\")\ndef test_idist_broadcast_nccl(distributed_context_single_node_nccl):\n\n device = 
f\"cuda:{distributed_context_single_node_nccl['local_rank']}\"\n _test_distrib_broadcast(device)\n\n\[email protected]\[email protected](not has_native_dist_support, reason=\"Skip if no native dist support\")\ndef test_idist_broadcast_gloo(distributed_context_single_node_gloo):\n\n device = \"cpu\"\n _test_distrib_broadcast(device)\n\n\[email protected]\[email protected](not has_native_dist_support, reason=\"Skip if no native dist support\")\[email protected](torch.cuda.device_count() < 1, reason=\"Skip if no GPU\")\ndef test_idist_barrier_nccl(distributed_context_single_node_nccl):\n\n device = f\"cuda:{distributed_context_single_node_nccl['local_rank']}\"\n _test_distrib_barrier(device)\n\n\[email protected]\[email protected](not has_native_dist_support, reason=\"Skip if no native dist support\")\ndef test_idist_barrier_gloo(distributed_context_single_node_gloo):\n\n device = \"cpu\"\n _test_distrib_barrier(device)\n\n\ndef _test_idist_methods_overhead(ok_factor):\n import time\n\n n = 100000\n m = 5\n\n t2 = 0.0\n t1 = 0.0\n for j in range(m):\n start = time.time()\n for _ in range(n):\n _ = dist.get_world_size()\n _ = dist.get_rank()\n elapsed = time.time() - start\n t2 += elapsed / n / m\n\n start = time.time()\n for _ in range(n):\n _ = idist.get_world_size()\n _ = idist.get_rank()\n elapsed = time.time() - start\n t1 += elapsed / n / m\n\n overhead_factor = t1 / t2\n assert overhead_factor < ok_factor, f\"{overhead_factor} vs {ok_factor} | {t2} vs {t1}\"\n\n\[email protected]\[email protected](not has_native_dist_support, reason=\"Skip if no native dist support\")\[email protected](\n not torch.cuda.is_available(), reason=\"Do not want to run this test on Github or Travis, but CircleCI\"\n)\ndef test_idist_methods_overhead_gloo(distributed_context_single_node_gloo):\n _test_idist_methods_overhead(2.5)\n\n idist.sync()\n from ignite.distributed.utils import _model\n from ignite.distributed.comp_models.native import _NativeDistModel\n\n assert isinstance(_model, _NativeDistModel)\n\n _test_idist_methods_overhead(1.7)\n\n\[email protected]\[email protected](not has_native_dist_support, reason=\"Skip if no native dist support\")\[email protected](torch.cuda.device_count() < 1, reason=\"Skip if no GPU\")\ndef test_idist_methods_overhead_nccl(distributed_context_single_node_nccl):\n _test_idist_methods_overhead(2.5)\n\n idist.sync()\n from ignite.distributed.utils import _model\n from ignite.distributed.comp_models.native import _NativeDistModel\n\n assert isinstance(_model, _NativeDistModel)\n\n _test_idist_methods_overhead(1.7)\n\n\[email protected]\[email protected](not has_native_dist_support, reason=\"Skip if no native dist support\")\ndef test_idist_one_rank_only_gloo(distributed_context_single_node_gloo):\n device = \"cpu\"\n _test_distrib_one_rank_only(device=device)\n _test_distrib_one_rank_only_with_engine(device=device)\n\n\[email protected]\[email protected](not has_native_dist_support, reason=\"Skip if no native dist support\")\[email protected](torch.cuda.device_count() < 1, reason=\"Skip if no GPU\")\ndef test_idist_one_rank_only_nccl(local_rank, distributed_context_single_node_nccl):\n device = f\"cuda:{local_rank}\"\n _test_distrib_one_rank_only(device=device)\n _test_distrib_one_rank_only_with_engine(device=device)\n" ]
[ [ "torch.distributed.get_rank", "torch.cuda.is_available", "torch.distributed.get_world_size", "torch.cuda.device_count" ] ]
ediphy-dwild/gpytorch
[ "559c78a6446237ed7cc8e1cc7cf4ed8bf31a3c8a" ]
[ "gpytorch/utils/linear_cg.py" ]
[ "import torch\nfrom .. import settings\n\n\ndef _default_preconditioner(x):\n return x.clone()\n\n\ndef linear_cg(matmul_closure, rhs, n_tridiag=0, tolerance=1e-6, eps=1e-20, max_iter=None,\n initial_guess=None, preconditioner=None):\n \"\"\"\n Implements the linear conjugate gradients method for (approximately) solving systems of the form\n\n lhs result = rhs\n\n for positive definite and symmetric matrices.\n\n Args:\n - matmul_closure - a function which performs a left matrix multiplication with lhs_mat\n - rhs - the right-hand side of the equation\n - n_tridiag - returns a tridiagonalization of the first n_tridiag columns of rhs\n - tolerance - stop the solve when the max residual is less than this\n - eps - noise to add to prevent division by zero\n - max_iter - the maximum number of CG iterations\n - initial_guess - an initial guess at the solution `result`\n - preconditioner - a function which left-preconditions a supplied vector\n\n Returns:\n result - a solution to the system (if n_tridiag is 0)\n result, tridiags - a solution to the system, and corresponding tridiagonal matrices (if n_tridiag > 0)\n \"\"\"\n # Unsqueeze, if necessary\n is_vector = rhs.ndimension() == 1\n if is_vector:\n rhs = rhs.unsqueeze(-1)\n\n # Some default arguments\n if max_iter is None:\n max_iter = settings.max_cg_iterations.value()\n if initial_guess is None:\n initial_guess = rhs.new(rhs.size()).zero_()\n if preconditioner is None:\n preconditioner = _default_preconditioner\n\n # Check matmul_closure object\n if torch.is_tensor(matmul_closure):\n matmul_closure = matmul_closure.matmul\n elif not callable(matmul_closure):\n raise RuntimeError('matmul_closure must be a tensor, or a callable object!')\n\n # Get some constants\n n_rows = rhs.size(-2)\n n_iter = min(max_iter, n_rows)\n\n # result <- x_{0}\n result = initial_guess\n\n # residual: residual_{0} = b_vec - lhs x_{0}\n residual = rhs - matmul_closure(result)\n\n # Sometimes we're lucky and the preconditioner solves the system right away\n residual_norm = residual.norm(2, dim=-2)\n if not torch.sum(residual_norm > tolerance):\n n_iter = 0 # Skip the iteration!\n\n # Otherwise, let's define precond_residual and curr_conjugate_vec\n else:\n # precon_residual{0} = M^-1 residual_{0}\n precond_residual = preconditioner(residual)\n curr_conjugate_vec = precond_residual\n residual_inner_prod = precond_residual.mul(residual).sum(-2, keepdim=True)\n\n # Define storage matrices\n mul_storage = residual.new(residual.size())\n alpha = residual.new(rhs.size(0), 1, rhs.size(-1)) if rhs.ndimension() == 3 else residual.new(1, rhs.size(-1))\n beta = alpha.new(alpha.size())\n\n # Define tridiagonal matrices, if applicable\n if n_tridiag:\n if rhs.ndimension() == 3:\n t_mat = residual.new(n_iter, n_iter, rhs.size(0), n_tridiag).zero_()\n alpha_reciprocal = alpha.new(rhs.size(0), n_tridiag)\n else:\n t_mat = residual.new(n_iter, n_iter, n_tridiag).zero_()\n alpha_reciprocal = alpha.new(n_tridiag)\n\n prev_alpha_reciprocal = alpha.new(alpha_reciprocal.size())\n prev_beta = alpha.new(alpha_reciprocal.size())\n\n # Start the iteration\n for k in range(n_iter):\n # Get next alpha\n # alpha_{k} = (residual_{k-1}^T precon_residual{k-1}) / (p_vec_{k-1}^T mat p_vec_{k-1})\n mvms = matmul_closure(curr_conjugate_vec)\n torch.mul(curr_conjugate_vec, mvms, out=mul_storage)\n torch.sum(mul_storage, -2, keepdim=True, out=alpha)\n alpha.add_(eps)\n torch.div(residual_inner_prod, alpha, out=alpha)\n\n # Update result\n # result_{k} = result_{k-1} + alpha_{k} p_vec_{k-1}\n torch.addcmul(result, alpha, curr_conjugate_vec, out=result)\n\n # Update residual\n # residual_{k} = residual_{k-1} - alpha_{k} mat p_vec_{k-1}\n torch.addcmul(residual, -1, alpha, mvms, out=residual)\n\n # If residuals are sufficiently small, then exit loop\n # Alternatively, exit if this is our last iteration\n torch.norm(residual, 2, dim=-2, out=residual_norm)\n if not (torch.sum(residual_norm > tolerance)) and not n_tridiag:\n break\n\n # Update precond_residual\n # precon_residual{k} = M^-1 residual_{k}\n precond_residual = preconditioner(residual)\n\n # beta_{k} = (precon_residual{k}^T r_vec_{k}) / (precon_residual{k-1}^T r_vec_{k-1})\n residual_inner_prod.add_(eps)\n torch.reciprocal(residual_inner_prod, out=beta)\n torch.mul(residual, precond_residual, out=mul_storage)\n torch.sum(mul_storage, -2, keepdim=True, out=residual_inner_prod)\n beta.mul_(residual_inner_prod)\n\n # Update curr_conjugate_vec\n # curr_conjugate_vec_{k} = precon_residual{k} + beta_{k} curr_conjugate_vec_{k-1}\n curr_conjugate_vec.mul_(beta).add_(precond_residual)\n\n # Update tridiagonal matrices, if applicable\n if n_tridiag:\n alpha_tridiag = alpha.squeeze_(-2).narrow(-1, 0, n_tridiag)\n beta_tridiag = beta.squeeze_(-2).narrow(-1, 0, n_tridiag)\n torch.reciprocal(alpha_tridiag, out=alpha_reciprocal)\n\n if k == 0:\n t_mat[k, k].copy_(alpha_reciprocal)\n else:\n torch.addcmul(alpha_reciprocal, prev_beta, prev_alpha_reciprocal, out=t_mat[k, k])\n torch.mul(prev_beta.sqrt_(), prev_alpha_reciprocal, out=t_mat[k, k - 1])\n t_mat[k - 1, k].copy_(t_mat[k, k - 1])\n\n prev_alpha_reciprocal.copy_(alpha_reciprocal)\n prev_beta.copy_(beta_tridiag)\n\n if is_vector:\n result = result.squeeze(-1)\n\n if n_tridiag:\n if rhs.ndimension() == 3:\n return result, t_mat.permute(3, 2, 0, 1).contiguous()\n else:\n return result, t_mat.permute(2, 0, 1).contiguous()\n else:\n return result\n" ]
[ [ "torch.sum", "torch.addcmul", "torch.reciprocal", "torch.div", "torch.mul", "torch.norm", "torch.is_tensor" ] ]
Madlhawa/Real-time-Edge-analytics
[ "9e3e7be1c32f6d33f81ffe27c7eed63f8bbb6f39" ]
[ "Past data/GetPastData.py" ]
[ "# Import packages\nimport pandas as pd\nfrom datetime import datetime\nimport numpy as np\n\n#Reading predicted data and changing date column data type\npdata = pd.read_csv('/home/pi/LABS/Asingment/Real-time-Edge-analytics/PredictionDataset.csv', skiprows=0)\npdata['Date'] = pd.to_datetime(pdata['Date'])\n\n#Selecting data according to date range\nmask = (pdata['Date'] > '2013-6-1') & (pdata['Date'] < '2014-6-10')\npoutput = pdata.loc[mask]\n\nfor index, row in poutput.iterrows():\n print(row['Date'], row['PredictionUntilThisMonth'])\n \n \n\n\n\n" ]
[ [ "pandas.read_csv", "pandas.to_datetime" ] ]
bvanaken/FARM
[ "09767092457e73860c3a604b5060562c2004f03d" ]
[ "farm/modeling/language_model.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors, The HuggingFace Inc. Team and deepset Team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Acknowledgements: Many of the modeling parts here come from the great transformers repository: https://github.com/huggingface/transformers.\nThanks for the great work! \"\"\"\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport json\nimport logging\nimport os\nimport io\nfrom pathlib import Path\nfrom collections import OrderedDict\n\nfrom dotmap import DotMap\nfrom tqdm import tqdm\nimport copy\nimport numpy as np\nimport torch\nfrom torch import nn\n\nlogger = logging.getLogger(__name__)\n\nfrom transformers.modeling_bert import BertModel, BertConfig\nfrom transformers.modeling_roberta import RobertaModel, RobertaConfig\nfrom transformers.modeling_xlnet import XLNetModel, XLNetConfig\nfrom transformers.modeling_albert import AlbertModel, AlbertConfig\nfrom transformers.modeling_xlm_roberta import XLMRobertaModel, XLMRobertaConfig\nfrom transformers.modeling_distilbert import DistilBertModel, DistilBertConfig\nfrom transformers.modeling_electra import ElectraModel, ElectraConfig\nfrom transformers.modeling_camembert import CamembertModel, CamembertConfig\nfrom transformers.modeling_utils import SequenceSummary\nfrom transformers.tokenization_bert import load_vocab\n\nfrom farm.modeling import wordembedding_utils\nfrom farm.modeling.wordembedding_utils import s3e_pooling\n\n# These are the names of the attributes in various model configs which refer to the number of dimensions\n# in the output vectors\nOUTPUT_DIM_NAMES = [\"dim\", \"hidden_size\", \"d_model\"]\n\n\nclass LanguageModel(nn.Module):\n \"\"\"\n The parent class for any kind of model that can embed language into a semantic vector space. Practically\n speaking, these models read in tokenized sentences and return vectors that capture the meaning of sentences\n or of tokens.\n \"\"\"\n\n subclasses = {}\n\n def __init_subclass__(cls, **kwargs):\n \"\"\" This automatically keeps track of all available subclasses.\n Enables generic load() for all specific LanguageModel implementations.\n \"\"\"\n super().__init_subclass__(**kwargs)\n cls.subclasses[cls.__name__] = cls\n\n def forward(self, input_ids, padding_mask, **kwargs):\n raise NotImplementedError\n\n @classmethod\n def from_scratch(cls, model_type, vocab_size):\n if model_type.lower() == \"bert\":\n model = Bert\n return model.from_scratch(vocab_size)\n\n @classmethod\n def load(cls, pretrained_model_name_or_path, n_added_tokens=0, language_model_class=None, **kwargs):\n \"\"\"\n Load a pretrained language model either by\n\n 1. specifying its name and downloading it\n 2. 
or pointing to the directory it is saved in.\n\n Available remote models:\n\n * bert-base-uncased\n * bert-large-uncased\n * bert-base-cased\n * bert-large-cased\n * bert-base-multilingual-uncased\n * bert-base-multilingual-cased\n * bert-base-chinese\n * bert-base-german-cased\n * roberta-base\n * roberta-large\n * xlnet-base-cased\n * xlnet-large-cased\n * xlm-roberta-base\n * xlm-roberta-large\n * albert-base-v2\n * albert-large-v2\n * distilbert-base-german-cased\n * distilbert-base-multilingual-cased\n * google/electra-small-discriminator\n * google/electra-base-discriminator\n * google/electra-large-discriminator\n\n See all supported model variations here: https://huggingface.co/models\n\n The appropriate language model class is inferred automatically from `pretrained_model_name_or_path`\n or can be manually supplied via `language_model_class`.\n\n :param pretrained_model_name_or_path: The path of the saved pretrained model or its name.\n :type pretrained_model_name_or_path: str\n :param language_model_class: (Optional) Name of the language model class to load (e.g. `Bert`)\n :type language_model_class: str\n\n \"\"\"\n config_file = Path(pretrained_model_name_or_path) / \"language_model_config.json\"\n if os.path.exists(config_file):\n # it's a local directory in FARM format\n config = json.load(open(config_file))\n language_model = cls.subclasses[config[\"name\"]].load(pretrained_model_name_or_path)\n else:\n if language_model_class is None:\n # it's transformers format (either from model hub or local)\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n if \"xlm\" in pretrained_model_name_or_path and \"roberta\" in pretrained_model_name_or_path:\n language_model_class = 'XLMRoberta'\n elif 'roberta' in pretrained_model_name_or_path:\n language_model_class = 'Roberta'\n elif 'camembert' in pretrained_model_name_or_path or 'umberto' in pretrained_model_name_or_path:\n language_model_class = \"Camembert\"\n elif 'albert' in pretrained_model_name_or_path:\n language_model_class = 'Albert'\n elif 'distilbert' in pretrained_model_name_or_path:\n language_model_class = 'DistilBert'\n elif 'bert' in pretrained_model_name_or_path:\n language_model_class = 'Bert'\n elif 'xlnet' in pretrained_model_name_or_path:\n language_model_class = 'XLNet'\n elif 'electra' in pretrained_model_name_or_path:\n language_model_class = 'Electra'\n elif \"word2vec\" in pretrained_model_name_or_path.lower() or \"glove\" in pretrained_model_name_or_path.lower():\n language_model_class = 'WordEmbedding_LM'\n\n if language_model_class:\n language_model = cls.subclasses[language_model_class].load(pretrained_model_name_or_path, **kwargs)\n else:\n language_model = None\n\n if not language_model:\n raise Exception(\n f\"Model not found for {pretrained_model_name_or_path}. Either supply the local path for a saved \"\n f\"model or one of bert/roberta/xlnet/albert/distilbert models that can be downloaded from remote. \"\n f\"Ensure that the model class name can be inferred from the directory name when loading a \"\n f\"Transformers' model. 
Here's a list of available models: \"\n f\"https://farm.deepset.ai/api/modeling.html#farm.modeling.language_model.LanguageModel.load\"\n )\n\n # resize embeddings in case of custom vocab\n if n_added_tokens != 0:\n # TODO verify for other models than BERT\n model_emb_size = language_model.model.resize_token_embeddings(new_num_tokens=None).num_embeddings\n vocab_size = model_emb_size + n_added_tokens\n logger.info(\n f\"Resizing embedding layer of LM from {model_emb_size} to {vocab_size} to cope with custom vocab.\")\n language_model.model.resize_token_embeddings(vocab_size)\n # verify\n model_emb_size = language_model.model.resize_token_embeddings(new_num_tokens=None).num_embeddings\n assert vocab_size == model_emb_size\n\n return language_model\n\n def get_output_dims(self):\n config = self.model.config\n for odn in OUTPUT_DIM_NAMES:\n if odn in dir(config):\n return getattr(config, odn)\n else:\n raise Exception(\"Could not infer the output dimensions of the language model\")\n\n def freeze(self, layers):\n \"\"\" To be implemented\"\"\"\n raise NotImplementedError()\n\n def unfreeze(self):\n \"\"\" To be implemented\"\"\"\n raise NotImplementedError()\n\n def save_config(self, save_dir):\n save_filename = Path(save_dir) / \"language_model_config.json\"\n with open(save_filename, \"w\") as file:\n setattr(self.model.config, \"name\", self.__class__.__name__)\n setattr(self.model.config, \"language\", self.language)\n string = self.model.config.to_json_string()\n file.write(string)\n\n def save(self, save_dir):\n \"\"\"\n Save the model state_dict and its config file so that it can be loaded again.\n\n :param save_dir: The directory in which the model should be saved.\n :type save_dir: str\n \"\"\"\n # Save Weights\n save_name = Path(save_dir) / \"language_model.bin\"\n model_to_save = (\n self.model.module if hasattr(self.model, \"module\") else self.model\n ) # Only save the model it-self\n torch.save(model_to_save.state_dict(), save_name)\n self.save_config(save_dir)\n\n @classmethod\n def _get_or_infer_language_from_name(cls, language, name):\n if language is not None:\n return language\n else:\n return cls._infer_language_from_name(name)\n\n @classmethod\n def _infer_language_from_name(cls, name):\n known_languages = (\n \"german\",\n \"english\",\n \"chinese\",\n \"indian\",\n \"french\",\n \"polish\",\n \"spanish\",\n \"multilingual\",\n )\n matches = [lang for lang in known_languages if lang in name]\n if \"camembert\" in name:\n language = \"french\"\n logger.info(\n f\"Automatically detected language from language model name: {language}\"\n )\n elif \"umberto\" in name:\n language = \"italian\"\n logger.info(\n f\"Automatically detected language from language model name: {language}\"\n )\n elif len(matches) == 0:\n language = \"english\"\n logger.warning(\n \"Could not automatically detect from language model name what language it is. \\n\"\n \"\\t We guess it's an *ENGLISH* model ... 
\\n\"\n \"\\t If not: Init the language model by supplying the 'language' param.\"\n )\n elif len(matches) > 1:\n logger.warning(\n \"Could not automatically detect from language model name what language it is.\\n\"\n f\"\\t Found multiple matches: {matches}\\n\"\n \"\\t Please init the language model by manually supplying the 'language' as a parameter.\\n\"\n f\"\\t Using {matches[0]} as language parameter for now.\\n\"\n )\n language = matches[0]\n else:\n language = matches[0]\n logger.info(\n f\"Automatically detected language from language model name: {language}\"\n )\n\n return language\n\n def formatted_preds(self, logits, samples, ignore_first_token=True,\n padding_mask=None, input_ids=None, **kwargs):\n \"\"\"\n Extracting vectors from language model (e.g. for extracting sentence embeddings).\n Different pooling strategies and layers are available and will be determined from the object attributes\n `extraction_layer` and `extraction_strategy`. Both should be set via the Inferencer:\n Example: Inferencer(extraction_strategy='cls_token', extraction_layer=-1)\n\n :param logits: Tuple of (sequence_output, pooled_output) from the language model.\n Sequence_output: one vector per token, pooled_output: one vector for whole sequence\n :param samples: For each item in logits we need additional meta information to format the prediction (e.g. input text).\n This is created by the Processor and passed in here from the Inferencer.\n :param ignore_first_token: Whether to include the first token for pooling operations (e.g. reduce_mean).\n Many models have here a special token like [CLS] that you don't want to include into your average of token embeddings.\n :param padding_mask: Mask for the padding tokens. Those will also not be included in the pooling operations to prevent a bias by the number of padding tokens.\n :param input_ids: ids of the tokens in the vocab\n :param kwargs: kwargs\n :return: list of dicts containing preds, e.g. [{\"context\": \"some text\", \"vec\": [-0.01, 0.5 ...]}]\n \"\"\"\n\n if not hasattr(self, \"extraction_layer\") or not hasattr(self, \"extraction_strategy\"):\n raise ValueError(\"`extraction_layer` or `extraction_strategy` not specified for LM. \"\n \"Make sure to set both, e.g. via Inferencer(extraction_strategy='cls_token', extraction_layer=-1)`\")\n\n # unpack the tuple from LM forward pass\n sequence_output = logits[0][0]\n pooled_output = logits[0][1]\n\n # aggregate vectors\n if self.extraction_strategy == \"pooled\":\n if self.extraction_layer != -1:\n raise ValueError(f\"Pooled output only works for the last layer, but got extraction_layer = {self.extraction_layer}. 
Please set `extraction_layer=-1`.)\")\n vecs = pooled_output.cpu().numpy()\n elif self.extraction_strategy == \"per_token\":\n vecs = sequence_output.cpu().numpy()\n elif self.extraction_strategy == \"reduce_mean\":\n vecs = self._pool_tokens(sequence_output, padding_mask, self.extraction_strategy, ignore_first_token=ignore_first_token)\n elif self.extraction_strategy == \"reduce_max\":\n vecs = self._pool_tokens(sequence_output, padding_mask, self.extraction_strategy, ignore_first_token=ignore_first_token)\n elif self.extraction_strategy == \"cls_token\":\n vecs = sequence_output[:, 0, :].cpu().numpy()\n elif self.extraction_strategy == \"s3e\":\n vecs = self._pool_tokens(sequence_output, padding_mask, self.extraction_strategy,\n ignore_first_token=ignore_first_token,\n input_ids=input_ids, s3e_stats=self.s3e_stats)\n else:\n raise NotImplementedError\n\n preds = []\n for vec, sample in zip(vecs, samples):\n pred = {}\n pred[\"context\"] = sample.tokenized[\"tokens\"]\n pred[\"vec\"] = vec\n preds.append(pred)\n return preds\n\n def _pool_tokens(self, sequence_output, padding_mask, strategy, ignore_first_token, input_ids=None, s3e_stats=None):\n\n token_vecs = sequence_output.cpu().numpy()\n # we only take the aggregated value of non-padding tokens\n padding_mask = padding_mask.cpu().numpy()\n ignore_mask_2d = padding_mask == 0\n # sometimes we want to exclude the CLS token as well from our aggregation operation\n if ignore_first_token:\n ignore_mask_2d[:, 0] = True\n ignore_mask_3d = np.zeros(token_vecs.shape, dtype=bool)\n ignore_mask_3d[:, :, :] = ignore_mask_2d[:, :, np.newaxis]\n if strategy == \"reduce_max\":\n pooled_vecs = np.ma.array(data=token_vecs, mask=ignore_mask_3d).max(axis=1).data\n if strategy == \"reduce_mean\":\n pooled_vecs = np.ma.array(data=token_vecs, mask=ignore_mask_3d).mean(axis=1).data\n if strategy == \"s3e\":\n input_ids = input_ids.cpu().numpy()\n pooled_vecs = s3e_pooling(token_embs=token_vecs,\n token_ids=input_ids,\n token_weights=s3e_stats[\"token_weights\"],\n centroids=s3e_stats[\"centroids\"],\n token_to_cluster=s3e_stats[\"token_to_cluster\"],\n svd_components=s3e_stats.get(\"svd_components\", None),\n mask=padding_mask == 0)\n return pooled_vecs\n\n\nclass Bert(LanguageModel):\n \"\"\"\n A BERT model that wraps HuggingFace's implementation\n (https://github.com/huggingface/transformers) to fit the LanguageModel class.\n Paper: https://arxiv.org/abs/1810.04805\n\n \"\"\"\n\n def __init__(self):\n super(Bert, self).__init__()\n self.model = None\n self.name = \"bert\"\n\n @classmethod\n def from_scratch(cls, vocab_size, name=\"bert\", language=\"en\"):\n bert = cls()\n bert.name = name\n bert.language = language\n config = BertConfig(vocab_size=vocab_size)\n bert.model = BertModel(config)\n return bert\n\n @classmethod\n def load(cls, pretrained_model_name_or_path, language=None, **kwargs):\n \"\"\"\n Load a pretrained model by supplying\n\n * the name of a remote model on s3 (\"bert-base-cased\" ...)\n * OR a local path of a model trained via transformers (\"some_dir/huggingface_model\")\n * OR a local path of a model trained via FARM (\"some_dir/farm_model\")\n\n :param pretrained_model_name_or_path: The path of the saved pretrained model or its name.\n :type pretrained_model_name_or_path: str\n\n \"\"\"\n\n bert = cls()\n if \"farm_lm_name\" in kwargs:\n bert.name = kwargs[\"farm_lm_name\"]\n else:\n bert.name = pretrained_model_name_or_path\n # We need to differentiate between loading model using FARM format and Pytorch-Transformers format\n 
farm_lm_config = Path(pretrained_model_name_or_path) / \"language_model_config.json\"\n if os.path.exists(farm_lm_config):\n # FARM style\n bert_config = BertConfig.from_pretrained(farm_lm_config)\n farm_lm_model = Path(pretrained_model_name_or_path) / \"language_model.bin\"\n bert.model = BertModel.from_pretrained(farm_lm_model, config=bert_config, **kwargs)\n bert.language = bert.model.config.language\n else:\n # Pytorch-transformer Style\n bert.model = BertModel.from_pretrained(str(pretrained_model_name_or_path), **kwargs)\n bert.language = cls._get_or_infer_language_from_name(language, pretrained_model_name_or_path)\n return bert\n\n def forward(\n self,\n input_ids,\n segment_ids,\n padding_mask,\n **kwargs,\n ):\n \"\"\"\n Perform the forward pass of the BERT model.\n\n :param input_ids: The ids of each token in the input sequence. Is a tensor of shape [batch_size, max_seq_len]\n :type input_ids: torch.Tensor\n :param segment_ids: The id of the segment. For example, in next sentence prediction, the tokens in the\n first sentence are marked with 0 and those in the second are marked with 1.\n It is a tensor of shape [batch_size, max_seq_len]\n :type segment_ids: torch.Tensor\n :param padding_mask: A mask that assigns a 1 to valid input tokens and 0 to padding tokens\n of shape [batch_size, max_seq_len]\n :return: Embeddings for each token in the input sequence.\n\n \"\"\"\n output_tuple = self.model(\n input_ids,\n token_type_ids=segment_ids,\n attention_mask=padding_mask,\n )\n if self.model.encoder.output_hidden_states == True:\n sequence_output, pooled_output, all_hidden_states = output_tuple[0], output_tuple[1], output_tuple[2]\n return sequence_output, pooled_output, all_hidden_states\n else:\n sequence_output, pooled_output = output_tuple[0], output_tuple[1]\n return sequence_output, pooled_output\n\n def enable_hidden_states_output(self):\n self.model.encoder.output_hidden_states = True\n\n def disable_hidden_states_output(self):\n self.model.encoder.output_hidden_states = False\n\n\nclass Albert(LanguageModel):\n \"\"\"\n An ALBERT model that wraps the HuggingFace's implementation\n (https://github.com/huggingface/transformers) to fit the LanguageModel class.\n\n \"\"\"\n\n def __init__(self):\n super(Albert, self).__init__()\n self.model = None\n self.name = \"albert\"\n\n @classmethod\n def load(cls, pretrained_model_name_or_path, language=None, **kwargs):\n \"\"\"\n Load a language model either by supplying\n\n * the name of a remote model on s3 (\"albert-base\" ...)\n * or a local path of a model trained via transformers (\"some_dir/huggingface_model\")\n * or a local path of a model trained via FARM (\"some_dir/farm_model\")\n\n :param pretrained_model_name_or_path: name or path of a model\n :param language: (Optional) Name of language the model was trained for (e.g. 
\"german\").\n If not supplied, FARM will try to infer it from the model name.\n :return: Language Model\n\n \"\"\"\n albert = cls()\n if \"farm_lm_name\" in kwargs:\n albert.name = kwargs[\"farm_lm_name\"]\n else:\n albert.name = pretrained_model_name_or_path\n # We need to differentiate between loading model using FARM format and Pytorch-Transformers format\n farm_lm_config = Path(pretrained_model_name_or_path) / \"language_model_config.json\"\n if os.path.exists(farm_lm_config):\n # FARM style\n config = AlbertConfig.from_pretrained(farm_lm_config)\n farm_lm_model = Path(pretrained_model_name_or_path) / \"language_model.bin\"\n albert.model = AlbertModel.from_pretrained(farm_lm_model, config=config, **kwargs)\n albert.language = albert.model.config.language\n else:\n # Huggingface transformer Style\n albert.model = AlbertModel.from_pretrained(str(pretrained_model_name_or_path), **kwargs)\n albert.language = cls._get_or_infer_language_from_name(language, pretrained_model_name_or_path)\n return albert\n\n def forward(\n self,\n input_ids,\n segment_ids,\n padding_mask,\n **kwargs,\n ):\n \"\"\"\n Perform the forward pass of the Albert model.\n\n :param input_ids: The ids of each token in the input sequence. Is a tensor of shape [batch_size, max_seq_len]\n :type input_ids: torch.Tensor\n :param segment_ids: The id of the segment. For example, in next sentence prediction, the tokens in the\n first sentence are marked with 0 and those in the second are marked with 1.\n It is a tensor of shape [batch_size, max_seq_len]\n :type segment_ids: torch.Tensor\n :param padding_mask: A mask that assigns a 1 to valid input tokens and 0 to padding tokens\n of shape [batch_size, max_seq_len]\n :return: Embeddings for each token in the input sequence.\n\n \"\"\"\n output_tuple = self.model(\n input_ids,\n token_type_ids=segment_ids,\n attention_mask=padding_mask,\n )\n if self.model.encoder.output_hidden_states == True:\n sequence_output, pooled_output, all_hidden_states = output_tuple[0], output_tuple[1], output_tuple[2]\n return sequence_output, pooled_output, all_hidden_states\n else:\n sequence_output, pooled_output = output_tuple[0], output_tuple[1]\n return sequence_output, pooled_output\n\n def enable_hidden_states_output(self):\n self.model.encoder.output_hidden_states = True\n\n def disable_hidden_states_output(self):\n self.model.encoder.output_hidden_states = False\n\n\nclass Roberta(LanguageModel):\n \"\"\"\n A roberta model that wraps the HuggingFace's implementation\n (https://github.com/huggingface/transformers) to fit the LanguageModel class.\n Paper: https://arxiv.org/abs/1907.11692\n\n \"\"\"\n\n def __init__(self):\n super(Roberta, self).__init__()\n self.model = None\n self.name = \"roberta\"\n\n @classmethod\n def load(cls, pretrained_model_name_or_path, language=None, **kwargs):\n \"\"\"\n Load a language model either by supplying\n\n * the name of a remote model on s3 (\"roberta-base\" ...)\n * or a local path of a model trained via transformers (\"some_dir/huggingface_model\")\n * or a local path of a model trained via FARM (\"some_dir/farm_model\")\n\n :param pretrained_model_name_or_path: name or path of a model\n :param language: (Optional) Name of language the model was trained for (e.g. 
\"german\").\n If not supplied, FARM will try to infer it from the model name.\n :return: Language Model\n\n \"\"\"\n roberta = cls()\n if \"farm_lm_name\" in kwargs:\n roberta.name = kwargs[\"farm_lm_name\"]\n else:\n roberta.name = pretrained_model_name_or_path\n # We need to differentiate between loading model using FARM format and Pytorch-Transformers format\n farm_lm_config = Path(pretrained_model_name_or_path) / \"language_model_config.json\"\n if os.path.exists(farm_lm_config):\n # FARM style\n config = RobertaConfig.from_pretrained(farm_lm_config)\n farm_lm_model = Path(pretrained_model_name_or_path) / \"language_model.bin\"\n roberta.model = RobertaModel.from_pretrained(farm_lm_model, config=config, **kwargs)\n roberta.language = roberta.model.config.language\n else:\n # Huggingface transformer Style\n roberta.model = RobertaModel.from_pretrained(str(pretrained_model_name_or_path), **kwargs)\n roberta.language = cls._get_or_infer_language_from_name(language, pretrained_model_name_or_path)\n return roberta\n\n def forward(\n self,\n input_ids,\n segment_ids,\n padding_mask,\n **kwargs,\n ):\n \"\"\"\n Perform the forward pass of the Roberta model.\n\n :param input_ids: The ids of each token in the input sequence. Is a tensor of shape [batch_size, max_seq_len]\n :type input_ids: torch.Tensor\n :param segment_ids: The id of the segment. For example, in next sentence prediction, the tokens in the\n first sentence are marked with 0 and those in the second are marked with 1.\n It is a tensor of shape [batch_size, max_seq_len]\n :type segment_ids: torch.Tensor\n :param padding_mask: A mask that assigns a 1 to valid input tokens and 0 to padding tokens\n of shape [batch_size, max_seq_len]\n :return: Embeddings for each token in the input sequence.\n\n \"\"\"\n output_tuple = self.model(\n input_ids,\n token_type_ids=segment_ids,\n attention_mask=padding_mask,\n )\n if self.model.encoder.output_hidden_states == True:\n sequence_output, pooled_output, all_hidden_states = output_tuple[0], output_tuple[1], output_tuple[2]\n return sequence_output, pooled_output, all_hidden_states\n else:\n sequence_output, pooled_output = output_tuple[0], output_tuple[1]\n return sequence_output, pooled_output\n\n def enable_hidden_states_output(self):\n self.model.encoder.output_hidden_states = True\n\n def disable_hidden_states_output(self):\n self.model.encoder.output_hidden_states = False\n\n\nclass XLMRoberta(LanguageModel):\n \"\"\"\n A roberta model that wraps the HuggingFace's implementation\n (https://github.com/huggingface/transformers) to fit the LanguageModel class.\n Paper: https://arxiv.org/abs/1907.11692\n\n \"\"\"\n\n def __init__(self):\n super(XLMRoberta, self).__init__()\n self.model = None\n self.name = \"xlm_roberta\"\n\n @classmethod\n def load(cls, pretrained_model_name_or_path, language=None, **kwargs):\n \"\"\"\n Load a language model either by supplying\n\n * the name of a remote model on s3 (\"xlm-roberta-base\" ...)\n * or a local path of a model trained via transformers (\"some_dir/huggingface_model\")\n * or a local path of a model trained via FARM (\"some_dir/farm_model\")\n\n :param pretrained_model_name_or_path: name or path of a model\n :param language: (Optional) Name of language the model was trained for (e.g. 
\"german\").\n If not supplied, FARM will try to infer it from the model name.\n :return: Language Model\n\n \"\"\"\n xlm_roberta = cls()\n if \"farm_lm_name\" in kwargs:\n xlm_roberta.name = kwargs[\"farm_lm_name\"]\n else:\n xlm_roberta.name = pretrained_model_name_or_path\n # We need to differentiate between loading model using FARM format and Pytorch-Transformers format\n farm_lm_config = Path(pretrained_model_name_or_path) / \"language_model_config.json\"\n if os.path.exists(farm_lm_config):\n # FARM style\n config = XLMRobertaConfig.from_pretrained(farm_lm_config)\n farm_lm_model = Path(pretrained_model_name_or_path) / \"language_model.bin\"\n xlm_roberta.model = XLMRobertaModel.from_pretrained(farm_lm_model, config=config, **kwargs)\n xlm_roberta.language = xlm_roberta.model.config.language\n else:\n # Huggingface transformer Style\n xlm_roberta.model = XLMRobertaModel.from_pretrained(str(pretrained_model_name_or_path), **kwargs)\n xlm_roberta.language = cls._get_or_infer_language_from_name(language, pretrained_model_name_or_path)\n return xlm_roberta\n\n def forward(\n self,\n input_ids,\n segment_ids,\n padding_mask,\n **kwargs,\n ):\n \"\"\"\n Perform the forward pass of the XLMRoberta model.\n\n :param input_ids: The ids of each token in the input sequence. Is a tensor of shape [batch_size, max_seq_len]\n :type input_ids: torch.Tensor\n :param segment_ids: The id of the segment. For example, in next sentence prediction, the tokens in the\n first sentence are marked with 0 and those in the second are marked with 1.\n It is a tensor of shape [batch_size, max_seq_len]\n :type segment_ids: torch.Tensor\n :param padding_mask: A mask that assigns a 1 to valid input tokens and 0 to padding tokens\n of shape [batch_size, max_seq_len]\n :return: Embeddings for each token in the input sequence.\n\n \"\"\"\n output_tuple = self.model(\n input_ids,\n token_type_ids=segment_ids,\n attention_mask=padding_mask,\n )\n if self.model.encoder.output_hidden_states == True:\n sequence_output, pooled_output, all_hidden_states = output_tuple[0], output_tuple[1], output_tuple[2]\n return sequence_output, pooled_output, all_hidden_states\n else:\n sequence_output, pooled_output = output_tuple[0], output_tuple[1]\n return sequence_output, pooled_output\n\n def enable_hidden_states_output(self):\n self.model.encoder.output_hidden_states = True\n\n def disable_hidden_states_output(self):\n self.model.encoder.output_hidden_states = False\n\n\nclass DistilBert(LanguageModel):\n \"\"\"\n A DistilBERT model that wraps HuggingFace's implementation\n (https://github.com/huggingface/transformers) to fit the LanguageModel class.\n\n NOTE:\n - DistilBert doesn’t have token_type_ids, you don’t need to indicate which\n token belongs to which segment. Just separate your segments with the separation\n token tokenizer.sep_token (or [SEP])\n - Unlike the other BERT variants, DistilBert does not output the\n pooled_output. 
An additional pooler is initialized.\n\n \"\"\"\n\n def __init__(self):\n super(DistilBert, self).__init__()\n self.model = None\n self.name = \"distilbert\"\n self.pooler = None\n\n @classmethod\n def load(cls, pretrained_model_name_or_path, language=None, **kwargs):\n \"\"\"\n Load a pretrained model by supplying\n\n * the name of a remote model on s3 (\"distilbert-base-german-cased\" ...)\n * OR a local path of a model trained via transformers (\"some_dir/huggingface_model\")\n * OR a local path of a model trained via FARM (\"some_dir/farm_model\")\n\n :param pretrained_model_name_or_path: The path of the saved pretrained model or its name.\n :type pretrained_model_name_or_path: str\n\n \"\"\"\n\n distilbert = cls()\n if \"farm_lm_name\" in kwargs:\n distilbert.name = kwargs[\"farm_lm_name\"]\n else:\n distilbert.name = pretrained_model_name_or_path\n # We need to differentiate between loading model using FARM format and Pytorch-Transformers format\n farm_lm_config = Path(pretrained_model_name_or_path) / \"language_model_config.json\"\n if os.path.exists(farm_lm_config):\n # FARM style\n config = DistilBertConfig.from_pretrained(farm_lm_config)\n farm_lm_model = Path(pretrained_model_name_or_path) / \"language_model.bin\"\n distilbert.model = DistilBertModel.from_pretrained(farm_lm_model, config=config, **kwargs)\n distilbert.language = distilbert.model.config.language\n else:\n # Pytorch-transformer Style\n distilbert.model = DistilBertModel.from_pretrained(str(pretrained_model_name_or_path), **kwargs)\n distilbert.language = cls._get_or_infer_language_from_name(language, pretrained_model_name_or_path)\n config = distilbert.model.config\n\n # DistilBERT does not provide a pooled_output by default. Therefore, we need to initialize an extra pooler.\n # The pooler takes the first hidden representation & feeds it to a dense layer of (hidden_dim x hidden_dim).\n # We don't want a dropout in the end of the pooler, since we do that already in the adaptive model before we\n # feed everything to the prediction head\n config.summary_last_dropout = 0\n config.summary_type = 'first'\n config.summary_activation = 'tanh'\n distilbert.pooler = SequenceSummary(config)\n distilbert.pooler.apply(distilbert.model._init_weights)\n return distilbert\n\n def forward(\n self,\n input_ids,\n padding_mask,\n **kwargs,\n ):\n \"\"\"\n Perform the forward pass of the DistilBERT model.\n\n :param input_ids: The ids of each token in the input sequence. 
Is a tensor of shape [batch_size, max_seq_len]\n :type input_ids: torch.Tensor\n :param padding_mask: A mask that assigns a 1 to valid input tokens and 0 to padding tokens\n of shape [batch_size, max_seq_len]\n :return: Embeddings for each token in the input sequence.\n\n \"\"\"\n output_tuple = self.model(\n input_ids,\n attention_mask=padding_mask,\n )\n # We need to manually aggregate that to get a pooled output (one vec per seq)\n pooled_output = self.pooler(output_tuple[0])\n if self.model.config.output_hidden_states == True:\n sequence_output, all_hidden_states = output_tuple[0], output_tuple[1]\n return sequence_output, pooled_output, all_hidden_states\n else:\n sequence_output = output_tuple[0]\n return sequence_output, pooled_output\n\n def enable_hidden_states_output(self):\n self.model.config.output_hidden_states = True\n\n def disable_hidden_states_output(self):\n self.model.config.output_hidden_states = False\n\n\nclass XLNet(LanguageModel):\n \"\"\"\n A XLNet model that wraps the HuggingFace's implementation\n (https://github.com/huggingface/transformers) to fit the LanguageModel class.\n Paper: https://arxiv.org/abs/1906.08237\n \"\"\"\n\n def __init__(self):\n super(XLNet, self).__init__()\n self.model = None\n self.name = \"xlnet\"\n self.pooler = None\n\n @classmethod\n def load(cls, pretrained_model_name_or_path, language=None, **kwargs):\n \"\"\"\n Load a language model either by supplying\n\n * the name of a remote model on s3 (\"xlnet-base-cased\" ...)\n * or a local path of a model trained via transformers (\"some_dir/huggingface_model\")\n * or a local path of a model trained via FARM (\"some_dir/farm_model\")\n\n :param pretrained_model_name_or_path: name or path of a model\n :param language: (Optional) Name of language the model was trained for (e.g. \"german\").\n If not supplied, FARM will try to infer it from the model name.\n :return: Language Model\n\n \"\"\"\n xlnet = cls()\n if \"farm_lm_name\" in kwargs:\n xlnet.name = kwargs[\"farm_lm_name\"]\n else:\n xlnet.name = pretrained_model_name_or_path\n # We need to differentiate between loading model using FARM format and Pytorch-Transformers format\n farm_lm_config = Path(pretrained_model_name_or_path) / \"language_model_config.json\"\n if os.path.exists(farm_lm_config):\n # FARM style\n config = XLNetConfig.from_pretrained(farm_lm_config)\n farm_lm_model = Path(pretrained_model_name_or_path) / \"language_model.bin\"\n xlnet.model = XLNetModel.from_pretrained(farm_lm_model, config=config, **kwargs)\n xlnet.language = xlnet.model.config.language\n else:\n # Pytorch-transformer Style\n xlnet.model = XLNetModel.from_pretrained(str(pretrained_model_name_or_path), **kwargs)\n xlnet.language = cls._get_or_infer_language_from_name(language, pretrained_model_name_or_path)\n config = xlnet.model.config\n # XLNet does not provide a pooled_output by default. Therefore, we need to initialize an extra pooler.\n # The pooler takes the last hidden representation & feeds it to a dense layer of (hidden_dim x hidden_dim).\n # We don't want a dropout in the end of the pooler, since we do that already in the adaptive model before we\n # feed everything to the prediction head\n config.summary_last_dropout = 0\n xlnet.pooler = SequenceSummary(config)\n xlnet.pooler.apply(xlnet.model._init_weights)\n return xlnet\n\n def forward(\n self,\n input_ids,\n segment_ids,\n padding_mask,\n **kwargs,\n ):\n \"\"\"\n Perform the forward pass of the XLNet model.\n\n :param input_ids: The ids of each token in the input sequence. 
Is a tensor of shape [batch_size, max_seq_len]\n :type input_ids: torch.Tensor\n :param segment_ids: The id of the segment. For example, in next sentence prediction, the tokens in the\n first sentence are marked with 0 and those in the second are marked with 1.\n It is a tensor of shape [batch_size, max_seq_len]\n :type segment_ids: torch.Tensor\n :param padding_mask: A mask that assigns a 1 to valid input tokens and 0 to padding tokens\n of shape [batch_size, max_seq_len]\n :return: Embeddings for each token in the input sequence.\n \"\"\"\n\n # Note: XLNet has a couple of special input tensors for pretraining / text generation (perm_mask, target_mapping ...)\n # We will need to implement them, if we wanna support LM adaptation\n\n output_tuple = self.model(\n input_ids,\n token_type_ids=segment_ids,\n attention_mask=padding_mask,\n )\n # XLNet also only returns the sequence_output (one vec per token)\n # We need to manually aggregate that to get a pooled output (one vec per seq)\n # TODO verify that this is really doing correct pooling\n pooled_output = self.pooler(output_tuple[0])\n\n if self.model.output_hidden_states == True:\n sequence_output, all_hidden_states = output_tuple[0], output_tuple[1]\n return sequence_output, pooled_output, all_hidden_states\n else:\n sequence_output = output_tuple[0]\n return sequence_output, pooled_output\n\n def enable_hidden_states_output(self):\n self.model.output_hidden_states = True\n\n def disable_hidden_states_output(self):\n self.model.output_hidden_states = False\n\nclass EmbeddingConfig():\n \"\"\"\n Config for Word Embeddings Models.\n Necessary to work with Bert and other LM style functionality\n \"\"\"\n def __init__(self,\n name=None,\n embeddings_filename=None,\n vocab_filename=None,\n vocab_size=None,\n hidden_size=None,\n language=None,\n **kwargs):\n \"\"\"\n :param name: Name of config\n :param embeddings_filename:\n :param vocab_filename:\n :param vocab_size:\n :param hidden_size:\n :param language:\n :param kwargs:\n \"\"\"\n self.name = name\n self.embeddings_filename = embeddings_filename\n self.vocab_filename = vocab_filename\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.language = language\n if len(kwargs) > 0:\n logger.info(f\"Passed unused params {str(kwargs)} to the EmbeddingConfig. Might not be a problem.\")\n\n def to_dict(self):\n \"\"\"\n Serializes this instance to a Python dictionary.\n\n Returns:\n :obj:`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,\n \"\"\"\n output = copy.deepcopy(self.__dict__)\n if hasattr(self.__class__, \"model_type\"):\n output[\"model_type\"] = self.__class__.model_type\n return output\n\n def to_json_string(self):\n \"\"\"\n Serializes this instance to a JSON string.\n\n Returns:\n :obj:`string`: String containing all the attributes that make up this configuration instance in JSON format.\n \"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\n\n\nclass EmbeddingModel():\n \"\"\"\n Embedding Model that combines\n - Embeddings\n - Config Object\n - Vocab\n Necessary to work with Bert and other LM style functionality\n \"\"\"\n\n def __init__(self,\n embedding_file,\n config_dict,\n vocab_file):\n \"\"\"\n\n :param embedding_file: filename of embeddings. 
Usually in txt format, with the word and associated vector on each line\n :type embedding_file: str\n :param config_dict: dictionary containing config elements\n :type config_dict: dict\n :param vocab_file: filename of vocab, each line contains a word\n :type vocab_file: str\n \"\"\"\n self.config = EmbeddingConfig(**config_dict)\n self.vocab = load_vocab(vocab_file)\n temp = wordembedding_utils.load_embedding_vectors(embedding_file=embedding_file, vocab=self.vocab)\n self.embeddings = torch.from_numpy(temp).float()\n assert \"[UNK]\" in self.vocab, \"No [UNK] symbol in Wordembeddingmodel! Aborting\"\n self.unk_idx = self.vocab[\"[UNK]\"]\n\n def save(self,save_dir):\n # Save Weights\n save_name = Path(save_dir) / self.config.embeddings_filename\n embeddings = self.embeddings.cpu().numpy()\n with open(save_name, \"w\") as f:\n for w, vec in tqdm(zip(self.vocab, embeddings), desc=\"Saving embeddings\", total=embeddings.shape[0]):\n f.write(w + \" \" + \" \".join([\"%.6f\" % v for v in vec]) + \"\\n\")\n f.close()\n\n # Save vocab\n save_name = Path(save_dir) / self.config.vocab_filename\n with open(save_name, \"w\") as f:\n for w in self.vocab:\n f.write(w + \"\\n\")\n f.close()\n\n\n def resize_token_embeddings(self, new_num_tokens=None):\n # function is called as a vocab length validation inside FARM\n # fast way of returning an object with num_embeddings attribute (needed for some checks)\n # TODO add functionality to add words/tokens to a wordembeddingmodel after initialization\n temp = {}\n temp[\"num_embeddings\"] = len(self.vocab)\n temp = DotMap(temp)\n return temp\n\n\n\nclass WordEmbedding_LM(LanguageModel):\n \"\"\"\n A Language Model based only on word embeddings\n - Inside FARM, WordEmbedding Language Models must have a fixed vocabulary\n - Each (known) word in some text input is projected to its vector representation\n - Pooling operations can be applied for representing whole text sequences\n\n \"\"\"\n\n def __init__(self):\n super(WordEmbedding_LM, self).__init__()\n self.model = None\n self.name = \"WordEmbedding_LM\"\n self.pooler = None\n\n\n @classmethod\n def load(cls, pretrained_model_name_or_path, language=None, **kwargs):\n \"\"\"\n Load a language model either by supplying\n\n * a local path of a model trained via FARM (\"some_dir/farm_model\")\n * the name of a remote model on s3\n\n :param pretrained_model_name_or_path: name or path of a model\n :param language: (Optional) Name of language the model was trained for (e.g. 
\"german\").\n If not supplied, FARM will try to infer it from the model name.\n :return: Language Model\n\n \"\"\"\n wordembedding_LM = cls()\n if \"farm_lm_name\" in kwargs:\n wordembedding_LM.name = kwargs[\"farm_lm_name\"]\n else:\n wordembedding_LM.name = pretrained_model_name_or_path\n # We need to differentiate between loading model from local or remote\n farm_lm_config = Path(pretrained_model_name_or_path) / \"language_model_config.json\"\n if os.path.exists(farm_lm_config):\n # local dir\n config = json.load(open(farm_lm_config,\"r\"))\n farm_lm_model = Path(pretrained_model_name_or_path) / config[\"embeddings_filename\"]\n vocab_filename = Path(pretrained_model_name_or_path) / config[\"vocab_filename\"]\n wordembedding_LM.model = EmbeddingModel(embedding_file=str(farm_lm_model), config_dict=config, vocab_file=str(vocab_filename))\n wordembedding_LM.language = config.get(\"language\", None)\n else:\n # from remote or cache\n config_dict, resolved_vocab_file, resolved_model_file = wordembedding_utils.load_model(pretrained_model_name_or_path, **kwargs)\n model = EmbeddingModel(embedding_file=resolved_model_file,\n config_dict=config_dict,\n vocab_file=resolved_vocab_file)\n wordembedding_LM.model = model\n wordembedding_LM.language = model.config.language\n\n\n # taking the mean for getting the pooled representation\n # TODO: extend this to other pooling operations or remove\n wordembedding_LM.pooler = lambda x: torch.mean(x, dim=0)\n return wordembedding_LM\n\n def save(self, save_dir):\n \"\"\"\n Save the model embeddings and its config file so that it can be loaded again.\n # TODO make embeddings trainable and save trained embeddings\n # TODO save model weights as pytorch model bin for more efficient loading and saving\n :param save_dir: The directory in which the model should be saved.\n :type save_dir: str\n \"\"\"\n #save model\n self.model.save(save_dir=save_dir)\n #save config\n self.save_config(save_dir=save_dir)\n\n\n def forward(self, input_ids, **kwargs,):\n \"\"\"\n Perform the forward pass of the wordembedding model.\n This is just the mapping of words to their corresponding embeddings\n \"\"\"\n sequence_output = []\n pooled_output = []\n # TODO do not use padding items in pooled output\n for sample in input_ids:\n sample_embeddings = []\n for index in sample:\n #if index != self.model.unk_idx:\n sample_embeddings.append(self.model.embeddings[index])\n sample_embeddings = torch.stack(sample_embeddings)\n sequence_output.append(sample_embeddings)\n pooled_output.append(self.pooler(sample_embeddings))\n\n sequence_output = torch.stack(sequence_output)\n pooled_output = torch.stack(pooled_output)\n m = nn.BatchNorm1d(pooled_output.shape[1])\n # use batchnorm for more stable learning\n # but disable it, if we have batch size of one (cannot compute batchnorm stats with only one sample)\n if pooled_output.shape[0] > 1:\n pooled_output = m(pooled_output)\n return sequence_output, pooled_output\n\n def trim_vocab(self, token_counts, processor, min_threshold):\n \"\"\" Remove embeddings for rare tokens in your corpus (< `min_threshold` occurrences) to reduce model size\"\"\"\n logger.info(f\"Removing tokens with less than {min_threshold} occurrences from model vocab\")\n new_vocab = OrderedDict()\n valid_tok_indices = []\n cnt = 0\n old_num_emb = self.model.embeddings.shape[0]\n for token, tok_idx in self.model.vocab.items():\n if token_counts.get(token, 0) >= min_threshold or token in (\"[CLS]\",\"[SEP]\",\"[UNK]\",\"[PAD]\",\"[MASK]\"):\n new_vocab[token] = cnt\n 
valid_tok_indices.append(tok_idx)\n cnt += 1\n\n self.model.vocab = new_vocab\n self.model.embeddings = self.model.embeddings[valid_tok_indices, :]\n\n # update tokenizer vocab in place\n processor.tokenizer.vocab = self.model.vocab\n processor.tokenizer.ids_to_tokens = OrderedDict()\n for k, v in processor.tokenizer.vocab.items():\n processor.tokenizer.ids_to_tokens[v] = k\n\n logger.info(f\"Reduced vocab from {old_num_emb} to {self.model.embeddings.shape[0]}\")\n\n def normalize_embeddings(self, zero_mean=True, pca_removal=False, pca_n_components=300, pca_n_top_components=10,\n use_mean_vec_for_special_tokens=True, n_special_tokens=5):\n \"\"\" Normalize word embeddings as in https://arxiv.org/pdf/1808.06305.pdf\n (e.g. used for S3E Pooling of sentence embeddings)\n \n :param zero_mean: Whether to center embeddings via subtracting mean\n :type zero_mean: bool\n :param pca_removal: Whether to remove PCA components\n :type pca_removal: bool\n :param pca_n_components: Number of PCA components to use for fitting\n :type pca_n_components: int\n :param pca_n_top_components: Number of PCA components to remove\n :type pca_n_top_components: int\n :param use_mean_vec_for_special_tokens: Whether to replace embedding of special tokens with the mean embedding\n :type use_mean_vec_for_special_tokens: bool\n :param n_special_tokens: Number of special tokens like CLS, UNK etc. (used if `use_mean_vec_for_special_tokens`). \n Note: We expect the special tokens to be the first `n_special_tokens` entries of the vocab.\n :type n_special_tokens: int\n :return: None\n \"\"\"\n\n if zero_mean:\n logger.info('Removing mean from embeddings')\n # self.model.embeddings[:n_special_tokens, :] = torch.zeros((n_special_tokens, 300))\n mean_vec = torch.mean(self.model.embeddings, 0)\n self.model.embeddings = self.model.embeddings - mean_vec\n\n if use_mean_vec_for_special_tokens:\n self.model.embeddings[:n_special_tokens, :] = mean_vec\n\n if pca_removal:\n from sklearn.decomposition import PCA\n logger.info('Removing projections on top PCA components from embeddings (see https://arxiv.org/pdf/1808.06305.pdf)')\n pca = PCA(n_components=pca_n_components)\n pca.fit(self.model.embeddings.cpu().numpy())\n\n U1 = pca.components_\n explained_variance = pca.explained_variance_\n\n # Removing projections on top components\n PVN_dims = pca_n_top_components\n for emb_idx in tqdm(range(self.model.embeddings.shape[0]), desc=\"Removing projections\"):\n for pca_idx, u in enumerate(U1[0:PVN_dims]):\n ratio = (explained_variance[pca_idx] - explained_variance[PVN_dims]) / explained_variance[pca_idx]\n self.model.embeddings[emb_idx] = self.model.embeddings[emb_idx] - ratio * np.dot(u.transpose(), self.model.embeddings[emb_idx]) * u\n\n\nclass Electra(LanguageModel):\n \"\"\"\n ELECTRA is a new pre-training approach which trains two transformer models:\n the generator and the discriminator. The generator replaces tokens in a sequence,\n and is therefore trained as a masked language model. The discriminator, which is\n the model we're interested in, tries to identify which tokens were replaced by\n the generator in the sequence.\n\n The ELECTRA model here wraps HuggingFace's implementation\n (https://github.com/huggingface/transformers) to fit the LanguageModel class.\n\n NOTE:\n - Electra does not output the pooled_output. 
An additional pooler is initialized.\n\n \"\"\"\n\n def __init__(self):\n super(Electra, self).__init__()\n self.model = None\n self.name = \"electra\"\n self.pooler = None\n\n @classmethod\n def load(cls, pretrained_model_name_or_path, language=None, **kwargs):\n \"\"\"\n Load a pretrained model by supplying\n\n * the name of a remote model on s3 (\"google/electra-base-discriminator\" ...)\n * OR a local path of a model trained via transformers (\"some_dir/huggingface_model\")\n * OR a local path of a model trained via FARM (\"some_dir/farm_model\")\n\n :param pretrained_model_name_or_path: The path of the saved pretrained model or its name.\n :type pretrained_model_name_or_path: str\n\n \"\"\"\n\n electra = cls()\n if \"farm_lm_name\" in kwargs:\n electra.name = kwargs[\"farm_lm_name\"]\n else:\n electra.name = pretrained_model_name_or_path\n # We need to differentiate between loading model using FARM format and Transformers format\n farm_lm_config = Path(pretrained_model_name_or_path) / \"language_model_config.json\"\n if os.path.exists(farm_lm_config):\n # FARM style\n config = ElectraConfig.from_pretrained(farm_lm_config)\n farm_lm_model = Path(pretrained_model_name_or_path) / \"language_model.bin\"\n electra.model = ElectraModel.from_pretrained(farm_lm_model, config=config, **kwargs)\n electra.language = electra.model.config.language\n else:\n # Transformers Style\n electra.model = ElectraModel.from_pretrained(str(pretrained_model_name_or_path), **kwargs)\n electra.language = cls._get_or_infer_language_from_name(language, pretrained_model_name_or_path)\n config = electra.model.config\n\n # ELECTRA does not provide a pooled_output by default. Therefore, we need to initialize an extra pooler.\n # The pooler takes the first hidden representation & feeds it to a dense layer of (hidden_dim x hidden_dim).\n # We don't want a dropout in the end of the pooler, since we do that already in the adaptive model before we\n # feed everything to the prediction head.\n # Note: ELECTRA uses gelu as activation (BERT uses tanh instead)\n config.summary_last_dropout = 0\n config.summary_type = 'first'\n config.summary_activation = 'gelu'\n electra.pooler = SequenceSummary(config)\n electra.pooler.apply(electra.model._init_weights)\n return electra\n\n def forward(\n self,\n input_ids,\n segment_ids,\n padding_mask,\n **kwargs,\n ):\n \"\"\"\n Perform the forward pass of the ELECTRA model.\n\n :param input_ids: The ids of each token in the input sequence. 
Is a tensor of shape [batch_size, max_seq_len]\n :type input_ids: torch.Tensor\n :param padding_mask: A mask that assigns a 1 to valid input tokens and 0 to padding tokens\n of shape [batch_size, max_seq_len]\n :return: Embeddings for each token in the input sequence.\n\n \"\"\"\n output_tuple = self.model(\n input_ids,\n token_type_ids=segment_ids,\n attention_mask=padding_mask,\n )\n\n # We need to manually aggregate that to get a pooled output (one vec per seq)\n pooled_output = self.pooler(output_tuple[0])\n\n if self.model.config.output_hidden_states == True:\n sequence_output, all_hidden_states = output_tuple[0], output_tuple[1]\n return sequence_output, pooled_output, all_hidden_states\n else:\n sequence_output = output_tuple[0]\n return sequence_output, pooled_output\n\n def enable_hidden_states_output(self):\n self.model.config.output_hidden_states = True\n\n def disable_hidden_states_output(self):\n self.model.config.output_hidden_states = False\n\n\nclass Camembert(Roberta):\n \"\"\"\n A Camembert model that wraps the HuggingFace's implementation\n (https://github.com/huggingface/transformers) to fit the LanguageModel class.\n \"\"\"\n def __init__(self):\n super(Camembert, self).__init__()\n self.model = None\n self.name = \"camembert\"\n\n @classmethod\n def load(cls, pretrained_model_name_or_path, language=None, **kwargs):\n \"\"\"\n Load a language model either by supplying\n\n * the name of a remote model on s3 (\"camembert-base\" ...)\n * or a local path of a model trained via transformers (\"some_dir/huggingface_model\")\n * or a local path of a model trained via FARM (\"some_dir/farm_model\")\n\n :param pretrained_model_name_or_path: name or path of a model\n :param language: (Optional) Name of language the model was trained for (e.g. \"german\").\n If not supplied, FARM will try to infer it from the model name.\n :return: Language Model\n\n \"\"\"\n camembert = cls()\n if \"farm_lm_name\" in kwargs:\n camembert.name = kwargs[\"farm_lm_name\"]\n else:\n camembert.name = pretrained_model_name_or_path\n # We need to differentiate between loading model using FARM format and Pytorch-Transformers format\n farm_lm_config = Path(pretrained_model_name_or_path) / \"language_model_config.json\"\n if os.path.exists(farm_lm_config):\n # FARM style\n config = CamembertConfig.from_pretrained(farm_lm_config)\n farm_lm_model = Path(pretrained_model_name_or_path) / \"language_model.bin\"\n camembert.model = CamembertModel.from_pretrained(farm_lm_model, config=config, **kwargs)\n camembert.language = camembert.model.config.language\n else:\n # Huggingface transformer Style\n camembert.model = CamembertModel.from_pretrained(str(pretrained_model_name_or_path), **kwargs)\n camembert.language = cls._get_or_infer_language_from_name(language, pretrained_model_name_or_path)\n return camembert\n" ]
[ [ "torch.stack", "numpy.zeros", "torch.nn.BatchNorm1d", "sklearn.decomposition.PCA", "numpy.ma.array", "torch.from_numpy", "torch.mean" ] ]
matfija/Projektivna-geometrija
[ "1d0df7e6009dffd45ff0b892cb1d3e5a8053f5c6" ]
[ "Izometrije-prostora/izvor/izometrije.py" ]
[ "#!/usr/bin/env python3\r\n\r\n# Ukljucivanje modula za matematiku\r\nimport numpy as np\r\nimport numpy.linalg as LA\r\n\r\n# Ukljucivanje modula za upozorenja\r\nimport warnings\r\n\r\n# NAPOMENA: svi razmatrani uglovi zadati su u radijanima,\r\n# sto je u skladu sa uobicajenom informatickom praksom\r\n\r\n# Matrica rotacije koja odgovara sopstvenim rotacijama\r\n# za ugao phi oko x-ose, theta oko y-ose, psi oko z-ose\r\n# odnosno obrnuto iz tacke gledista polaznog repera\r\ndef Euler2A(phi, theta, psi):\r\n # Greska ako nisu brojevi\r\n if not isinstance(phi, (int, float)) or\\\r\n not isinstance(theta, (int, float)) or\\\r\n not isinstance(psi, (int, float)):\r\n raise ValueError\r\n \r\n # Rotacija za ugao phi oko x-ose\r\n Rx = np.array([[ 1, 0, 0 ],\r\n [ 0, np.cos(phi), -np.sin(phi)],\r\n [ 0, np.sin(phi), np.cos(phi)]])\r\n\r\n # Rotacija za ugao theta oko y-ose\r\n Ry = np.array([[ np.cos(theta), 0, np.sin(theta)],\r\n [ 0, 1, 0 ],\r\n [-np.sin(theta), 0, np.cos(theta)]])\r\n\r\n # Rotacija za ugao psi oko z-ose\r\n Rz = np.array([[np.cos(psi), -np.sin(psi), 0 ],\r\n [np.sin(psi), np.cos(psi), 0 ],\r\n [ 0, 0, 1 ]])\r\n\r\n # Kompozicija u suprotnom redosledu,\r\n # prema odgovarajucoj teoremi\r\n return Rz @ Ry @ Rx\r\n\r\n# Jedinicni vektor i ugao takvi da ulazna matrica\r\n# rotacije odgovara rotaciji oko rezultujuceg vektora\r\n# za rezultujuci ugao, ogranicen izmedju 0 i pi\r\ndef AxisAngle(A):\r\n # Greska ako matrica nije ortogonalna\r\n # ili joj determinantna nije jedan\r\n if not np.allclose(A @ A.T, np.eye(3))\\\r\n or not np.isclose(LA.det(A), 1):\r\n raise ValueError\r\n\r\n # Racunanje sopstvenih vrednosti i vektora\r\n w, v = LA.eig(A)\r\n\r\n # Izvlacenje indeksa sopstvene vrednosti 1\r\n ind = np.argwhere(np.isclose(w, 1))[0,0]\r\n\r\n # Izvlacenje odgovarajuceg sopstvenog vektora; on\r\n # je zapravo trazena osa rotacije; usput privremeno\r\n # iskljucivanje upozorenja o odbacivanju imaginarnog\r\n # dela kompleksnog broja posto je on garantovano nula\r\n # u ovom slucaju; zarad ocuvanja preciznosti neophodna\r\n # je upotreba realnih brojeva sa dvostrukom tacnoscu\r\n warnings.filterwarnings('ignore')\r\n p = np.array(v[:, ind], dtype = np.float64)\r\n warnings.resetwarnings()\r\n\r\n # Proizvoljan jedinicni vektor normalan na prethodni;\r\n # pazi se na slucaj(eve) kada su neke koordinate nule\r\n # p ~ [0, y, z] && (y!=0 || z!=0)\r\n if np.isclose(p[0], 0):\r\n u = np.array([0, -p[2], p[1]])\r\n # p ~ [x, y, z] && x!=0\r\n else:\r\n u = np.array([-p[1], p[0], 0])\r\n u = u/LA.norm(u)\r\n\r\n # Zarotirani vektor i odredjivanje ugla\r\n up = A @ u\r\n phi = np.arccos(u @ up)\r\n\r\n # Eventualna promena znaka ose, kako bi rotacija uvek\r\n # bila u pozitivnom smeru, prema pravilu desne ruke\r\n if LA.det(np.array([u, up, p])) < 0:\r\n p = -p\r\n\r\n # Vracanje odredjene ose i ugla\r\n return p, phi\r\n\r\n# Matrica rotacije oko orijentisane ose\r\n# tj. 
vektora sa ulaza za ugao sa ulaza\r\ndef Rodrigez(p, phi):\r\n # Greska ako nije broj\r\n if not isinstance(phi, (int, float)):\r\n raise ValueError\r\n \r\n # Greska ako je nula-vektor\r\n n = LA.norm(p)\r\n if np.isclose(n, 0):\r\n raise ValueError\r\n \r\n # Normalizacija ulaznog vektora\r\n p = p/n\r\n\r\n # Matrica ose rotacije\r\n ppt = p.reshape(3, 1) @ p.reshape(1, 3)\r\n \r\n # Matrica vektorskog mnozenja\r\n px = np.array([[ 0, -p[2], p[1]],\r\n [ p[2], 0, -p[0]],\r\n [-p[1], p[0], 0 ]])\r\n\r\n # Vracanje matrice prema formuli Rodrigeza\r\n return ppt + np.cos(phi)*(np.eye(3)-ppt) + np.sin(phi)*px\r\n\r\n# Ojlerovi uglovi koji odgovaraju\r\n# ulaznoj matrici rotacije\r\ndef A2Euler(A):\r\n # Greska ako matrica nije ortogonalna\r\n # ili nije sa determinantnom jedan\r\n if not np.allclose(A @ A.T, np.eye(3))\\\r\n or not np.isclose(LA.det(A), 1):\r\n raise ValueError\r\n\r\n # 'Zakljucani ziroskop', pa ima beskonacno\r\n # mnogo resenja; bira se ono sa phi = 0\r\n if np.isclose(np.abs(A[2,0]), 1):\r\n phi = 0.\r\n theta = -np.sign(A[2,0]) * np.pi/2\r\n psi = np.arctan2(-A[0,1], A[1,1])\r\n # Jedinstveno resenje\r\n else:\r\n phi = np.arctan2(A[2,1], A[2,2])\r\n theta = np.arcsin(-A[2,0])\r\n psi = np.arctan2(A[1,0], A[0,0])\r\n\r\n # Vracanje izracunatih uglova\r\n return phi, theta, psi\r\n\r\n# Jedinicni kvaternion koji predstavlja\r\n# rotaciju oko ulazne ose za ulazni ugao\r\ndef AxisAngle2Q(p, phi):\r\n # Greska ako nije broj\r\n if not isinstance(phi, (int, float)):\r\n raise ValueError\r\n \r\n # Realni deo kvaterniona\r\n w = np.cos(phi/2)\r\n\r\n # Greska ako je nula-vektor\r\n n = LA.norm(p)\r\n if np.isclose(n, 0):\r\n raise ValueError\r\n\r\n # Normalizacija ose\r\n p = p/n\r\n\r\n # Imaginarni deo kvaterniona\r\n x, y, z = np.sin(phi/2) * p\r\n\r\n # Vracanje izracunatog kvaterniona\r\n return np.array([w, x, y, z])\r\n\r\n# Jedinicni vektor i ugao takvi da ulazni\r\n# kvaternion odgovara rotaciji oko rezultujuce ose\r\n# za rezultujuci ugao, ogranicen izmedju 0 i pi\r\ndef Q2AxisAngle(q):\r\n # Greska ako je nula-kvaternion\r\n n = LA.norm(q)\r\n if np.isclose(n, 0):\r\n raise ValueError\r\n \r\n # Normalizacija kvaterniona\r\n q = q/n\r\n\r\n # Eventualna negacija kvaterniona kako bi\r\n # rezultujuci ugao bio u zeljenom rasponu\r\n if q[0] < 0: q = -q\r\n\r\n # Izvlacenje svih koeficijenata\r\n w, x, y, z = q\r\n\r\n # Nulta rotacija u slucaju identiteta\r\n if np.isclose(w, 1):\r\n return np.array([1., 0., 0.]), 0.\r\n\r\n # Ugao rotacije\r\n phi = 2 * np.arccos(w)\r\n\r\n # Osa rotacije\r\n p = np.array([x, y, z])\r\n p = p/LA.norm(p)\r\n\r\n # Vracanje odredjene ose i ugla\r\n return p, phi\r\n\r\n# Linearna interpolacija polozaja\r\n# izmedju c1 i c2 za vreme [0, tu]\r\ndef linear(c1, c2, tu, t):\r\n # Greska ako su losi ulazni podaci\r\n if not isinstance(tu, (int, float)) or \\\r\n not isinstance(t, (int, float)) or \\\r\n not 0 <= t <= tu:\r\n raise ValueError\r\n\r\n # Vracanje interpoliranog polozaja\r\n return (1 - t/tu) * c1 + t/tu * c2\r\n\r\n# Jedinicni kvaternion koji predstavlja linearnu\r\n# interpolaciju (Lerp) izmedju ulaznih q1 i q2\r\n# u trenutku t iz diskretnog intervala [0, tu]\r\ndef Lerp(q1, q2, tu, t):\r\n # Greska ako su losi ulazni podaci\r\n if len(q1) != 4 or len(q2) != 4 or \\\r\n not isinstance(tu, (int, float)) or \\\r\n not isinstance(t, (int, float)) or \\\r\n not 0 <= t <= tu:\r\n raise ValueError\r\n \r\n # Greska ako su nula-kvaternioni\r\n n1 = LA.norm(q1)\r\n n2 = LA.norm(q2)\r\n if np.isclose(n1, 0) or np.isclose(n2, 
0):\r\n raise ValueError\r\n \r\n # Normalizacija kvaterniona\r\n q1 = q1/n1\r\n q2 = q2/n2\r\n\r\n # Interpolirani kvaternion\r\n q = linear(q1, q2, tu, t)\r\n\r\n # Normalizacija interpoliranog\r\n n = LA.norm(q)\r\n q = q/n\r\n\r\n # Vracanje interpoliranog kvaterniona\r\n return q\r\n\r\n# Jedinicni kvaternion koji predstavlja sfernu\r\n# lin. interp. (SLerp) izmedju ulaznih q1 i q2\r\n# u trenutku t iz diskretnog intervala [0, tu]\r\ndef SLerp(q1, q2, tu, t):\r\n # Greska ako su losi ulazni podaci\r\n if len(q1) != 4 or len(q2) != 4 or \\\r\n not isinstance(tu, (int, float)) or \\\r\n not isinstance(t, (int, float)) or \\\r\n not 0 <= t <= tu:\r\n raise ValueError\r\n \r\n # Greska ako su nula-kvaternioni\r\n n1 = LA.norm(q1)\r\n n2 = LA.norm(q2)\r\n if np.isclose(n1, 0) or np.isclose(n2, 0):\r\n raise ValueError\r\n \r\n # Normalizacija kvaterniona\r\n q1 = q1/n1\r\n q2 = q2/n2\r\n\r\n # Kosinus ugla izmedju kvaterniona\r\n cos = np.inner(q1, q2)\r\n\r\n # Obrtanje u cilju kretanja po kracem\r\n # luku sfere po kojoj je interpolacija\r\n if cos < 0:\r\n q1 = -q1\r\n cos = -cos\r\n\r\n # Lerp u slucaju prebliskih kvaterniona\r\n if cos > 0.95:\r\n return Lerp(q1, q2, tu, t)\r\n\r\n # Ugao izmedju kvaterniona\r\n phi = np.arccos(cos)\r\n\r\n # Vracanje interpoliranog kvaterniona\r\n # koji je ovde garantovano jedinicni\r\n return np.sin(phi * (1 - t/tu)) / np.sin(phi) * q1 \\\r\n + np.sin(phi * t/tu) / np.sin(phi) * q2\r\n\r\n# Pomocna fja za pretvaranje Ojlerovih\r\n# uglova u kvaternion tog polozaja\r\ndef Euler2Q(phi, theta, psi):\r\n return AxisAngle2Q(*AxisAngle(Euler2A(phi, theta, psi)))\r\n\r\n# Pomocna fja za pretvaranje kvaterniona\r\n# u Ojlerove uglove tog polozaja\r\ndef Q2Euler(q):\r\n return A2Euler(Rodrigez(*Q2AxisAngle(q)))\r\n\r\n# Fja za testiranje\r\ndef test():\r\n # Profesorov test primer\r\n #phi = -np.arctan(1/4)\r\n #theta = -np.arcsin(8/9)\r\n #psi = np.arctan(4)\r\n\r\n # Moj test primer\r\n phi = np.pi/3\r\n theta = np.pi/3\r\n psi = np.pi/3\r\n\r\n # Ojlerovi uglovi\r\n print('Ojlerovi uglovi:')\r\n print('\\u03D5 =', phi)\r\n print('\\u03B8 =', theta)\r\n print('\\u03C8 =', psi)\r\n print()\r\n\r\n # Matrica rotacije\r\n print('Euler2A:')\r\n A = Euler2A(phi, theta, psi)\r\n print('A =')\r\n print(A)\r\n print()\r\n\r\n # Osa i ugao\r\n print('AxisAngle:')\r\n p, phi0 = AxisAngle(A)\r\n print('p =', p)\r\n print('\\u03D5 =', phi0)\r\n print()\r\n\r\n # Vracanje na matricu\r\n print('Rodrigez:')\r\n A = Rodrigez(p, phi0)\r\n print('A =')\r\n print(A)\r\n print()\r\n\r\n # Vracanje na uglove\r\n print('A2Euler:')\r\n phi, theta, psi = A2Euler(A)\r\n print('\\u03D5 =', phi)\r\n print('\\u03B8 =', theta)\r\n print('\\u03C8 =', psi)\r\n print()\r\n\r\n # Kvaternion\r\n print('AxisAngle2Q:')\r\n q = AxisAngle2Q(p, phi0)\r\n w, x, y, z = q\r\n print(f'q = {w:f} {x:+f}i {y:+f}j {z:+f}k')\r\n print()\r\n\r\n # Vracanje na osu i ugao\r\n print('Q2AxisAngle:')\r\n p, phi0 = Q2AxisAngle(q)\r\n print('p =', p)\r\n print('\\u03D5 =', phi0)\r\n\r\n# Poziv test funkcije ukoliko\r\n# je modul direktno izvrsen\r\nif __name__ == '__main__':\r\n test()\r\n" ]
[ [ "numpy.arctan2", "numpy.eye", "numpy.arcsin", "numpy.sign", "numpy.linalg.det", "numpy.isclose", "numpy.arccos", "numpy.cos", "numpy.abs", "numpy.array", "numpy.sin", "numpy.inner", "numpy.linalg.norm", "numpy.linalg.eig" ] ]
ludkinm/pyro
[ "d24c808a9d86d79c43a99990fe9e418ce5976613" ]
[ "pyro/infer/autoguide/initialization.py" ]
[ "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nr\"\"\"\nThe pyro.infer.autoguide.initialization module contains initialization functions for\nautomatic guides.\n\nThe standard interface for initialization is a function that inputs a Pyro\ntrace ``site`` dict and returns an appropriately sized ``value`` to serve\nas an initial constrained value for a guide estimate.\n\"\"\"\nimport torch\nfrom torch.distributions import transform_to\n\nfrom pyro.distributions.torch import Independent\nfrom pyro.distributions.torch_distribution import MaskedDistribution\nfrom pyro.infer.util import is_validation_enabled\nfrom pyro.poutine.messenger import Messenger\nfrom pyro.util import torch_isnan\n\n\ndef _is_multivariate(d):\n while isinstance(d, (Independent, MaskedDistribution)):\n d = d.base_dist\n return any(size > 1 for size in d.event_shape)\n\n\ndef init_to_feasible(site):\n \"\"\"\n Initialize to an arbitrary feasible point, ignoring distribution\n parameters.\n \"\"\"\n value = site[\"fn\"].sample().detach()\n t = transform_to(site[\"fn\"].support)\n return t(torch.zeros_like(t.inv(value)))\n\n\ndef init_to_sample(site):\n \"\"\"\n Initialize to a random sample from the prior.\n \"\"\"\n return site[\"fn\"].sample().detach()\n\n\ndef init_to_median(site, num_samples=15):\n \"\"\"\n Initialize to the prior median; fallback to a feasible point if median is\n undefined.\n \"\"\"\n # The median undefined for multivariate distributions.\n if _is_multivariate(site[\"fn\"]):\n return init_to_feasible(site)\n try:\n # Try to compute empirical median.\n samples = site[\"fn\"].sample(sample_shape=(num_samples,))\n value = samples.median(dim=0)[0]\n if torch_isnan(value):\n raise ValueError\n if hasattr(site[\"fn\"], \"_validate_sample\"):\n site[\"fn\"]._validate_sample(value)\n return value\n except (RuntimeError, ValueError):\n # Fall back to feasible point.\n return init_to_feasible(site)\n\n\ndef init_to_mean(site):\n \"\"\"\n Initialize to the prior mean; fallback to median if mean is undefined.\n \"\"\"\n try:\n # Try .mean() method.\n value = site[\"fn\"].mean.detach()\n if torch_isnan(value):\n raise ValueError\n if hasattr(site[\"fn\"], \"_validate_sample\"):\n site[\"fn\"]._validate_sample(value)\n return value\n except (NotImplementedError, ValueError):\n # Fall back to a median.\n # This is requred for distributions with infinite variance, e.g. Cauchy.\n return init_to_median(site)\n\n\nclass InitMessenger(Messenger):\n \"\"\"\n Initializes a site by replacing ``.sample()`` calls with values\n drawn from an initialization strategy. 
This is mainly for internal use by\n autoguide classes.\n\n :param callable init_fn: An initialization function.\n \"\"\"\n def __init__(self, init_fn):\n self.init_fn = init_fn\n super(InitMessenger, self).__init__()\n\n def _pyro_sample(self, msg):\n if msg[\"done\"] or msg[\"is_observed\"] or type(msg[\"fn\"]).__name__ == \"_Subsample\":\n return\n with torch.no_grad():\n value = self.init_fn(msg)\n if is_validation_enabled() and msg[\"value\"] is not None:\n if not isinstance(value, type(msg[\"value\"])):\n raise ValueError(\n \"{} provided invalid type for site {}:\\nexpected {}\\nactual {}\"\n .format(self.init_fn, msg[\"name\"], type(msg[\"value\"]), type(value)))\n if value.shape != msg[\"value\"].shape:\n raise ValueError(\n \"{} provided invalid shape for site {}:\\nexpected {}\\nactual {}\"\n .format(self.init_fn, msg[\"name\"], msg[\"value\"].shape, value.shape))\n msg[\"value\"] = value\n msg[\"done\"] = True\n" ]
[ [ "torch.no_grad", "torch.distributions.transform_to" ] ]
sebas095/imageFilter
[ "7059b7abfaceffa8f03f27947e5059e3173954d1" ]
[ "filters/gaussian.py" ]
[ "import scipy\nfrom scipy import ndimage\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nl = scipy.misc.ascent()\nl = l[230:290, 220:320]\n\nnoisy = l + 0.4 * l.std() * np.random.random(l.shape)\ngauss_denoised = ndimage.gaussian_filter(noisy, 2)\n\nplt.subplot(121)\nplt.imshow(noisy, cmap=plt.cm.gray, vmin=40, vmax=220)\nplt.xticks([]), plt.yticks([])\nplt.title('Noisy', fontsize=20)\n\nplt.subplot(122)\nplt.imshow(gauss_denoised, cmap=plt.cm.gray, vmin=40, vmax=220)\nplt.xticks([]), plt.yticks([])\nplt.title('Gaussian filter', fontsize=20)\n\nplt.show()\n" ]
[ [ "matplotlib.pyplot.xticks", "matplotlib.pyplot.title", "numpy.random.random", "matplotlib.pyplot.subplot", "scipy.ndimage.gaussian_filter", "matplotlib.pyplot.imshow", "matplotlib.pyplot.show", "scipy.misc.ascent", "matplotlib.pyplot.yticks" ] ]
vutuanhai237/Braces2TeethUtilities
[ "6dd480edb09d05ac9d6f48a013649f92796549aa" ]
[ "createPix2Pix (facial)/createTeeth2Dataset.py" ]
[ "from imutils import face_utils\nimport numpy as np\nimport argparse\nimport imutils\nimport dlib\nimport cv2\nimport copy\nimport colorsys\nimport math\nimport os\nimport shutil\nimport collections \nfrom convexHull import convexHull, convexRectangle \nfrom processBar import progressbar\nfrom genTeethColor import findTeethColor, readTeethShade, getListPixelMayBeTeeth\n\nfrom genSuperpixel import preProcessing\n\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\n\ndef getFacial(image):\n \n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n rects = detector(gray, 1)\n for (i, rect) in enumerate(rects):\n shape = predictor(gray, rect)\n shape = face_utils.shape_to_np(shape)\n (x, y, w, h) = face_utils.rect_to_bb(rect)\n return shape[60:68]\n \n\n\n\n\ndef distance(p1, p2):\n upperCos = p1[0]*p2[0] + p1[1] * p2[1] + p1[2]*p2[2]\n lowerCos = (p1[0]**2+p1[1]**2+p1[2]**2)**(1/2) * (p2[0]**2+p2[1]**2+p2[2]**2)**(1/2) \n acos = math.acos((upperCos/lowerCos))*180/math.pi\n return acos\n\ndef calculateThreshhold(image, color):\n distances = []\n for i in range(0, image.shape[0]):\n for j in range(0, image.shape[1]):\n pixel = image[i][j]\n distances.append(distance(pixel, color))\n distances.sort()\n return distances[int(len(distances)*0.5)]\n\n\n\ndef shiftShapeAfterCrop(shape, point):\n result = []\n for p in shape:\n result.append([p[0] - point[0], p[1] - point[1]])\n return np.array([result], np.int32)\n\n\ndef reInpainting(image, groundTruth, teethColor):\n \"\"\"\n if pixel has pink color (marked for teeth) and not in range of teeth => fill by teethColor\n \"\"\"\n preProcessedImage = preProcessing(image, teethColor)\n\n #print(list(preProcessedImage[0][0]))\n for i in range(0, image.shape[0]):\n for j in range(0, image.shape[1]):\n pixel = image[i][j]\n pink = [255, 0, 255]\n if collections.Counter(pixel) == collections.Counter(pink):\n #print( preProcessedImage[i][j])\n print(groundTruth[i, j])\n groundTruth[i, j] = [preProcessedImage[i, j][0], preProcessedImage[i, j][1], preProcessedImage[i, j][2]]\n return groundTruth\n\ndef createFacial(image):\n try:\n shape = getFacial(image) # points of mouth\n if shape is None:\n return None\n else:\n [topLeft, botRight] = convexRectangle(shape) # 2 point for crop mouth\n needed_image = copy.copy(image)\n if topLeft[1] - botRight[1] > topLeft[0] - botRight[0]:\n deltaXY = abs(abs(topLeft[1] - botRight[1]) - abs(topLeft[0] - botRight[0]))\n newTopLeft = [topLeft[0], topLeft[1] - int(deltaXY/2)]\n newBotRight = [botRight[0], botRight[1] + int(deltaXY/2)]\n upper_needed_image = needed_image[newTopLeft[1] : topLeft[1] + 1, newTopLeft[0] : botRight[0] + 1]\n bottom_needed_image = needed_image[botRight[1] : newBotRight[1] + 1, newTopLeft[0] : botRight[0] + 1]\n needed_image = needed_image[newTopLeft[1] : newBotRight[1] + 1, newTopLeft[0] : newBotRight[0] + 1]\n image = image[topLeft[1] : botRight[1] + 1, topLeft[0] : botRight[0] + 1] # mouth\n shape = shiftShapeAfterCrop(shape, topLeft) # new point of mouth after crop\n groundTruth = copy.copy(image)\n pixelMayBeTeeths = getListPixelMayBeTeeth(image) # color on +\n teethShades = readTeethShade() # list of teeth shade\n teethColor = findTeethColor(pixelMayBeTeeths,teethShades).getColor() # color of teeth\n image = convexHull(image, shape)\n groundTruth = reInpainting(image, groundTruth, teethColor)\n image = cv2.resize(image, (256,256), interpolation = cv2.INTER_CUBIC)\n res = np.concatenate((upper_needed_image, groundTruth, 
bottom_needed_image), axis=0) \n res = cv2.resize(res, (256,256), interpolation = cv2.INTER_CUBIC )\n needed_image = cv2.resize(needed_image, (256,256), interpolation = cv2.INTER_CUBIC )\n out = np.concatenate((needed_image, res), axis=1)\n return out\n except:\n return\ndef make_directory_if_not_exists(path):\n while not os.path.isdir(path):\n try:\n os.makedirs(path)\n break \n except WindowsError:\n print(\"got WindowsError\")\n pass \ndef main():\n path = \"C:/Users/haime/Downloads/test1\"\n shutil.rmtree(path + \"/result\", ignore_errors=True)\n os.mkdir(path + \"/result\")\n files = [file for file in os.listdir(path) if os.path.isfile(os.path.join(path, file))]\n for i in progressbar(range(len(files)), \"Computing: \", 10):\n file = files[i]\n filename = file.split(\".\")\n images = cv2.imread(path + '/' + file)\n out = createFacial(images)\n if out is not None:\n cv2.imwrite(f\"{path}/result/{filename[0]}.png\", out)\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "numpy.array", "numpy.concatenate" ] ]
lukemerrick/pytorch-forecasting
[ "000ea41bea4ab7a47a0e610841d4fd88fdfead1e" ]
[ "examples/ar.py" ]
[ "from pathlib import Path\nimport pickle\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.core.common import SettingWithCopyWarning\nimport pytorch_lightning as pl\nfrom pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor\nfrom pytorch_lightning.loggers import TensorBoardLogger\nimport torch\n\nfrom pytorch_forecasting import EncoderNormalizer, GroupNormalizer, TimeSeriesDataSet\nfrom pytorch_forecasting.data import NaNLabelEncoder\nfrom pytorch_forecasting.data.examples import generate_ar_data\nfrom pytorch_forecasting.metrics import NormalDistributionLoss\nfrom pytorch_forecasting.models.deepar import DeepAR\nfrom pytorch_forecasting.utils import profile\n\nwarnings.simplefilter(\"error\", category=SettingWithCopyWarning)\n\n\ndata = generate_ar_data(seasonality=10.0, timesteps=400, n_series=100)\ndata[\"static\"] = \"2\"\ndata[\"date\"] = pd.Timestamp(\"2020-01-01\") + pd.to_timedelta(data.time_idx, \"D\")\nvalidation = data.series.sample(20)\n\nmax_encoder_length = 60\nmax_prediction_length = 20\n\ntraining_cutoff = data[\"time_idx\"].max() - max_prediction_length\n\ntraining = TimeSeriesDataSet(\n data[lambda x: ~x.series.isin(validation)],\n time_idx=\"time_idx\",\n target=\"value\",\n categorical_encoders={\"series\": NaNLabelEncoder().fit(data.series)},\n group_ids=[\"series\"],\n static_categoricals=[\"static\"],\n min_encoder_length=max_encoder_length,\n max_encoder_length=max_encoder_length,\n min_prediction_length=max_prediction_length,\n max_prediction_length=max_prediction_length,\n time_varying_unknown_reals=[\"value\"],\n time_varying_known_reals=[\"time_idx\"],\n target_normalizer=GroupNormalizer(groups=[\"series\"]),\n add_relative_time_idx=False,\n add_target_scales=True,\n randomize_length=None,\n)\n\nvalidation = TimeSeriesDataSet.from_dataset(\n training,\n data[lambda x: x.series.isin(validation)],\n # predict=True,\n stop_randomization=True,\n)\nbatch_size = 64\ntrain_dataloader = training.to_dataloader(train=True, batch_size=batch_size, num_workers=0)\nval_dataloader = validation.to_dataloader(train=False, batch_size=batch_size, num_workers=0)\n\n# save datasets\ntraining.save(\"training.pkl\")\nvalidation.save(\"validation.pkl\")\n\nearly_stop_callback = EarlyStopping(monitor=\"val_loss\", min_delta=1e-4, patience=5, verbose=False, mode=\"min\")\nlr_logger = LearningRateMonitor()\n\ntrainer = pl.Trainer(\n max_epochs=10,\n gpus=-1,\n gradient_clip_val=0.1,\n limit_train_batches=30,\n limit_val_batches=3,\n # fast_dev_run=True,\n # logger=logger,\n # profiler=True,\n callbacks=[lr_logger, early_stop_callback],\n)\n\n\ndeepar = DeepAR.from_dataset(\n training,\n learning_rate=0.1,\n hidden_size=32,\n dropout=0.1,\n loss=NormalDistributionLoss(),\n log_interval=10,\n log_val_interval=3,\n # reduce_on_plateau_patience=3,\n)\nprint(f\"Number of parameters in network: {deepar.size()/1e3:.1f}k\")\n\n# # find optimal learning rate\n# deepar.hparams.log_interval = -1\n# deepar.hparams.log_val_interval = -1\n# trainer.limit_train_batches = 1.0\n# res = trainer.tuner.lr_find(\n# deepar, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader, min_lr=1e-5, max_lr=1e2\n# )\n\n# print(f\"suggested learning rate: {res.suggestion()}\")\n# fig = res.plot(show=True, suggest=True)\n# fig.show()\n# deepar.hparams.learning_rate = res.suggestion()\n\ntorch.set_num_threads(10)\ntrainer.fit(\n deepar,\n train_dataloaders=train_dataloader,\n val_dataloaders=val_dataloader,\n)\n\n# calcualte mean absolute error on validation 
set\nactuals = torch.cat([y for x, (y, weight) in iter(val_dataloader)])\npredictions = deepar.predict(val_dataloader)\nprint(f\"Mean absolute error of model: {(actuals - predictions).abs().mean()}\")\n\n# # plot actual vs. predictions\n# raw_predictions, x = deepar.predict(val_dataloader, mode=\"raw\", return_x=True)\n# for idx in range(10): # plot 10 examples\n# deepar.plot_prediction(x, raw_predictions, idx=idx, add_loss_to_title=True)\n" ]
[ [ "torch.set_num_threads", "pandas.to_timedelta", "pandas.Timestamp" ] ]
yanxp/ASM-Pytorch
[ "4eec5caea13320d2502007015e032d76d59eefc4" ]
[ "lib/utils/help.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\nfrom model.config import cfg\nfrom model.test import im_detect\nfrom model.nms_wrapper import nms\n\nfrom utils.timer import Timer\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os, cv2\nimport argparse\n\nfrom nets.vgg16 import vgg16\nfrom nets.resnet_v1 import resnetv1\nimport random\nimport torch\nimport xml.etree.ElementTree as ET\n\nCLASSES = ('__background__',\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor')\n\ndef softmax(ary):\n ary = ary.flatten()\n expa = np.exp(ary)\n dom = np.sum(expa)\n return expa/dom\n\ndef choose_model(dir):\n ''' \n get the latest model in in dir'''\n lists = os.listdir(dir)\n lists.sort(key= lambda fn:os.path.getmtime(os.path.join(dir,fn)))\n return lists[-1]\n\ndef load_model(net_file ,path):\n '''\n return caffe.Net'''\n import caffe\n net = caffe.Net(net_file, path, caffe.TEST) \n return net\ndef judge_y(score):\n '''return :\n y:np.array len(score)\n '''\n y=[]\n for s in score:\n if s==1 or np.log(s)>np.log(1-s):\n y.append(1)\n else:\n y.append(-1)\n return np.array(y, dtype=np.int)\ndef detect_im(net, detect_idx, imdb,clslambda):\n roidb = imdb.roidb\n allBox =[]; allScore = []; allY=[] ;eps =0 ; al_idx = []\n for i in detect_idx:\n imgpath = imdb.image_path_at(i)\n im = cv2.imread(imgpath)\n height = im.shape[0]; width=im.shape[1]\n\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(net, im)\n timer.toc()\n \n BBox=[] # all eligible boxes for this img\n Score=[] # every box in BBox has k*1 score vector\n Y = []\n CONF_THRESH = 0.5 # if this is high then no image can enter al, but low thresh leads many images enter al\n NMS_THRESH = 0.3\n if np.amax(scores[:,1:])<CONF_THRESH:\n al_idx.append(i)\n continue\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # because we skipped background\n cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(torch.from_numpy(dets), NMS_THRESH)\n dets = dets[keep.numpy(), :]\n inds = np.where(dets[:, -1] >= CONF_THRESH)[0]\n \n if len(inds) == 0 :\n continue\n# vis_detections(im, cls, dets, thresh=CONF_THRESH)\n for j in inds:\n bbox = dets[j, :4]\n BBox.append(bbox)\n # find which region this box deriving from\n k = keep[j]\n Score.append(scores[k].copy())\n Y.append(judge_y(scores[k]))\n y = Y[-1]\n loss = -( (1+y)/2 * np.log(scores[k]) + (1-y)/2 * np.log(1-scores[k]+(1e-30))) \n tmp = np.max(1-loss/clslambda)\n eps = eps if eps >= tmp else tmp\n \n allBox.append(BBox[:]); allScore.append(Score[:]); allY.append(Y[:])\n return np.array(allScore), np.array(allBox), np.array(allY), al_idx, eps\ndef judge_uv(loss, gamma, clslambda,eps):\n '''\n return \n u: scalar\n v: R^kind vector\n '''\n lsum = np.sum(loss)\n dim = loss.shape[0]\n v = np.zeros((dim,))\n\n if(lsum > gamma):\n return 1, np.array([eps]*dim)\n elif lsum < gamma:\n for i,l in enumerate(loss):\n if l > clslambda[i]:\n v[i] = 0\n elif l<clslambda[i]*(1-eps):\n v[i] = eps\n else:\n v[i]=1-l/clslambda[i]\n return 0, v\n\nimport matplotlib as mpl\n#mpl.use('Agg')\nimport matplotlib.pyplot as plt\ndef vis_detections(im, class_name, dets, thresh=0.5):\n \"\"\"Draw detected bounding boxes.\"\"\"\n plt.switch_backend('Agg')\n 
inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return\n\n im = im[:, :, (2, 1, 0)]\n fig, ax = plt.subplots()\n ax.imshow(im, aspect='equal')\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n\n ax.add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0],\n bbox[3] - bbox[1], fill=False,\n edgecolor='red', linewidth=3.5)\n )\n ax.text(bbox[0], bbox[1] - 2,\n '{:s} {:.3f}'.format(class_name, score),\n bbox=dict(facecolor='blue', alpha=0.5),\n fontsize=14, color='white')\n\n ax.set_title(('{} detections with '\n 'p({} | box) >= {:.1f}').format(class_name, class_name,\n thresh),\n fontsize=14)\n plt.axis('off')\n plt.tight_layout()\n plt.draw()\n import time\n t0 = time.time()\n fig = plt.gcf()\n fig.savefig('images/'+str(t0)+'.jpg')\n\ndef blur_image(roidbs, ss_candidate_idx):\n '''\n blur regions except BBox\n '''\n def _handle(roi, idx):\n imgpath = roi['image'].split('/')[-1]\n im = cv2.imread(roi['image'])\n im_bbox = []\n for box in roi['boxes']:\n box = list(map(int, box)) # list() is needed in Python 3, where map returns an iterator\n im_bbox.append(im[box[1]:box[3], box[0]:box[2]])\n new_im = cv2.blur(im, (25,25))\n for i, box in enumerate(roi['boxes']):\n box = list(map(int, box))\n# cv2.rectangle(new_im,(box[0],box[1]),(box[2],box[3]),(255,0,0),3)\n new_im[box[1]:box[3], box[0]:box[2]] = im_bbox[i]\n \n path = 'tmpdata/{}'.format(imgpath)\n cv2.imwrite(path, new_im)\n assert os.path.exists(path), \"didn't save successfully\"\n roi['image'] = path\n return roi\n print('blur irrelevant regions')\n res_roidb = []\n for i in range(len(roidbs)):\n if len(roidbs[i]['boxes'])>0 and i in ss_candidate_idx and not roidbs[i]['flipped']:\n res_roidb.append(roidbs[i].copy())\n res_roidb[i] = _handle(res_roidb[i], i)\n else:\n res_roidb.append(roidbs[i].copy())\n return res_roidb\n" ]
[ [ "numpy.sum", "matplotlib.pyplot.draw", "numpy.zeros", "matplotlib.pyplot.switch_backend", "matplotlib.pyplot.axis", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.gcf", "numpy.exp", "matplotlib.pyplot.subplots", "numpy.amax", "numpy.hstack", "numpy.log", "torch.from_numpy", "numpy.max", "numpy.array", "numpy.where", "matplotlib.pyplot.Rectangle" ] ]
luispedro/Coelho2021_GMGCv1_analysis
[ "5f1a62844631121cc11f8ac5a776d25baca56ff7" ]
[ "taxonomic-annotation/reconcile.py" ]
[ "import pandas as pd\nfrom taxonomic import ncbi\nn = ncbi.NCBI()\ntaxonomic = pd.read_table('/g/bork1/coelho/DD_DeCaF/genecats.cold/GMGC10.taxonomic.map', index_col=0, engine='c')\nspecies = pd.read_table('/g/bork1/coelho/DD_DeCaF/genecats.cold/GMGC10.species.match.map', header=None, usecols=[1,2], index_col=0, squeeze=True, names=['gene', 'TaxID'], engine='c')\nsuperkingdom = pd.read_table('/g/bork1/coelho/DD_DeCaF/genecats.cold/GMGC10.kingdom.annotation', header=None, names=['gene', 'superkingdom'], index_col=0, squeeze=True, engine='c')\ntaxid = taxonomic['NCBI TaxID'].to_dict()\nd_superkingdom = superkingdom.to_dict()\nd_species = species.to_dict()\nd_predicted_taxid = taxonomic['NCBI TaxID'].to_dict()\ntaxid = taxonomic['NCBI TaxID'][taxonomic.Rank == 'species'].to_dict()\n\ngs = {}\nfor g,t in taxid.items():\n gs[g] = n.ancestors.get(str(t), '1')\n if len(gs) % 10_000_000 == 0:\n print(len(gs) // 1_000_000)\n\n\nno_match = {'None', 'no_match'}\nprok = {'Bacteria', 'Archaea'}\nfinal = d_species.copy()\nfor g,sk in d_superkingdom.items():\n if sk in no_match:\n continue\n if g in d_species:\n continue\n elif sk not in prok:\n final[g] = sk\n elif g in gs:\n final[g] = gs[g]\n else:\n final[g] = d_predicted_taxid.get(g, 1)\n \n \n\nfor g,p in d_predicted_taxid.items():\n if g not in final:\n final[g] = 1\n\nfinal = pd.Series(final)\nfinalstr = final.map(str)\nfinalnames = finalstr.map(n.names)\nfinalranks = finalstr.map(n.ranks)\n\nfinalframe = pd.DataFrame({'taxid' : finalstr, 'rank' : finalranks, 'name': finalnames})\nfinalframe.to_csv('taxonomic.final.tsv', sep='\\t')\n" ]
[ [ "pandas.read_table", "pandas.Series", "pandas.DataFrame" ] ]
Embodimentgeniuslm3/NeMo
[ "5f5a9a0a1d0bcf28675841af3df9b08b56ae3203" ]
[ "nemo/collections/nlp/models/machine_translation/mt_enc_dec_bottleneck_model.py" ]
[ "# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport itertools\nimport json\nimport random\nfrom multiprocessing import Value\nfrom pathlib import Path\nfrom typing import Dict, List, Optional, Union\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.utils.data as pt_data\nfrom omegaconf import DictConfig, ListConfig, OmegaConf\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.utilities import rank_zero_only\nfrom sacrebleu import corpus_bleu\n\nfrom nemo.collections.common.losses import NLLLoss\nfrom nemo.collections.nlp.models.machine_translation.mt_enc_dec_config import MTBottleneckModelConfig\nfrom nemo.collections.nlp.models.machine_translation.mt_enc_dec_model import MTEncDecModel\nfrom nemo.collections.nlp.modules.common.transformer import AttentionBridge, TopKSequenceGenerator\nfrom nemo.core.classes.common import typecheck\nfrom nemo.utils import logging, model_utils\n\n__all__ = ['MTBottleneckModel']\n\n\ndef build_linear_or_identity(input_dim, output_dim):\n \"\"\"\n Auxiliary method to return FC layer when input_dim != output_dim\n else return identity\n \"\"\"\n if input_dim != output_dim:\n model = torch.nn.Linear(input_dim, output_dim)\n else:\n model = torch.nn.Identity()\n\n return model\n\n\nclass MTBottleneckModel(MTEncDecModel):\n \"\"\"\n Machine translation model which supports bottleneck architecture,\n NLL, VAE, and MIM loss.\n\n Supported losses:\n 1) nll - Conditional cross entropy (the usual NMT loss)\n 2) mim - MIM learning framework. A latent variable model with good\n reconstruction and compressed latent representation.\n https://arxiv.org/pdf/2003.02645.pdf\n 3) vae - VAE learning framework. 
A latent variable model which learns\n good probability estimation over observations and\n a regularized latent representation.\n https://arxiv.org/pdf/1312.6114.pdf\n \"\"\"\n\n def __init__(self, cfg: MTBottleneckModelConfig, trainer: Trainer = None):\n super().__init__(cfg=cfg, trainer=trainer)\n\n self.model_type: str = cfg.get(\"model_type\", \"nll\")\n self.min_logv: float = cfg.get(\"min_logv\", -6)\n self.latent_size: int = cfg.get(\"latent_size\", -1)\n self.non_recon_warmup_batches: int = cfg.get(\"non_recon_warmup_batches\", 200000)\n self.recon_per_token: bool = cfg.get(\"recon_per_token\", True)\n\n # if True, translation uses the mean of latent for VAE and MIM\n self.deterministic_translate = True\n\n # latent_size -1 will take value of encoder.hidden_size\n if self.latent_size < 0:\n self.latent_size = self.encoder.hidden_size\n\n if not self.recon_per_token:\n # disable reduction for train and eval loss\n self.eval_loss_fn = NLLLoss(ignore_index=self.decoder_tokenizer.pad_id, reduction='none')\n self.loss_fn._per_token_reduction = False\n\n if self.model_type not in [\"nll\", \"mim\", \"vae\"]:\n raise ValueError(f\"Unknown model_type = {self.model_type}\")\n\n # project bridge dimension back to decoder hidden dimensions\n self.latent2hidden = build_linear_or_identity(self.latent_size, self.decoder.hidden_size)\n\n # project dimension of encoder hidden to latent dimension\n self.hidden2latent_mean = build_linear_or_identity(self.encoder.hidden_size, self.latent_size)\n\n # MIM or VAE\n if self.model_type != \"nll\":\n # for probabilistic latent variable models we also need variance\n self.hidden2latent_logv = build_linear_or_identity(self.encoder.hidden_size, self.latent_size)\n\n def _validate_encoder_decoder_hidden_size(self):\n \"\"\"\n Validate encoder and decoder hidden sizes, and enforce same size.\n We support here encoder/decoder with different hidden_size, so do nothing.\n \"\"\"\n pass\n\n def eval_epoch_end(self, outputs, mode):\n # call parent for logging\n super().eval_epoch_end(outputs, mode)\n\n # if user specifies one validation dataloader, then PTL reverts to giving a list of dictionary instead of a list of list of dictionary\n if isinstance(outputs[0], dict):\n outputs = [outputs]\n\n for dataloader_idx, output in enumerate(outputs):\n # add logs if available in outputs\n log_dict = {}\n for x in output:\n if \"log\" in x:\n for k, v in x[\"log\"].items():\n log_dict[k] = log_dict.get(k, []) + [v]\n\n for k, v in log_dict.items():\n if dataloader_idx == 0:\n self.log(f\"{mode}_{k}\", np.mean(v), sync_dist=True)\n else:\n self.log(f\"{mode}_{k}_dl_index_{dataloader_idx}\", np.mean(v), sync_dist=True)\n\n @classmethod\n def list_available_models(cls) -> Optional[Dict[str, str]]:\n \"\"\"\n This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.\n\n Returns:\n List of available pre-trained models.\n \"\"\"\n result = []\n\n return result\n\n def encode_latent(self, hidden):\n \"\"\"\n Sample latent code z with reparameterization from bridge for\n probabilistic latent variable models (e.g., mim, vae),\n or return value for non-probabilistic models (nll)\n \"\"\"\n # all models have mean\n z_mean = self.hidden2latent_mean(hidden)\n\n if self.model_type == \"nll\":\n # reconstruction only\n z = z_mean\n z_logv = torch.zeros_like(z)\n else:\n # mim or vae\n\n # sample posterior q(z|x) for MIM and VAE\n z_logv = self.hidden2latent_logv(hidden)\n # avoid numerical instability for MIM\n z_logv = 
z_logv.clamp_min(self.min_logv)\n # sample z with reparameterization\n e = torch.randn_like(z_mean)\n z = e * torch.exp(0.5 * z_logv) + z_mean\n\n return z, z_mean, z_logv\n\n def loss(\n self, z, z_mean, z_logv, z_mask, tgt_log_probs, tgt, tgt_mask, tgt_labels, train=False, return_info=False\n ):\n \"\"\"\n Compute the loss from latent (z) and target (x).\n\n train - If True enables loss annealing, and label smoothing\n \"\"\"\n\n recon_loss_fn = self.loss_fn if train else self.eval_loss_fn\n\n info_dict = {}\n\n if self.recon_per_token:\n log_p_x_given_z_per_token = -recon_loss_fn(log_probs=tgt_log_probs, labels=tgt_labels)\n\n log_p_x_given_z = log_p_x_given_z_per_token\n log_p_x_given_z_per_token = log_p_x_given_z_per_token.detach()\n else:\n # averaging of log_p_x_given_z per sample\n output_mask = (tgt_labels != self.decoder_tokenizer.pad_id).type_as(tgt_log_probs)\n\n log_p_x_given_z_per_token = (\n -recon_loss_fn(log_probs=tgt_log_probs, labels=tgt_labels,).view(tgt_log_probs.shape[:2]) * output_mask\n )\n\n # probability per sample\n log_p_x_given_z = log_p_x_given_z_per_token.sum(-1).mean()\n\n tokens = output_mask.sum()\n log_p_x_given_z_per_token = log_p_x_given_z_per_token.sum().detach() / tokens\n\n info_dict[\"log_p_x_given_z\"] = log_p_x_given_z.detach().cpu()\n\n info_dict[\"log_p_x_given_z_per_token\"] = log_p_x_given_z_per_token.detach().cpu()\n\n # loss warmup during training only\n if train:\n trainer = self.trainer\n # if we do not have a trainer ignore annealing\n if trainer is None:\n # ignore warmup and auxiliary loss\n warmup_coef = 1.0\n else:\n global_step = self.trainer.global_step\n\n warmup_coef = min(global_step / self.non_recon_warmup_batches, 1)\n else:\n # ignore warmup and auxiliary loss\n warmup_coef = 1.0\n\n info_dict[\"warmup_coef_recon\"] = warmup_coef\n\n if self.model_type in [\"mim\", \"vae\"]:\n # tokens = tgt_mask.sum()\n q_z_given_x = torch.distributions.Normal(loc=z_mean, scale=torch.exp(0.5 * z_logv),)\n # average latent distribution to match averaging of observations\n if self.recon_per_token:\n # average latent per dimension - to heuristically match per-token reconstruction\n log_q_z_given_x = q_z_given_x.log_prob(z).mean(-1).mean(-1).mean()\n else:\n log_q_z_given_x = q_z_given_x.log_prob(z).sum(-1).sum(-1).mean()\n\n # build prior distribution\n p_z = torch.distributions.Normal(loc=torch.zeros_like(z), scale=torch.ones_like(z),)\n if self.recon_per_token:\n # average latent distribution similar to averaging of observations\n log_p_z = p_z.log_prob(z).mean(-1).mean(-1).mean()\n else:\n log_p_z = p_z.log_prob(z).sum(-1).sum(-1).mean()\n\n if self.model_type == \"mim\":\n loss_terms = 0.5 * (log_q_z_given_x + log_p_z)\n elif self.model_type == \"vae\":\n # KL divergence -Dkl( q(z|x) || p(z) )\n loss_terms = log_p_z - log_q_z_given_x\n\n # show loss value for reconstruction but train with MIM/VAE loss\n loss = -(log_p_x_given_z + warmup_coef * loss_terms)\n\n info_dict[\"log_q_z_given_x\"] = log_q_z_given_x.detach().cpu()\n info_dict[\"log_var_q_z_given_x\"] = z_logv.detach().cpu()\n info_dict[\"log_p_z\"] = log_p_z.detach().cpu()\n info_dict[\"kl_div_q_p\"] = (log_q_z_given_x - log_p_z).detach().cpu()\n\n elif self.model_type == \"nll\":\n loss = -log_p_x_given_z\n\n if return_info:\n return loss, info_dict\n else:\n return loss\n\n @typecheck()\n def forward(self, src, src_mask, tgt, tgt_mask):\n \"\"\"\n Encode src, sample a latent code z, decode tgt, and return\n (z, z_mean, z_logv, z_mask, tgt_log_probs).\n \"\"\"\n if 
self.validate_input_ids:\n # test src/tgt for id range (i.e., help in catching a wrong tokenizer)\n self.test_encoder_ids(src, raise_error=True)\n self.test_decoder_ids(tgt, raise_error=True)\n\n enc_hiddens, enc_mask = self.encoder(input_ids=src, encoder_mask=src_mask, return_mask=True,)\n\n # build posterior distribution q(z|x)\n z, z_mean, z_logv = self.encode_latent(hidden=enc_hiddens)\n z_mask = enc_mask\n\n # decoding cross attention context\n context_hiddens = self.latent2hidden(z)\n\n tgt_hiddens = self.decoder(\n input_ids=tgt, decoder_mask=tgt_mask, encoder_embeddings=context_hiddens, encoder_mask=enc_mask,\n )\n\n # build decoding distribution\n tgt_log_probs = self.log_softmax(hidden_states=tgt_hiddens)\n\n return z, z_mean, z_logv, z_mask, tgt_log_probs\n\n @torch.no_grad()\n def batch_translate(self, src: torch.LongTensor, src_mask: torch.LongTensor, return_beam_scores: bool = False):\n \"\"\"\n Translates a minibatch of inputs from source language to target language.\n Args:\n src: minibatch of inputs in the src language (batch x seq_len)\n src_mask: mask tensor indicating elements to be ignored (batch x seq_len)\n Returns:\n translations: a list of strings containing detokenized translations\n inputs: a list of strings containing detokenized inputs\n \"\"\"\n mode = self.training\n try:\n self.eval()\n enc_hiddens, enc_mask = self.encoder(input_ids=src, encoder_mask=src_mask, return_mask=True)\n\n # build posterior distribution q(z|x)\n z, z_mean, _ = self.encode_latent(hidden=enc_hiddens)\n\n if getattr(self, \"deterministic_translate\", True):\n z = z_mean\n\n # decoding cross attention context\n context_hiddens = self.latent2hidden(z)\n\n best_translations = self.beam_search(\n encoder_hidden_states=context_hiddens,\n encoder_input_mask=enc_mask,\n return_beam_scores=return_beam_scores,\n )\n if return_beam_scores:\n all_translations, scores, best_translations = best_translations\n scores = scores.view(-1)\n all_translations = self.ids_to_postprocessed_text(\n all_translations, self.decoder_tokenizer, self.target_processor, filter_beam_ids=True\n )\n\n best_translations = self.ids_to_postprocessed_text(\n best_translations, self.decoder_tokenizer, self.target_processor, filter_beam_ids=True\n )\n inputs = self.ids_to_postprocessed_text(\n src, self.encoder_tokenizer, self.source_processor, filter_beam_ids=False\n )\n\n finally:\n self.train(mode=mode)\n if return_beam_scores:\n return inputs, all_translations, scores.data.cpu().numpy().tolist(), best_translations\n\n return inputs, best_translations\n\n def training_step(self, batch, batch_idx):\n \"\"\"\n Lightning calls this inside the training loop with the data from the training dataloader\n passed in as `batch`.\n \"\"\"\n # forward pass\n for i in range(len(batch)):\n if batch[i].ndim == 3:\n # Dataset returns already batched data and the first dimension of size 1 added by DataLoader\n # is excess.\n batch[i] = batch[i].squeeze(dim=0)\n src_ids, src_mask, tgt_ids, tgt_mask, labels = batch\n z, z_mean, z_logv, z_mask, tgt_log_probs = self(src_ids, src_mask, tgt_ids, tgt_mask)\n train_loss, info_dict = self.loss(\n z=z,\n z_mean=z_mean,\n z_logv=z_logv,\n z_mask=z_mask,\n tgt_log_probs=tgt_log_probs,\n tgt=tgt_ids,\n tgt_mask=tgt_mask,\n tgt_labels=labels,\n train=True,\n return_info=True,\n )\n tensorboard_logs = {\n 'train_loss': train_loss,\n 'lr': self._optimizer.param_groups[0]['lr'],\n }\n tensorboard_logs.update(info_dict)\n\n return {'loss': train_loss, 'log': tensorboard_logs}\n\n def eval_step(self, batch, 
batch_idx, mode, dataloader_idx=0):\n for i in range(len(batch)):\n if batch[i].ndim == 3:\n # Dataset returns already batched data and the first dimension of size 1 added by DataLoader\n # is excess.\n batch[i] = batch[i].squeeze(dim=0)\n\n if self.multilingual:\n self.source_processor = self.source_processor_list[dataloader_idx]\n self.target_processor = self.target_processor_list[dataloader_idx]\n\n src_ids, src_mask, tgt_ids, tgt_mask, labels = batch\n z, z_mean, z_logv, z_mask, tgt_log_probs = self(src_ids, src_mask, tgt_ids, tgt_mask)\n eval_loss, info_dict = self.loss(\n z=z,\n z_mean=z_mean,\n z_logv=z_logv,\n z_mask=z_mask,\n tgt_log_probs=tgt_log_probs,\n tgt=tgt_ids,\n tgt_mask=tgt_mask,\n tgt_labels=labels,\n train=False,\n return_info=True,\n )\n # this will run encoder twice -- TODO: potentially fix\n _, translations = self.batch_translate(src=src_ids, src_mask=src_mask)\n\n num_measurements = labels.shape[0] * labels.shape[1]\n if dataloader_idx == 0:\n getattr(self, f'{mode}_loss')(\n loss=eval_loss, num_measurements=num_measurements,\n )\n else:\n getattr(self, f'{mode}_loss_{dataloader_idx}')(\n loss=eval_loss, num_measurements=num_measurements,\n )\n np_tgt = tgt_ids.detach().cpu().numpy()\n ground_truths = [self.decoder_tokenizer.ids_to_text(tgt) for tgt in np_tgt]\n ground_truths = [self.target_processor.detokenize(tgt.split(' ')) for tgt in ground_truths]\n num_non_pad_tokens = np.not_equal(np_tgt, self.decoder_tokenizer.pad_id).sum().item()\n return {\n 'translations': translations,\n 'ground_truths': ground_truths,\n 'num_non_pad_tokens': num_non_pad_tokens,\n 'log': {k: v.detach().cpu().numpy() if torch.is_tensor(v) else v for k, v in info_dict.items()},\n }\n" ]
[ [ "torch.ones_like", "torch.randn_like", "torch.nn.Linear", "torch.zeros_like", "torch.no_grad", "numpy.not_equal", "torch.exp", "torch.is_tensor", "torch.nn.Identity", "numpy.mean" ] ]
szabolcsdombi/zengl
[ "2c9c26784285f2f049fb5d6fc9da0ad65d32d52f" ]
[ "examples/heightmap_terrain.py" ]
[ "import imageio\nimport numpy as np\nimport zengl\nfrom skimage.filters import gaussian\n\nimport assets\nfrom window import Window\n\nimageio.plugins.freeimage.download()\nimg = imageio.imread(assets.get('Terrain002.exr')) # https://ambientcg.com/view?id=Terrain002\n\nnormals = np.zeros((512, 512, 3))\nnormals[:, 1:-1, 0] = img[:, :-2, 0] - img[:, 2:, 0]\nnormals[1:-1, :, 1] = img[:-2, :, 0] - img[2:, :, 0]\nnormals[:, :, 0] = gaussian(normals[:, :, 0])\nnormals[:, :, 1] = gaussian(normals[:, :, 1])\nnormals[:, :, 2] = 0.01\n\nnormals /= np.repeat(np.sum(np.sqrt(normals * normals), axis=2), 3).reshape(512, 512, 3)\nnormals = normals * 0.5 + 0.5\n\nnorm_img = np.full((512, 512, 4), 255, 'u1')\nnorm_img[:, :, :3] = np.clip(normals * 255, 0, 255)\n\ncolor_img = np.full((512, 512, 4), 255, 'u1')\ngray = np.random.randint(0, 32, (512, 512))\nshade = np.where(gaussian(normals[:, :, 2]) > 0.75, 200, 50).astype('u1')\ncolor_img[:, :, 0] = gray + shade\ncolor_img[:, :, 1] = gray + shade\ncolor_img[:, :, 2] = gray + shade\n\n\ndef create_terrain(N):\n vert = np.zeros((N * N, 2), 'i4')\n idx = np.full((N - 1, N * 2 + 1), -1, 'i4')\n vert[:] = np.array([np.repeat(np.arange(N), N), np.tile(np.arange(N), N)]).T\n idx[:, :-1] = (np.repeat(np.arange(N * N - N), 2) + np.tile([0, N], N * N - N)).reshape(-1, N * 2)\n return vert, idx\n\n\nwindow = Window(1280, 720)\nctx = zengl.context()\n\nimage = ctx.image(window.size, 'rgba8unorm', samples=4)\ndepth = ctx.image(window.size, 'depth24plus', samples=4)\nimage.clear_value = (1.0, 1.0, 1.0, 1.0)\n\nvertices, indices = create_terrain(512)\nvertex_buffer = ctx.buffer(vertices)\nindex_buffer = ctx.buffer(indices)\n\nimg = imageio.imread(assets.get('Terrain002.exr'))\nheightmap = ctx.image((512, 512), 'r32float', img[:, :, 0].tobytes())\nnormalmap = ctx.image((512, 512), 'rgba8unorm', norm_img.tobytes())\ncolormap = ctx.image((512, 512), 'rgba8unorm', color_img.tobytes())\n\nuniform_buffer = ctx.buffer(size=64)\n\nctx.includes['terrain_info'] = '''\n const vec2 TerrainSize = vec2(512.0, 512.0);\n const vec3 TerrainScale = vec3(0.1, 0.1, 10.0);\n const vec3 TerrainPosition = vec3(-25.6, -25.6, 0.0);\n'''\n\nterrain = ctx.pipeline(\n vertex_shader='''\n #version 330\n\n #include \"terrain_info\"\n\n layout (std140) uniform Common {\n mat4 mvp;\n };\n\n uniform sampler2D Heightmap;\n uniform sampler2D Normalmap;\n\n layout (location = 0) in ivec2 in_vert;\n\n out vec3 v_normal;\n out vec2 v_texcoord;\n\n void main() {\n v_normal = texelFetch(Normalmap, in_vert, 0).rgb * 2.0 - 1.0;\n float z = texelFetch(Heightmap, in_vert, 0).r;\n v_texcoord = (vec2(in_vert) + 0.5) / TerrainSize;\n gl_Position = mvp * vec4(vec3(in_vert, z) * TerrainScale + TerrainPosition, 1.0);\n }\n ''',\n fragment_shader='''\n #version 330\n\n in vec3 v_normal;\n in vec2 v_texcoord;\n\n uniform sampler2D Colormap;\n\n layout (location = 0) out vec4 out_color;\n\n void main() {\n vec3 light = vec3(4.0, 3.0, 10.0);\n vec3 color = texture(Colormap, v_texcoord).rgb;\n float lum = dot(normalize(light), normalize(v_normal)) * 0.7 + 0.3;\n out_color = vec4(color * lum, 1.0);\n }\n ''',\n layout=[\n {\n 'name': 'Common',\n 'binding': 0,\n },\n {\n 'name': 'Heightmap',\n 'binding': 0,\n },\n {\n 'name': 'Normalmap',\n 'binding': 1,\n },\n {\n 'name': 'Colormap',\n 'binding': 2,\n },\n ],\n resources=[\n {\n 'type': 'uniform_buffer',\n 'binding': 0,\n 'buffer': uniform_buffer,\n },\n {\n 'type': 'sampler',\n 'binding': 0,\n 'image': heightmap,\n },\n {\n 'type': 'sampler',\n 'binding': 1,\n 'image': 
normalmap,\n },\n {\n 'type': 'sampler',\n 'binding': 2,\n 'image': colormap,\n },\n ],\n framebuffer=[image, depth],\n primitive_restart=True,\n topology='triangle_strip',\n cull_face='back',\n vertex_buffers=zengl.bind(vertex_buffer, '2i', 0),\n index_buffer=index_buffer,\n vertex_count=index_buffer.size // 4,\n)\n\nwhile window.update():\n x, y = np.sin(window.time * 0.5) * 30.0, np.cos(window.time * 0.5) * 30.0\n camera = zengl.camera((x, y, 25.0), (0.0, 0.0, 0.0), aspect=window.aspect, fov=45.0)\n uniform_buffer.write(camera)\n\n image.clear()\n depth.clear()\n terrain.render()\n image.blit()\n" ]
[ [ "numpy.tile", "numpy.zeros", "numpy.cos", "numpy.arange", "numpy.clip", "numpy.full", "numpy.sqrt", "numpy.sin", "numpy.random.randint" ] ]
RalfGuder/LaTeX-examples
[ "cd0d97f85fadb59b7c6e9062b37a8bf7d725ba0c" ]
[ "documents/math-minimal-distance-to-cubic-function/calcMinDist.py" ]
[ "#!/usr/bin/env python\n\nimport numpy\n\n\nclass Point:\n \"\"\"Represents a point in 2D.\"\"\"\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n\ndef euclidean_dist(p1, p2):\n \"\"\"Euclidean distance of two 2D points.\"\"\"\n from math import sqrt\n return sqrt((p1.x-p2.x)**2 + (p1.y-p2.y)**2)\n\n\ndef get_min_dist(p1, precision=0.001, start_x=0, end_x=3):\n \"\"\"Get x of point on (x,x^2) that has minimal distance to given Point p.\"\"\"\n min_dist = -1\n for x in numpy.arange(start_x, end_x, precision):\n p2 = Point(x, x**2)\n dist = euclidean_dist(p1, p2)\n if min_dist == -1 or dist < min_dist:\n min_dist = dist\n return min_dist\n\n\"\"\"for i in numpy.arange(0, 3, 0.01):\n min_dist = get_min_dist(Point(0, i))\n if abs(i-min_dist) < 0.005:\n print(i, min_dist)\"\"\"\n\nprint(get_min_dist(Point(0, 4.25), precision=0.001, start_x=0, end_x=3))\n# print(euclidean_dist(Point(0,5),Point(2, 2**2)))\n\n# print(get_min_dist(5, 0.00001, 2, 3))\n" ]
[ [ "numpy.arange" ] ]
lisong996/akshare
[ "1ee414cdecc2a492f2bb2f40d326a627c46dae2b", "1ee414cdecc2a492f2bb2f40d326a627c46dae2b" ]
[ "akshare/qhkc_web/qhkc_index.py", "akshare/stock_fundamental/stock_register.py" ]
[ "# -*- coding:utf-8 -*-\n#!/usr/bin/env python\n\"\"\"\nDate: 2019/9/30 13:58\nDesc: 奇货可查网站目前已经商业化运营, 特提供奇货可查-指数数据接口, 方便您程序化调用\n注:期货价格为收盘价; 现货价格来自网络; 基差=现货价格-期货价格; 基差率=(现货价格-期货价格)/现货价格 * 100 %.\n\"\"\"\nfrom typing import AnyStr\n\nimport pandas as pd\nimport requests\n\nfrom akshare.futures.cons import (\n QHKC_INDEX_URL,\n QHKC_INDEX_TREND_URL,\n QHKC_INDEX_PROFIT_LOSS_URL,\n)\n\n\ndef get_qhkc_index(name: AnyStr = \"奇货商品\", url: AnyStr = QHKC_INDEX_URL):\n \"\"\"\n 奇货可查-指数-指数详情\n 获得奇货可查的指数数据: '奇货黑链', '奇货商品', '奇货谷物', '奇货贵金属', '奇货饲料', '奇货软商品', '奇货化工', '奇货有色', '奇货股指', '奇货铁合金', '奇货油脂'\n :param url: 网址\n :param name 中文名称\n :return: pd.DataFrame\n date price volume ... margin profit long_short_ratio\n 2013-01-04 1000 260820 ... 1130485758 1816940 52.78\n 2013-01-07 998.244 245112 ... 1132228518 2514410 52.15\n 2013-01-08 1000.8 318866 ... 1160374489 2981010 51.99\n 2013-01-09 998.661 247352 ... 1166611242 3904220 52.44\n 2013-01-10 999.802 161292 ... 1153164771 1448190 52.81\n ... ... ... ... ... ... ...\n 2019-09-24 845.391 881138 ... 1895149977 128379050 48.5\n 2019-09-25 845.674 715180 ... 1797235248 128788230 48.29\n 2019-09-26 840.154 1347570 ... 1730488227 137104890 48.44\n 2019-09-27 834.831 920160 ... 1605342767 143128540 48.77\n 2019-09-30 831.959 1031558 ... 1521875378 147810580 48.82\n \"\"\"\n name_id_dict = {}\n qhkc_index_url = \"https://qhkch.com/ajax/official_indexes.php\"\n r = requests.post(qhkc_index_url)\n display_name = [item[\"name\"] for item in r.json()[\"data\"]]\n index_id = [item[\"id\"] for item in r.json()[\"data\"]]\n for item in range(len(display_name)):\n name_id_dict[display_name[item]] = index_id[item]\n payload_id = {\"id\": name_id_dict[name]}\n r = requests.post(url, data=payload_id)\n print(name, \"数据获取成功\")\n json_data = r.json()\n date = json_data[\"data\"][\"date\"]\n price = json_data[\"data\"][\"price\"]\n volume = json_data[\"data\"][\"volume\"]\n open_interest = json_data[\"data\"][\"openint\"]\n total_value = json_data[\"data\"][\"total_value\"]\n profit = json_data[\"data\"][\"profit\"]\n long_short_ratio = json_data[\"data\"][\"line\"]\n df_temp = pd.DataFrame(\n [date, price, volume, open_interest, total_value, profit, long_short_ratio]\n ).T\n df_temp.columns = [\n \"date\",\n \"price\",\n \"volume\",\n \"open_interest\",\n \"margin\",\n \"profit\",\n \"long_short_ratio\",\n ]\n return df_temp\n\n\ndef get_qhkc_index_trend(name: AnyStr = \"奇货商品\", url: AnyStr = QHKC_INDEX_TREND_URL):\n \"\"\"\n 奇货可查-指数-大资金动向\n 获得奇货可查的指数数据: '奇货黑链', '奇货商品', '奇货谷物', '奇货贵金属', '奇货饲料', '奇货软商品', '奇货化工', '奇货有色', '奇货股指', '奇货铁合金', '奇货油脂'\n :param url: 网址\n :param name None\n :return: pd.DataFrame\n broker grade money open_order variety\n 中金期货 B -3.68209e+07 3.68209e+07 沪金\n 浙商期货 D -25845534 25845534 沪银\n 永安期货 A -25614000 25614000 沪银\n 招商期货 D -23517351 23517351 沪银\n 海通期货 A 21440845 21440845 沪金\n 美尔雅 D 21370975 21370975 沪金\n 中原期货 C -21204612 21204612 沪银\n 国投安信 A -1.52374e+07 1.52374e+07 沪银\n 中信期货 C 1.50941e+07 1.50941e+07 沪银\n 海通期货 A -1.47184e+07 1.47184e+07 沪银\n 方正中期 E -1.31432e+07 1.31432e+07 沪银\n 东证期货 D -1.283e+07 1.283e+07 沪银\n 一德期货 A 1.24973e+07 1.24973e+07 沪银\n 国投安信 A -11602860 11602860 沪金\n 国泰君安 B -1.09363e+07 1.09363e+07 沪金\n 华安期货 D -9.99499e+06 9.99499e+06 沪金\n 南华期货 B -9.23675e+06 9.23675e+06 沪银\n 国贸期货 B 8.55245e+06 8.55245e+06 沪银\n 道通期货 C 8527675 8527675 沪金\n 招商期货 D -7.85457e+06 7.85457e+06 沪金\n 东方财富 E -7.58235e+06 7.58235e+06 沪银\n 五矿经易 A 6.95354e+06 6.95354e+06 沪银\n 银河期货 B 6.84522e+06 6.84522e+06 沪银\n 国贸期货 B 6731025 6731025 沪金\n 平安期货 D -6710418 6710418 沪银\n 
上海中期 C 6628800 6628800 沪金\n 中信期货 C -6345830 6345830 沪金\n 银河期货 B -6126295 6126295 沪金\n 华泰期货 A -5.96254e+06 5.96254e+06 沪金\n 招金期货 E -5.53029e+06 5.53029e+06 沪银\n 东证期货 D -5.47486e+06 5.47486e+06 沪金\n 光大期货 C -5334730 5334730 沪金\n 广发期货 D 5.31904e+06 5.31904e+06 沪金\n 国信期货 D -5.05211e+06 5.05211e+06 沪金\n \"\"\"\n name_id_dict = {}\n qhkc_index_url = \"https://qhkch.com/ajax/official_indexes.php\"\n r = requests.post(qhkc_index_url)\n display_name = [item[\"name\"] for item in r.json()[\"data\"]]\n index_id = [item[\"id\"] for item in r.json()[\"data\"]]\n for item in range(len(display_name)):\n name_id_dict[display_name[item]] = index_id[item]\n payload_id = {\"page\": 1, \"limit\": 10, \"index\": name_id_dict[name], \"date\": \"\"}\n r = requests.post(url, data=payload_id)\n print(f\"{name}期货指数-大资金动向数据获取成功\")\n json_data = r.json()\n df_temp = pd.DataFrame()\n for item in json_data[\"data\"]:\n broker = item[\"broker\"]\n grade = item[\"grade\"]\n money = item[\"money\"]\n order_money = item[\"order_money\"]\n variety = item[\"variety\"]\n df_temp = df_temp.append(\n pd.DataFrame([broker, grade, money, order_money, variety]).T\n )\n df_temp.columns = [\"broker\", \"grade\", \"money\", \"open_order\", \"variety\"]\n df_temp.reset_index(drop=True, inplace=True)\n return df_temp\n\n\ndef get_qhkc_index_profit_loss(\n name: AnyStr = \"奇货商品\",\n url: AnyStr = QHKC_INDEX_PROFIT_LOSS_URL,\n start_date=\"\",\n end_date=\"\",\n):\n \"\"\"\n 奇货可查-指数-盈亏详情\n 获得奇货可查的指数数据: '奇货黑链', '奇货商品', '奇货谷物', '奇货贵金属', '奇货饲料', '奇货软商品', '奇货化工', '奇货有色', '奇货股指', '奇货铁合金', '奇货油脂'\n :param url: 网址\n :param name None\n :param start_date: \"\"\n :param end_date: \"20190716\" 指定 end_date 就可以了\n :return: pd.DataFrame\n indexes value trans_date\n 招金期货-沪金 -307489200 2019-09-30\n 平安期货-沪银 -195016650 2019-09-30\n 建信期货-沪银 -160327350 2019-09-30\n 国贸期货-沪银 -159820965 2019-09-30\n 东证期货-沪银 -123508635 2019-09-30\n ... ... 
...\n 永安期货-沪银 187411350 2019-09-30\n 中信期货-沪金 242699750 2019-09-30\n 华泰期货-沪银 255766185 2019-09-30\n 永安期货-沪金 293008700 2019-09-30\n 国泰君安-沪金 302774950 2019-09-30\n \"\"\"\n name_id_dict = {}\n qhkc_index_url = \"https://qhkch.com/ajax/official_indexes.php\"\n r = requests.post(qhkc_index_url)\n display_name = [item[\"name\"] for item in r.json()[\"data\"]]\n index_id = [item[\"id\"] for item in r.json()[\"data\"]]\n for item in range(len(display_name)):\n name_id_dict[display_name[item]] = index_id[item]\n payload_id = {\"index\": name_id_dict[name], \"date1\": start_date, \"date2\": end_date}\n r = requests.post(url, data=payload_id)\n print(f\"{name}期货指数-盈亏分布数据获取成功\")\n json_data = r.json()\n indexes = json_data[\"data\"][\"indexes\"]\n value = json_data[\"data\"][\"value\"]\n trans_date = [json_data[\"data\"][\"trans_date\"]] * len(value)\n df_temp = pd.DataFrame([indexes, value, trans_date]).T\n df_temp.columns = [\"indexes\", \"value\", \"trans_date\"]\n return df_temp\n\n\nif __name__ == \"__main__\":\n data = get_qhkc_index(\"奇货谷物\")\n print(data)\n data = get_qhkc_index_trend(\"奇货贵金属\")\n print(data)\n data = get_qhkc_index_profit_loss(\"奇货贵金属\", end_date=\"20190716\")\n print(data)\n", "# -*- coding:utf-8 -*-\n#!/usr/bin/env python\n\"\"\"\nDate: 2021/4/6 15:19\nDesc: 东方财富网-数据中心-新股数据-注册制审核\nhttp://data.eastmoney.com/kcb/?type=nsb\n\"\"\"\nimport pandas as pd\nimport requests\n\n\ndef stock_register_kcb() -> pd.DataFrame:\n \"\"\"\n 东方财富网-数据中心-新股数据-注册制审核-科创板\n http://data.eastmoney.com/kcb/?type=nsb\n :return: 科创板注册制审核结果\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"https://datacenter.eastmoney.com/securities/api/data/get\"\n params = {\n 'st': 'UPDATE_DATE',\n 'sr': '-1',\n 'ps': '5000',\n 'p': '1',\n 'type': 'RPT_REGISTERED_INFO',\n 'sty': 'ORG_CODE,ORG_CODE_OLD,ISSUER_NAME,CHECK_STATUS,CHECK_STATUS_CODE,REG_ADDRESS,CSRC_INDUSTRY,RECOMMEND_ORG,LAW_FIRM,ACCOUNT_FIRM,UPDATE_DATE,ACCEPT_DATE,TOLIST_MARKET,SECURITY_CODE',\n 'token': '894050c76af8597a853f5b408b759f5d',\n 'client': 'WEB',\n 'filter': '(TOLIST_MARKET=\"科创板\")',\n }\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'\n }\n r = requests.get(url, params=params, headers=headers)\n data_json = r.json()\n page_num = data_json['result']['pages']\n big_df = pd.DataFrame()\n for page in range(1, page_num+1):\n params = {\n 'st': 'UPDATE_DATE',\n 'sr': '-1',\n 'ps': '5000',\n 'p': page,\n 'type': 'RPT_REGISTERED_INFO',\n 'sty': 'ORG_CODE,ORG_CODE_OLD,ISSUER_NAME,CHECK_STATUS,CHECK_STATUS_CODE,REG_ADDRESS,CSRC_INDUSTRY,RECOMMEND_ORG,LAW_FIRM,ACCOUNT_FIRM,UPDATE_DATE,ACCEPT_DATE,TOLIST_MARKET,SECURITY_CODE',\n 'token': '894050c76af8597a853f5b408b759f5d',\n 'client': 'WEB',\n 'filter': '(TOLIST_MARKET=\"科创板\")',\n }\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'\n }\n r = requests.get(url, params=params, headers=headers)\n data_json = r.json()\n temp_df = pd.DataFrame(data_json['result'][\"data\"])\n big_df = big_df.append(temp_df, ignore_index=True)\n big_df.reset_index(inplace=True)\n big_df['index'] = range(1, len(big_df) + 1)\n big_df.columns = [\n \"序号\",\n \"_\",\n \"_\",\n \"发行人全称\",\n \"审核状态\",\n \"_\",\n \"注册地\",\n \"证监会行业\",\n \"保荐机构\",\n \"律师事务所\",\n \"会计师事务所\",\n \"更新日期\",\n \"受理日期\",\n \"拟上市地点\",\n \"_\",\n ]\n big_df = big_df[\n [\n \"序号\",\n \"发行人全称\",\n \"审核状态\",\n \"注册地\",\n \"证监会行业\",\n \"保荐机构\",\n \"律师事务所\",\n \"会计师事务所\",\n 
\"更新日期\",\n \"受理日期\",\n \"拟上市地点\",\n ]\n ]\n return big_df\n\n\ndef stock_register_cyb() -> pd.DataFrame:\n \"\"\"\n 东方财富网-数据中心-新股数据-注册制审核-创业板\n http://data.eastmoney.com/xg/cyb/\n :return: 创业板注册制审核结果\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"https://datacenter.eastmoney.com/securities/api/data/get\"\n params = {\n 'st': 'UPDATE_DATE',\n 'sr': '-1',\n 'ps': '5000',\n 'p': '1',\n 'type': 'RPT_REGISTERED_INFO',\n 'sty': 'ORG_CODE,ORG_CODE_OLD,ISSUER_NAME,CHECK_STATUS,CHECK_STATUS_CODE,REG_ADDRESS,CSRC_INDUSTRY,RECOMMEND_ORG,LAW_FIRM,ACCOUNT_FIRM,UPDATE_DATE,ACCEPT_DATE,TOLIST_MARKET,SECURITY_CODE',\n 'token': '894050c76af8597a853f5b408b759f5d',\n 'client': 'WEB',\n 'filter': '(TOLIST_MARKET=\"创业板\")',\n }\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'\n }\n r = requests.get(url, params=params, headers=headers)\n data_json = r.json()\n page_num = data_json['result']['pages']\n big_df = pd.DataFrame()\n for page in range(1, page_num+1):\n params = {\n 'st': 'UPDATE_DATE',\n 'sr': '-1',\n 'ps': '5000',\n 'p': page,\n 'type': 'RPT_REGISTERED_INFO',\n 'sty': 'ORG_CODE,ORG_CODE_OLD,ISSUER_NAME,CHECK_STATUS,CHECK_STATUS_CODE,REG_ADDRESS,CSRC_INDUSTRY,RECOMMEND_ORG,LAW_FIRM,ACCOUNT_FIRM,UPDATE_DATE,ACCEPT_DATE,TOLIST_MARKET,SECURITY_CODE',\n 'token': '894050c76af8597a853f5b408b759f5d',\n 'client': 'WEB',\n 'filter': '(TOLIST_MARKET=\"创业板\")',\n }\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'\n }\n r = requests.get(url, params=params, headers=headers)\n data_json = r.json()\n temp_df = pd.DataFrame(data_json['result'][\"data\"])\n big_df = big_df.append(temp_df, ignore_index=True)\n big_df.reset_index(inplace=True)\n big_df['index'] = range(1, len(big_df) + 1)\n big_df.columns = [\n \"序号\",\n \"_\",\n \"_\",\n \"发行人全称\",\n \"审核状态\",\n \"_\",\n \"注册地\",\n \"证监会行业\",\n \"保荐机构\",\n \"律师事务所\",\n \"会计师事务所\",\n \"更新日期\",\n \"受理日期\",\n \"拟上市地点\",\n \"_\",\n ]\n big_df = big_df[\n [\n \"序号\",\n \"发行人全称\",\n \"审核状态\",\n \"注册地\",\n \"证监会行业\",\n \"保荐机构\",\n \"律师事务所\",\n \"会计师事务所\",\n \"更新日期\",\n \"受理日期\",\n \"拟上市地点\",\n ]\n ]\n return big_df\n\n\ndef stock_register_db() -> pd.DataFrame:\n \"\"\"\n 东方财富网-数据中心-新股数据-注册制审核-达标企业\n http://data.eastmoney.com/xg/cyb/\n :return: 达标企业\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"http://dcfm.eastmoney.com/EM_MutiSvcExpandInterface/api/js/get\"\n params = {\n 'st': 'eutime',\n 'sr': '-1',\n 'ps': '5000',\n 'p': '1',\n 'type': 'KCB_LB',\n 'js': '{\"data\":(x),\"pages\":(tp)}',\n 'token': '894050c76af8597a853f5b408b759f5d',\n 'filter': \"(MKT='fnsb')\",\n }\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'\n }\n r = requests.get(url, params=params, headers=headers)\n data_json = r.json()\n page_num = data_json['pages']\n big_df = pd.DataFrame()\n for page in range(1, page_num+1):\n params = {\n 'st': 'eutime',\n 'sr': '-1',\n 'ps': '5000',\n 'p': page,\n 'type': 'KCB_LB',\n 'js': '{\"data\":(x),\"pages\":(tp)}',\n 'token': '894050c76af8597a853f5b408b759f5d',\n 'filter': \"(MKT='fnsb')\",\n }\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'\n }\n r = requests.get(url, params=params, headers=headers)\n data_json = r.json()\n temp_df = pd.DataFrame(data_json['data'])\n big_df = 
big_df.append(temp_df, ignore_index=True)\n big_df.reset_index(inplace=True)\n big_df['index'] = range(1, len(big_df) + 1)\n big_df.columns = [\n \"序号\",\n \"_\",\n \"_\",\n \"_\",\n \"企业名称\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"经营范围\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"近三年营业收入-2019\",\n \"近三年净利润-2019\",\n \"近三年研发费用-2019\",\n \"近三年营业收入-2018\",\n \"近三年净利润-2018\",\n \"近三年研发费用-2018\",\n \"近三年营业收入-2017\",\n \"近三年净利润-2017\",\n \"近三年研发费用-2017\",\n \"近两年累计净利润\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n ]\n big_df = big_df[\n [\n \"序号\",\n \"企业名称\",\n \"经营范围\",\n \"近三年营业收入-2019\",\n \"近三年净利润-2019\",\n \"近三年研发费用-2019\",\n \"近三年营业收入-2018\",\n \"近三年净利润-2018\",\n \"近三年研发费用-2018\",\n \"近三年营业收入-2017\",\n \"近三年净利润-2017\",\n \"近三年研发费用-2017\",\n \"近两年累计净利润\",\n ]\n ]\n return big_df\n\n\nif __name__ == \"__main__\":\n stock_register_kcb_df = stock_register_kcb()\n print(stock_register_kcb_df)\n\n stock_register_cyb_df = stock_register_cyb()\n print(stock_register_cyb_df)\n\n stock_register_db_df = stock_register_db()\n print(stock_register_db_df)\n" ]
[ [ "pandas.DataFrame" ], [ "pandas.DataFrame" ] ]
AK391/anycost-gan
[ "a827390a77d6360ed6902511de447a503584c63f" ]
[ "cuda_op/fused_act.py" ]
[ "import os\n\nimport torch\nfrom torch import nn\nfrom torch.autograd import Function\nfrom torch.utils.cpp_extension import load\n\n\nmodule_path = os.path.dirname(__file__)\nfused = load(\n 'fused',\n sources=[\n os.path.join(module_path, 'fused_bias_act.cpp'),\n os.path.join(module_path, 'fused_bias_act_kernel.cu'),\n ],\n)\n\n\nclass FusedLeakyReLUFunctionBackward(Function):\n @staticmethod\n def forward(ctx, grad_output, out, negative_slope, scale):\n ctx.save_for_backward(out)\n ctx.negative_slope = negative_slope\n ctx.scale = scale\n\n empty = grad_output.new_empty(0)\n\n grad_input = fused.fused_bias_act(\n grad_output, empty, out, 3, 1, negative_slope, scale\n )\n\n dim = [0]\n\n if grad_input.ndim > 2:\n dim += list(range(2, grad_input.ndim))\n\n grad_bias = grad_input.sum(dim).detach()\n\n return grad_input, grad_bias\n\n @staticmethod\n def backward(ctx, gradgrad_input, gradgrad_bias):\n out, = ctx.saved_tensors\n gradgrad_out = fused.fused_bias_act(\n gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale\n )\n\n return gradgrad_out, None, None, None\n\n\nclass FusedLeakyReLUFunction(Function):\n @staticmethod\n def forward(ctx, input, bias, negative_slope, scale):\n empty = input.new_empty(0)\n out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale)\n ctx.save_for_backward(out)\n ctx.negative_slope = negative_slope\n ctx.scale = scale\n\n return out\n\n @staticmethod\n def backward(ctx, grad_output):\n out, = ctx.saved_tensors\n\n grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(\n grad_output, out, ctx.negative_slope, ctx.scale\n )\n\n return grad_input, grad_bias, None, None\n\n\nclass FusedLeakyReLU(nn.Module):\n def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):\n super().__init__()\n\n self.bias = nn.Parameter(torch.zeros(channel))\n self.negative_slope = negative_slope\n self.scale = scale\n\n def forward(self, input):\n return fused_leaky_relu(input, self.bias[:input.shape[1]], self.negative_slope, self.scale)\n\n\ndef fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):\n return FusedLeakyReLUFunction.apply(input, bias[:input.shape[1]], negative_slope, scale)\n" ]
[ [ "torch.zeros" ] ]
guolinke/pytorch
[ "ad4b2571b605d2c2a7e288585469a06e79738eb9" ]
[ "torch/testing/_internal/common_methods_invocations.py" ]
[ "from functools import reduce, wraps, partial\nfrom itertools import product\nfrom operator import mul\nimport collections\nimport operator\nimport random\n\nimport torch\nimport numpy as np\nfrom torch._six import inf\nfrom torch.autograd import Variable\nimport collections.abc\n\nfrom typing import List, Sequence, Tuple, Dict, Any, Union\n\nfrom torch.testing import \\\n (make_non_contiguous, floating_types, floating_types_and, complex_types,\n floating_and_complex_types, floating_and_complex_types_and,\n all_types_and_complex_and, all_types_and, all_types_and_complex,\n integral_types_and, all_types)\nfrom .._core import _dispatch_dtypes\nfrom torch.testing._internal.common_device_type import \\\n (skipIf, skipCUDAIfNoMagma, skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfNoCusolver,\n skipCPUIfNoLapack, skipCPUIfNoMkl, skipCUDAIfRocm, precisionOverride,)\nfrom torch.testing._internal.common_cuda import CUDA11OrLater, SM53OrLater\nfrom torch.testing._internal.common_utils import \\\n (is_iterable_of_tensors,\n random_symmetric_matrix, random_symmetric_psd_matrix,\n make_fullrank_matrices_with_distinct_singular_values,\n random_symmetric_pd_matrix, make_symmetric_matrices,\n make_symmetric_pd_matrices,\n random_fullrank_matrix_distinct_singular_value, set_rng_seed, SEED,\n TEST_WITH_ROCM, IS_WINDOWS, IS_MACOS, make_tensor, TEST_SCIPY,\n torch_to_numpy_dtype_dict, slowTest, TEST_WITH_ASAN, _wrap_warn_once,\n GRADCHECK_NONDET_TOL,)\n\nfrom setuptools import distutils\n\nif TEST_SCIPY:\n import scipy.special\n\n\nclass DecorateInfo(object):\n \"\"\"Describes which test, or type of tests, should be wrapped in the given\n decorators when testing an operator. Any test that matches all provided\n arguments will be decorated. The decorators will only be applied if the\n active_if argument is True.\"\"\"\n\n __slots__ = ['decorators', 'cls_name', 'test_name', 'device_type', 'dtypes', 'active_if']\n\n def __init__(self, decorators, cls_name=None, test_name=None, *,\n device_type=None, dtypes=None, active_if=True):\n self.decorators = list(decorators) if isinstance(decorators, collections.abc.Sequence) else [decorators]\n self.cls_name = cls_name\n self.test_name = test_name\n self.device_type = device_type\n self.dtypes = dtypes\n self.active_if = active_if\n\n def is_active(self, cls_name, test_name, device_type, dtype):\n return (\n self.active_if and\n (self.cls_name is None or self.cls_name == cls_name) and\n (self.test_name is None or self.test_name == test_name) and\n (self.device_type is None or self.device_type == device_type) and\n (self.dtypes is None or dtype in self.dtypes)\n )\n\n\nclass SkipInfo(DecorateInfo):\n \"\"\"Describes which test, or type of tests, should be skipped when testing\n an operator. 
Any test that matches all provided arguments will be skipped.\n The skip will only be checked if the active_if argument is True.\"\"\"\n\n def __init__(self, cls_name=None, test_name=None, *,\n device_type=None, dtypes=None, active_if=True):\n super().__init__(decorators=skipIf(True, \"Skipped!\"), cls_name=cls_name,\n test_name=test_name, device_type=device_type, dtypes=dtypes,\n active_if=active_if)\n\nclass SampleInput(object):\n \"\"\"Represents sample inputs to a function.\"\"\"\n\n __slots__ = ['input', 'args', 'kwargs', 'output_process_fn_grad', 'broadcasts_input']\n\n def __init__(self, input, *, args=tuple(), kwargs=None, output_process_fn_grad=None, broadcasts_input=False):\n # input is the first input to the op and must be either a Tensor or TensorList (Sequence[Tensor]).\n # This follows the typical pattern where for Tensor inputs op(t, ...) = t.op(...).\n # Ops with TensorList inputs do not support method or inplace variants.\n assert isinstance(input, torch.Tensor) or is_iterable_of_tensors(input)\n self.input: Union[torch.Tensor, Sequence[torch.Tensor]] = input\n self.args = args\n self.kwargs = kwargs if kwargs is not None else {}\n self.output_process_fn_grad = output_process_fn_grad\n\n # Specifies if `self.input` is broadcasted or not,\n # given that the operator supports broadcasting.\n # This field is used to verify the behavior for inplace variant.\n #\n # If a SampleInput is marked with `broadcasts_input=True`,\n # it is verified that we get a `RuntimeError` with this sample,\n # and inplace variant. Also inplace grad{grad} tests are skipped,\n # for such inputs (as they will error out otherwise).\n self.broadcasts_input = broadcasts_input\n\n def __repr__(self):\n arguments = [\n 'input=Tensor' if isinstance(self.input, torch.Tensor) else f'input=TensorList[{len(self.input)}]',\n f'args={self.args}' if len(self.args) > 0 else None,\n f'kwargs={self.kwargs}' if len(self.kwargs) > 0 else None,\n (f'output_process_fn_grad={self.output_process_fn_grad}'\n if self.output_process_fn_grad is not None else None),\n f'broadcasts_input={self.broadcasts_input}']\n\n return f'SampleInput({\", \".join(a for a in arguments if a is not None)})'\n\nclass AliasInfo(object):\n \"\"\"Class holds alias information. For example, torch.abs ->\n torch.absolute, torch.Tensor.absolute, torch.Tensor.absolute_\n \"\"\"\n\n def __init__(self, alias_name):\n self.name = alias_name\n self.op = _getattr_qual(torch, alias_name)\n self.method_variant = getattr(torch.Tensor, alias_name, None)\n self.inplace_variant = getattr(torch.Tensor, alias_name + \"_\", None)\n\n def __call__(self, *args, **kwargs):\n return self.op(*args, **kwargs)\n\n\n_NOTHING = object() # Unique value to distinguish default from anything else\n\n\n# Extension of getattr to support qualified names\n# e.g. 
_getattr_qual(torch, 'linalg.norm') -> torch.linalg.norm\ndef _getattr_qual(obj, name, default=_NOTHING):\n    try:\n        for path in name.split('.'):\n            obj = getattr(obj, path)\n        return obj\n    except AttributeError:\n        if default is not _NOTHING:\n            return default\n        else:\n            raise\n\n\n# Classes and methods for the operator database\nclass OpInfo(object):\n    \"\"\"Operator information and helper functions for acquiring it.\"\"\"\n\n    def __init__(self,\n                 name, # the string name of the function\n                 *,\n                 op=None, # the function variant of the operation, populated as torch.<name> if None\n                 dtypes=floating_types(), # dtypes this function is expected to work with\n                 dtypesIfCPU=None, # dtypes this function is expected to work with on CPU\n                 dtypesIfCUDA=None, # dtypes this function is expected to work with on CUDA\n                 dtypesIfROCM=None, # dtypes this function is expected to work with on ROCM\n                 backward_dtypes=None, # backward dtypes this function is expected to work with\n                 backward_dtypesIfCPU=None, # backward dtypes this function is expected to work with on CPU\n                 backward_dtypesIfCUDA=None, # backward dtypes this function is expected to work with on CUDA\n                 backward_dtypesIfROCM=None, # backward dtypes this function is expected to work with on ROCM\n                 default_test_dtypes=None, # dtypes to test with by default. Gets intersected\n                                           # with the dtypes supported on the tested device\n                 assert_autodiffed=False, # if an op's aten::node is expected to be symbolically autodiffed\n                 autodiff_nonfusible_nodes=None, # a list of strings with node names that are expected to be in a\n                                                 # DifferentiableGraph when autodiffed. Ex: ['aten::add', 'aten::mm'],\n                                                 # default is populated to be ['aten::(name of Python operator)']\n                 autodiff_fusible_nodes=None, # a list of strings with node names that are expected to be in FusionGroups\n                                              # inside of DifferentiableGraphs when this operation is autodiffed.\n                                              # Ex: ['aten::add', 'aten::mm'], defaults to an empty list\n                                              # Note: currently no ops use fusible nodes\n                 supports_out=True, # whether the op supports the out kwarg\n                 skips=tuple(), # information about which tests to skip\n                 decorators=None, # decorators to apply to generated tests\n                 safe_casts_outputs=False, # whether op allows safe casting when writing to out arguments\n                 sample_inputs_func=None, # function to generate sample inputs\n                 aten_name=None, # name of the corresponding aten:: operator\n                 aliases=None, # iterable of aliases, e.g. 
(\"absolute\",) for torch.abs\n variant_test_name='', # additional string to include in the test name\n supports_autograd=True, # support for autograd\n supports_gradgrad=True, # support second order gradients (this value is ignored if supports_autograd=False)\n supports_inplace_autograd=None, # whether the operation supports inplace autograd\n # defaults to supports_autograd's value\n supports_sparse=False, # whether the op supports sparse inputs\n gradcheck_wrapper=lambda op, *args, **kwargs: op(*args, **kwargs), # wrapper function for gradcheck\n check_batched_grad=True, # check batched grad when doing gradcheck\n check_batched_gradgrad=True, # check batched grad grad when doing gradgradcheck\n gradcheck_nondet_tol=0.0, # tolerance for nondeterminism while performing gradcheck\n gradcheck_fast_mode=None, # Whether to use the fast implmentation for gradcheck/gradgradcheck.\n # When set to None, defers to the default value provided by the wrapper\n # function around gradcheck (testing._internal.common_utils.gradcheck)\n ):\n\n # Validates the dtypes are generated from the dispatch-related functions\n for dtype_list in (dtypes, dtypesIfCPU, dtypesIfCUDA, dtypesIfROCM):\n assert isinstance(dtype_list, (_dispatch_dtypes, type(None)))\n\n self.name = name\n self.aten_name = aten_name if aten_name is not None else name\n self.variant_test_name = variant_test_name\n\n self.dtypes = set(dtypes)\n self.dtypesIfCPU = set(dtypesIfCPU) if dtypesIfCPU is not None else self.dtypes\n self.dtypesIfCUDA = set(dtypesIfCUDA) if dtypesIfCUDA is not None else self.dtypes\n self.dtypesIfROCM = set(dtypesIfROCM) if dtypesIfROCM is not None else self.dtypesIfCUDA\n\n self.backward_dtypes = set(backward_dtypes) if backward_dtypes is not None else self.dtypes\n self.backward_dtypesIfCPU = set(backward_dtypesIfCPU) if backward_dtypesIfCPU is not None else (\n self.dtypesIfCPU if dtypesIfCPU is not None else self.backward_dtypes)\n self.backward_dtypesIfCUDA = set(backward_dtypesIfCUDA) if backward_dtypesIfCUDA is not None else (\n self.dtypesIfCUDA if dtypesIfCUDA is not None else self.backward_dtypes)\n self.backward_dtypesIfROCM = set(backward_dtypesIfROCM) if backward_dtypesIfROCM is not None else (\n self.dtypesIfROCM if dtypesIfROCM is not None else self.backward_dtypesIfCUDA)\n\n self._default_test_dtypes = set(default_test_dtypes) if default_test_dtypes is not None else None\n\n # NOTE: if the op is unspecified it is assumed to be under the torch namespace\n self.op = op if op else _getattr_qual(torch, self.name)\n method_variant = getattr(torch.Tensor, name, None)\n # attributes like real, imag are not callable\n self.method_variant = method_variant if callable(method_variant) else None\n inplace_name = name + \"_\"\n self.inplace_variant = getattr(torch.Tensor, inplace_name, None)\n self.operator_variant = getattr(operator, name, None)\n\n self.supports_out = supports_out\n self.safe_casts_outputs = safe_casts_outputs\n\n self.skips = skips\n self.decorators = decorators\n self.sample_inputs_func = sample_inputs_func\n\n self.assert_autodiffed = assert_autodiffed\n self.autodiff_fusible_nodes = autodiff_fusible_nodes if autodiff_fusible_nodes else []\n if autodiff_nonfusible_nodes is None:\n self.autodiff_nonfusible_nodes = ['aten::' + self.name]\n else:\n self.autodiff_nonfusible_nodes = autodiff_nonfusible_nodes\n\n # autograd support\n self.supports_autograd = supports_autograd\n self.supports_inplace_autograd = supports_inplace_autograd\n if self.supports_inplace_autograd is None:\n 
self.supports_inplace_autograd = supports_autograd\n\n self.gradcheck_wrapper = gradcheck_wrapper\n self.supports_gradgrad = supports_gradgrad\n self.check_batched_grad = check_batched_grad\n self.check_batched_gradgrad = check_batched_gradgrad\n self.gradcheck_nondet_tol = gradcheck_nondet_tol\n self.gradcheck_fast_mode = gradcheck_fast_mode\n\n self.supports_sparse = supports_sparse\n\n self.aliases = ()\n if aliases is not None:\n self.aliases = tuple(AliasInfo(a) for a in aliases) # type: ignore[assignment]\n\n def __call__(self, *args, **kwargs):\n \"\"\"Calls the function variant of the operator.\"\"\"\n return self.op(*args, **kwargs)\n\n def get_op(self):\n \"\"\"Returns the function variant of the operator, torch.<op_name>.\"\"\"\n return self.op\n\n def get_method(self):\n \"\"\"Returns the method variant of the operator, torch.Tensor.<op_name>.\n Returns None if the operator has no method variant.\n \"\"\"\n return self.method_variant\n\n def get_inplace(self):\n \"\"\"Returns the inplace variant of the operator, torch.Tensor.<op_name>_.\n Returns None if the operator has no inplace variant.\n \"\"\"\n return self.inplace_variant\n\n def get_operator_variant(self):\n \"\"\"Returns operator variant of the operator, e.g. operator.neg\n Returns None if the operator has no operator variant.\n \"\"\"\n return self.operator_variant\n\n def sample_inputs(self, device, dtype, requires_grad=False, **kwargs):\n \"\"\"Returns an iterable of SampleInputs.\n\n These samples should be sufficient to test the function works correctly\n with autograd, TorchScript, etc.\n \"\"\"\n\n # TODO: Remove the try/except once all operators have sample_inputs_func with\n # **kwargs in their signature.\n try:\n samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs)\n except TypeError:\n samples = self.sample_inputs_func(self, device, dtype, requires_grad)\n return samples\n\n # Returns True if the test should be skipped and False otherwise\n def should_skip(self, cls_name, test_name, device_type, dtype):\n return any(si.is_active(cls_name, test_name, device_type, dtype)\n for si in self.skips)\n\n def supported_dtypes(self, device_type):\n if device_type == 'cpu':\n return self.dtypesIfCPU\n if device_type == 'cuda':\n return self.dtypesIfROCM if TEST_WITH_ROCM else self.dtypesIfCUDA\n else:\n return self.dtypes\n\n def supported_backward_dtypes(self, device_type):\n if device_type == 'cpu':\n return self.backward_dtypesIfCPU\n if device_type == 'cuda':\n return self.backward_dtypesIfROCM if TEST_WITH_ROCM else self.backward_dtypesIfCUDA\n else:\n return self.backward_dtypes\n\n def supports_complex_autograd(self, device_type):\n if device_type == 'cpu':\n return any(dtype.is_complex for dtype in self.backward_dtypesIfCPU)\n if device_type == 'cuda':\n if TEST_WITH_ROCM:\n return any(dtype.is_complex for dtype in self.backward_dtypesIfROCM)\n else:\n return any(dtype.is_complex for dtype in self.backward_dtypesIfCUDA)\n else:\n return any(dtype.is_complex for dtype in self.backward_dtypes)\n\n def supports_dtype(self, dtype, device_type):\n return dtype in self.supported_dtypes(device_type)\n\n def default_test_dtypes(self, device_type):\n \"\"\"Returns the default dtypes used to test this operator on the device.\n\n Equal to the operator's default_test_dtypes filtered to remove dtypes\n not supported by the device.\n \"\"\"\n supported = self.supported_dtypes(device_type)\n return (supported if self._default_test_dtypes is None\n else 
supported.intersection(self._default_test_dtypes))\n\n\nL = 20\nM = 10\nS = 5\n\n\ndef sample_inputs_unary(op_info, device, dtype, requires_grad, **kwargs):\n    low, high = op_info.domain\n    low = low if low is None else low + op_info._domain_eps\n    high = high if high is None else high - op_info._domain_eps\n\n    return (SampleInput(make_tensor((L,), device=device, dtype=dtype,\n                                    low=low, high=high,\n                                    requires_grad=requires_grad)),\n            SampleInput(make_tensor((), device=device, dtype=dtype,\n                                    low=low, high=high,\n                                    requires_grad=requires_grad)))\n\n# Metadata class for unary \"universal functions (ufuncs)\" that accept a single\n# tensor and have common properties like:\nclass UnaryUfuncInfo(OpInfo):\n    \"\"\"Operator information for 'universal unary functions (unary ufuncs).'\n    These are functions of a single tensor with common properties like:\n    - they are elementwise functions\n    - the input shape is the output shape\n    - they typically have method and inplace variants\n    - they typically support the out kwarg\n    - they typically have NumPy or SciPy references\n    See NumPy's universal function documentation\n    (https://numpy.org/doc/1.18/reference/ufuncs.html) for more details\n    about the concept of ufuncs.\n    \"\"\"\n\n    def __init__(self,\n                 name, # the string name of the function\n                 *,\n                 ref, # a reference function\n                 dtypes=floating_types(),\n                 dtypesIfCPU=None,\n                 dtypesIfCUDA=None,\n                 dtypesIfROCM=None,\n                 default_test_dtypes=(\n                     torch.uint8, torch.long, torch.half, torch.bfloat16,\n                     torch.float32, torch.cfloat), # dtypes which tests check by default\n                 domain=(None, None), # the [low, high) domain of the function\n                 handles_large_floats=True, # whether the op correctly handles large float values (like 1e20)\n                 handles_extremals=True, # whether the op correctly handles extremal values (like inf)\n                 handles_complex_extremals=True, # whether the op correctly handles complex extremals (like inf -infj)\n                 supports_complex_to_float=False, # op supports casting from complex input to real output safely, e.g. angle\n                 sample_inputs_func=sample_inputs_unary,\n                 sample_kwargs=lambda device, dtype, input: ({}, {}),\n                 supports_sparse=False,\n                 **kwargs):\n        super(UnaryUfuncInfo, self).__init__(name,\n                                             dtypes=dtypes,\n                                             dtypesIfCPU=dtypesIfCPU,\n                                             dtypesIfCUDA=dtypesIfCUDA,\n                                             dtypesIfROCM=dtypesIfROCM,\n                                             default_test_dtypes=default_test_dtypes,\n                                             sample_inputs_func=sample_inputs_func,\n                                             supports_sparse=supports_sparse,\n                                             **kwargs)\n        self.ref = ref\n        self.domain = domain\n        self.handles_large_floats = handles_large_floats\n        self.handles_extremals = handles_extremals\n        self.handles_complex_extremals = handles_complex_extremals\n        self.supports_complex_to_float = supports_complex_to_float\n\n        # test_unary_ufuncs.py generates its own inputs to test the consistency\n        # of the operator on sliced tensors, non-contig tensors, etc.\n        # `sample_kwargs` is a utility function to provide kwargs\n        # along with those inputs if required (e.g. 
clamp).\n        # It should return two dictionaries, the first holding kwargs for the\n        # torch operator and the second for the reference NumPy operator.\n        self.sample_kwargs = sample_kwargs\n\n        # Epsilon to ensure grad and gradgrad checks don't test values\n        # outside a function's domain.\n        self._domain_eps = 1e-5\n\ndef sample_inputs_tensor_split(op_info, device, dtype, requires_grad, **kwargs):\n    return (SampleInput(make_tensor((S, S, S), device, dtype,\n                                    low=None, high=None,\n                                    requires_grad=requires_grad),\n                        args=(torch.tensor([1, 2, 3]),),),\n            SampleInput(make_tensor((S, S, S), device, dtype,\n                                    low=None, high=None,\n                                    requires_grad=requires_grad),\n                        args=(torch.tensor(1),),),\n            SampleInput(make_tensor((S, S, S), device, dtype,\n                                    low=None, high=None,\n                                    requires_grad=requires_grad),\n                        args=(torch.tensor([1, 2, 3]),),\n                        kwargs=dict(dim=1)),)\n\ndef sample_inputs_linalg_det(op_info, device, dtype, requires_grad):\n    kw = dict(device=device, dtype=dtype)\n    inputs = [\n        make_tensor((S, S), **kw),\n        make_tensor((1, 1), **kw), # 1x1\n        random_symmetric_matrix(S, **kw), # symmetric\n        random_symmetric_psd_matrix(S, **kw), # symmetric_psd\n        random_symmetric_pd_matrix(S, **kw), # symmetric_pd\n\n        # dim2_null, rank1 and rank2 are disabled because of\n        # https://github.com/pytorch/pytorch/issues/53364\n        # we should re-enable them once the issue is solved\n        # random_square_matrix_of_rank(S, S - 2, **kw), # dim2_null\n        # random_square_matrix_of_rank(S, 1, **kw), # rank1\n        # random_square_matrix_of_rank(S, 2, **kw), # rank2\n\n        random_fullrank_matrix_distinct_singular_value(S, **kw), # distinct_singular_value\n        make_tensor((3, 3, S, S), **kw), # batched\n        make_tensor((3, 3, 1, 1), **kw), # batched_1x1\n        random_symmetric_matrix(S, 3, **kw), # batched_symmetric\n        random_symmetric_psd_matrix(S, 3, **kw), # batched_symmetric_psd\n        random_symmetric_pd_matrix(S, 3, **kw), # batched_symmetric_pd\n        random_fullrank_matrix_distinct_singular_value(S, 3, 3, **kw), # batched_distinct_singular_values\n        make_tensor((0, 0), **kw),\n        make_tensor((0, S, S), **kw),\n    ]\n    for t in inputs:\n        t.requires_grad = requires_grad\n    return [SampleInput(t) for t in inputs]\n\ndef sample_inputs_linalg_matrix_power(op_info, device, dtype, requires_grad):\n    # (<matrix_size>, (<batch_sizes, ...>))\n    test_sizes = [\n        (1, ()),\n        (2, (0,)),\n        (2, (2,)),\n    ]\n\n    inputs = []\n    for matrix_size, batch_sizes in test_sizes:\n        size = batch_sizes + (matrix_size, matrix_size)\n        for n in (0, 3, 5):\n            t = make_tensor(size, device, dtype, requires_grad=requires_grad)\n            inputs.append(SampleInput(t, args=(n,)))\n        for n in [-4, -2, -1]:\n            t = random_fullrank_matrix_distinct_singular_value(matrix_size, *batch_sizes, device=device, dtype=dtype)\n            t.requires_grad = requires_grad\n            inputs.append(SampleInput(t, args=(n,)))\n\n    return inputs\n\ndef sample_inputs_hsplit(op_info, device, dtype, requires_grad):\n    return (SampleInput(make_tensor((6,), device, dtype,\n                                    low=None, high=None,\n                                    requires_grad=requires_grad),\n                        args=(2,),),\n            SampleInput(make_tensor((S, S, S), device, dtype,\n                                    low=None, high=None,\n                                    requires_grad=requires_grad),\n                        args=([1, 2, 3],),),)\n\ndef sample_inputs_vsplit(op_info, device, dtype, requires_grad):\n    return (SampleInput(make_tensor((6, S), device, dtype,\n                                    low=None, high=None,\n                                    requires_grad=requires_grad),\n                        args=(2,),),\n            SampleInput(make_tensor((S, S, S), device, dtype,\n                                    low=None, high=None,\n                                    requires_grad=requires_grad),\n                        args=([1, 2, 3],),),)
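\n\n# dsplit splits along dim 2, so inputs need at least three dimensions; e.g.\n# splitting an (S, S, 6) tensor with sections=2 yields two (S, S, 3) chunks.\n# The samples below exercise both the sections and the indices conventions.\ndef sample_inputs_dsplit(op_info, device, dtype, requires_grad):\n    return 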
(SampleInput(make_tensor((S, S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=([1, 2, 3],),),\n SampleInput(make_tensor((S, S, 6), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(2,),),)\n\ndef sample_inputs_linalg_multi_dot(op_info, device, dtype, requires_grad):\n # Each test case consists of the sizes in the chain of multiplications\n # e.g. [2, 3, 4, 5] generates matrices (2, 3) @ (3, 4) @ (4, 5)\n test_cases = [\n [1, 2, 1],\n [2, 0, 2],\n [0, 2, 2],\n [2, 2, 2, 2],\n [2, 3, 4, 5],\n [5, 4, 0, 2],\n [2, 4, 3, 5, 3, 2]\n ]\n\n result = []\n for sizes in test_cases:\n tensors = []\n for size in zip(sizes[:-1], sizes[1:]):\n t = make_tensor(size, device, dtype, requires_grad=requires_grad)\n tensors.append(t)\n result.append(SampleInput(tensors))\n\n return result\n\ndef sample_inputs_linalg_matrix_norm(op_info, device, dtype, requires_grad, **kwargs):\n sizes = ((2, 2), (2, 3, 2))\n ords = ('fro', 'nuc', inf, -inf, 1, -1, 2, -2)\n dims = ((-2, -1), (-1, 0))\n\n inputs: List[SampleInput] = []\n for size, ord, dim, keepdim in product(sizes, ords, dims, [True, False]):\n t = make_tensor(size, device, dtype, requires_grad=requires_grad)\n inputs.append(SampleInput(t, args=(ord, dim, keepdim)))\n\n return inputs\n\ndef sample_inputs_linalg_norm(op_info, device, dtype, requires_grad):\n test_sizes = [\n (S,),\n (0,),\n (S, S),\n (0, 0),\n (S, 0),\n (0, S),\n (S, S, S),\n (0, S, S),\n (S, 0, S),\n (0, 0, 0),\n ]\n\n vector_ords = (None, 0, 0.5, 1, 2, 3.5, inf, -0.5, -1, -2, -3.5, -inf)\n matrix_ords = (None, 'fro', 'nuc', 1, 2, inf, -1, -2, -inf)\n\n inputs = []\n\n for test_size in test_sizes:\n is_vector_norm = len(test_size) == 1\n is_matrix_norm = len(test_size) == 2\n\n for keepdim in [False, True]:\n inputs.append(SampleInput(\n make_tensor(\n test_size, device, dtype, low=None, high=None,\n requires_grad=requires_grad),\n kwargs=dict(\n keepdim=keepdim)))\n\n if not (is_vector_norm or is_matrix_norm):\n continue\n\n ords = vector_ords if is_vector_norm else matrix_ords\n\n for ord in ords:\n\n inputs.append(SampleInput(\n make_tensor(\n test_size, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(ord,),\n kwargs=dict(\n keepdim=keepdim)))\n\n if ord in ['nuc', 'fro']:\n inputs.append(SampleInput(\n make_tensor(\n test_size, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n kwargs=dict(\n ord=ord,\n keepdim=keepdim,\n dim=(0, 1))))\n return inputs\n\ndef sample_inputs_linalg_vector_norm(op_info, device, dtype, requires_grad, **kwargs):\n size_1D = (S,)\n size_2D = (2, 2)\n\n test_cases = [\n # input size, ord, dim args\n (size_1D, 2, None),\n (size_1D, 2, (0,)),\n (size_1D, 0, None),\n (size_1D, 0, (0,)),\n (size_1D, 0.9, None),\n (size_1D, 0.9, (0,)),\n (size_1D, 1, None),\n (size_1D, 1, (0,)),\n (size_1D, -2.1, None),\n (size_1D, -2.1, (0,)),\n (size_1D, inf, None),\n (size_1D, inf, (0,)),\n (size_1D, -inf, None),\n (size_1D, -inf, (0,)),\n\n (size_2D, 2, None),\n (size_2D, 2, (0,)),\n (size_2D, 2, (-1, 0)),\n (size_2D, 0, None),\n (size_2D, 0, (0,)),\n (size_2D, 0, (-1, 0)),\n (size_2D, 0.9, None),\n (size_2D, 0.9, (0,)),\n (size_2D, 0.9, (-1, 0)),\n (size_2D, 1, None),\n (size_2D, 1, (0,)),\n (size_2D, 1, (-1, 0)),\n (size_2D, -2.1, None),\n (size_2D, -2.1, (0,)),\n (size_2D, -2.1, (-1, 0)),\n (size_2D, inf, None),\n (size_2D, inf, (0,)),\n (size_2D, inf, (-1, 0)),\n (size_2D, -inf, None),\n (size_2D, -inf, (0,)),\n (size_2D, -inf, (-1, 0)),\n ]\n inputs = []\n\n for 
test_size, ord, dim in test_cases:\n for keepdim in [False, True]:\n inputs.append(SampleInput(\n make_tensor(\n test_size, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(ord,),\n kwargs=dict(\n keepdim=keepdim,\n dim=dim)))\n\n return inputs\n\n# In order to use the kwarg alpha, partials should be used in an OpInfo's sample_inputs_func\n# eg. sample_inputs_func=partial(sample_inputs_binary_pwise, alpha=2)\n# Then one sample input would also be generated corresponding to the value of alpha provided.\n# In the future, kwargs 'alpha_floating', 'alpha_integral' & 'alpha_complex' can be used to\n# specify scalars of floating, integral & complex types as values for \"alpha\".\ndef sample_inputs_binary_pwise(op_info, device, dtype, requires_grad, **kwargs):\n scalar = 3.14 + 3.14j if dtype.is_complex else (3.14 if dtype.is_floating_point else 3)\n scalar = 1 if dtype is torch.bool else scalar\n tests_list = [\n ((S, S, S), (S, S, S), False),\n ((S, S, S), (S, S), False),\n ((), (), False),\n ((S, S, S), (), False),\n ((S, S, S), scalar, False),\n ((), scalar, False)\n ]\n tests_with_lhs_broadcasting = [\n ((S, S), (S, S, S), True),\n ((), (S, S, S), True),\n ((S, 1, S), (M, S), True),\n ]\n test_cases = tests_list + tests_with_lhs_broadcasting # type: ignore[operator]\n samples = []\n for first_shape, shape_or_scalar, broadcasts_input in test_cases:\n arg = shape_or_scalar\n if isinstance(shape_or_scalar, tuple):\n arg = make_tensor(shape_or_scalar, device=device, dtype=dtype,\n requires_grad=requires_grad)\n samples.append(SampleInput(make_tensor(first_shape, device=device, dtype=dtype,\n requires_grad=requires_grad),\n args=(arg,),\n broadcasts_input=broadcasts_input))\n # Adds an extra sample using \"alpha\" if it's passed in kwargs\n if 'alpha' in kwargs:\n a = make_tensor((S, S, S), device=device, dtype=dtype, requires_grad=requires_grad)\n b = make_tensor((S, S, S), device=device, dtype=dtype, requires_grad=requires_grad)\n sample = SampleInput(a, args=(b,), kwargs={'alpha': kwargs['alpha']})\n samples.append(sample)\n return tuple(samples)\n\ndef sample_inputs_mm(op_info, device, dtype, requires_grad, **kwargs):\n args_list = (\n ((S, M), (M, S)),\n )\n inputs = tuple(SampleInput(make_tensor(first_shape, device, dtype,\n requires_grad=requires_grad),\n args=(make_tensor(second_shape, device, dtype,\n requires_grad=requires_grad),))\n for first_shape, second_shape in args_list)\n return inputs\n\ndef sample_inputs_addmm(op_info, device, dtype, requires_grad, **kwargs):\n alpha_val = kwargs.get('alpha', 2 + 3j if dtype.is_complex else 0.6)\n beta_val = kwargs.get('beta', 1 + 2j if dtype.is_complex else 0.2)\n tests_list = [\n ((2, 3), (2, 2), (2, 3), False)\n ]\n tests_with_lhs_broadcasting = [\n ((1,), (2, 2), (2, 3), True),\n ((), (2, 2), (2, 3), True)\n ]\n test_cases = tests_list + tests_with_lhs_broadcasting # type: ignore[operator]\n inputs = tuple(SampleInput(make_tensor(shape_a, device, dtype, requires_grad=requires_grad),\n args=(make_tensor(shape_b, device, dtype,\n requires_grad=requires_grad),\n make_tensor(shape_c, device, dtype,\n requires_grad=requires_grad)),\n kwargs={'alpha': alpha_val, 'beta': beta_val},\n broadcasts_input=broadcasts_input)\n for shape_a, shape_b, shape_c, broadcasts_input in test_cases)\n return inputs\n\ndef sample_inputs_mv(self, device, dtype, requires_grad, **kwargs):\n return (\n SampleInput(\n make_tensor((S, M, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((M, ), device, 
dtype, low=None, high=None, requires_grad=requires_grad),\n )\n ),\n )\n\ndef sample_inputs_bmm(self, device, dtype, requires_grad, **kwargs):\n return (\n SampleInput(\n make_tensor((M, S, M, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((M, M, S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n )\n ),\n )\n\ndef sample_inputs_dot_vdot(self, device, dtype, requires_grad, **kwargs):\n return (\n SampleInput(\n make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n )\n ),\n )\n\ndef sample_inputs_addmv(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = (((S,), (S, M), (M,), 1, 1, False),\n ((S,), (S, M), (M,), 0.2, 0.6, False),\n )\n\n test_cases_with_broadcast = (((1,), (S, M), (M,), 1, 1, True),\n ((1,), (S, M), (M,), 0.2, 0.6, True),\n ((), (S, M), (M,), 1, 1, True),\n ((), (S, M), (M,), 0.2, 0.6, True),\n )\n\n cases = test_cases + test_cases_with_broadcast\n sample_inputs = []\n for input_args in cases:\n args = (make_tensor(input_args[0], device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n make_tensor(input_args[1], device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n make_tensor(input_args[2], device, dtype,\n low=None, high=None,\n requires_grad=requires_grad))\n alpha, beta = input_args[3], input_args[4]\n broadcasts_input = input_args[5]\n sample_inputs.append(SampleInput(args[0], args=(args[1], args[2]), kwargs=dict(beta=beta, alpha=alpha),\n broadcasts_input=broadcasts_input))\n return tuple(sample_inputs)\n\ndef sample_inputs_addbmm(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = [((S, M), (S, S, S), (S, S, M), 1, 1),\n ((1,), (S, S, S), (S, S, M), 1, 1),\n ((S, M), (S, S, S), (S, S, M), 0.6, 0.2),\n ((1,), (S, S, S), (S, S, M), 0.6, 0.2),\n ((), (S, S, S), (S, S, M), 1, 1),\n ((), (S, S, S), (S, S, M), 0.6, 0.2),\n ]\n sample_inputs = []\n for input_args in test_cases:\n args = (make_tensor(input_args[0], device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n make_tensor(input_args[1], device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n make_tensor(input_args[2], device, dtype,\n low=None, high=None,\n requires_grad=requires_grad))\n alpha, beta = input_args[3], input_args[4]\n sample_inputs.append(SampleInput(args[0], args=(args[1], args[2]), kwargs=dict(beta=beta, alpha=alpha)))\n if dtype.is_complex:\n sample_inputs.append(SampleInput(args[0], args=(args[1], args[2]),\n kwargs=dict(beta=beta * (1 + 2j), alpha=alpha * (2 + 3j))))\n\n return tuple(sample_inputs)\n\ndef sample_inputs_addcmul_addcdiv(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = [((S, S), (S, S), (S, S)),\n ((S, S), (S, 1), (1, S)),\n ((1,), (S, S, 1), (1, S)),\n ((), (), ()),\n ((S, S), (), ()),\n ((), (S, S, 1), (1, S)),\n ]\n\n sample_inputs = []\n for input_args in test_cases:\n args = tuple(make_tensor(arg, device, dtype, requires_grad=requires_grad) if isinstance(arg, tuple) else arg\n for arg in input_args)\n sample_inputs.append(SampleInput(args[0], args=args[1:]))\n\n sample_inputs.append(SampleInput(args[0], args=args[1:], kwargs=dict(value=3.14)))\n\n return tuple(sample_inputs)\n\ndef sample_inputs_baddbmm(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = [((S, S, M), (S, S, S), (S, S, M), 1, 1, False),\n ((1,), (S, S, S), (S, S, M), 1, 1, True),\n ((S, S, M), (S, S, 
S), (S, S, M), 0.6, 0.2, False),\n ((1,), (S, S, S), (S, S, M), 0.6, 0.2, True),\n ((), (S, S, S), (S, S, M), 1, 1, True),\n ((), (S, S, S), (S, S, M), 0.6, 0.2, True),\n ]\n sample_inputs = []\n for (input_shape, batch1_shape, batch2_shape, alpha, beta, broadcasts_input) in test_cases:\n args = (make_tensor(input_shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n make_tensor(batch1_shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n make_tensor(batch2_shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad))\n sample_inputs.append(SampleInput(args[0], args=(args[1], args[2]),\n kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=broadcasts_input))\n if dtype.is_complex:\n sample_inputs.append(SampleInput(args[0], args=(args[1], args[2]),\n kwargs=dict(beta=beta * (1 + 2j), alpha=alpha * (2 + 3j)),\n broadcasts_input=broadcasts_input))\n return tuple(sample_inputs)\n\ndef sample_inputs_addr(op_info, device, dtype, requires_grad, **kwargs):\n input1 = SampleInput(\n make_tensor((S, M), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)))\n\n input2 = SampleInput(\n make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)))\n\n if dtype.is_complex:\n alpha, beta = 0.1 + 0.3j, 0.4 + 0.6j\n elif dtype.is_floating_point:\n alpha, beta = 0.2, 0.6\n else:\n alpha, beta = 2, 3\n\n input3 = SampleInput(\n make_tensor((S, M), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)),\n kwargs=dict(beta=beta, alpha=alpha))\n\n input4 = SampleInput(\n make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((S, ), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad)),\n kwargs=dict(beta=beta, alpha=alpha))\n\n return (input1, input2, input3, input4)\n\ndef sample_inputs_xlogy(self, device, dtype, requires_grad, **kwargs):\n return (\n SampleInput(\n make_tensor((S, S), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(\n make_tensor((S, S), device, dtype, low=0, high=None, requires_grad=requires_grad),\n )\n ),\n )\n\n\ndef sample_inputs_xlog1py(self, device, dtype, requires_grad):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n def generator():\n # same shape\n yield SampleInput(make_arg((S, S)), args=(make_arg((S, S), low=-1),))\n # rhs broadcast\n yield SampleInput(make_arg((S, S)), args=(make_arg((S,), low=-1),))\n # all zero `x`\n with torch.no_grad():\n x = make_arg((S, S))\n x.fill_(0)\n yield SampleInput(x, args=(make_arg((S, S), low=-1),))\n\n # randomly zero-masked `x`\n x = make_arg((S, S))\n y = make_arg((S, S), low=-1)\n with torch.no_grad():\n x[torch.rand(x.shape) > 0.5] = 0\n yield SampleInput(x, args=(y,))\n\n # Scalar x\n # `input` has to be a tensor\n # yield SampleInput(0, args=(make_arg((S, S), low=-1),))\n # 
yield SampleInput(2.1, args=(make_arg((S, S), low=-1),))\n\n # Scalar y\n yield SampleInput(make_arg((S, S)), args=(-0.5,))\n yield SampleInput(make_arg((S, S)), args=(1.2,))\n\n return list(generator())\n\n\ndef sample_inputs_logsumexp(self, device, dtype, requires_grad):\n inputs = (\n ((), (0,), True),\n ((S, S), (1,), True),\n ((S, S), (1,), False)\n )\n samples = []\n\n for shape, dim, keepdim in inputs:\n t = make_tensor(shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad)\n samples.append(SampleInput(t, args=(dim, keepdim)))\n\n return tuple(samples)\n\ndef sample_inputs_logcumsumexp(self, device, dtype, requires_grad):\n inputs = (\n ((S, S, S), 0),\n ((S, S, S), 1),\n ((), 0),\n )\n samples = []\n\n for shape, dim in inputs:\n t = make_tensor(shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad)\n samples.append(SampleInput(t, args=(dim,)))\n\n return tuple(samples)\n\ndef sample_inputs_trace(self, device, dtype, requires_grad, **kwargs):\n return (SampleInput((make_tensor((S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad))),)\n\n\ndef sample_inputs_transpose_swapdims(self, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n cases = (((1, 2, 3), (-1, -2)),\n ((1, 2, 3), (-1, 2)),\n ((1, 2, 3), (1, -2)),\n ((1, 2, 3), (1, 2)),\n ((), (0, 0)),\n ((1, ), (0, 0)),\n ((M, M), (0, 1)),\n ((S, S, S), (2, 0)), )\n\n def generator():\n for shape, args in cases:\n yield SampleInput(make_arg(shape), args=args)\n\n return list(generator())\n\n\ndef sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad=False, **kwargs):\n \"\"\"\n This function generates always invertible input for linear algebra ops using\n random_fullrank_matrix_distinct_singular_value.\n The input is generated as the itertools.product of 'batches' and 'ns'.\n In total this function generates 8 SampleInputs\n 'batches' cases include:\n () - single input,\n (0,) - zero batched dimension,\n (2,) - batch of two matrices,\n (1, 1) - 1x1 batch of matrices\n 'ns' gives 0x0 and 5x5 matrices.\n Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.\n \"\"\"\n from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value\n\n batches = [(), (0, ), (2, ), (1, 1)]\n ns = [5, 0]\n out = []\n for batch, n in product(batches, ns):\n a = random_fullrank_matrix_distinct_singular_value(n, *batch, dtype=dtype, device=device)\n a.requires_grad = requires_grad\n out.append(SampleInput(a))\n return out\n\ndef np_sinc_with_fp16_as_fp32(x):\n # Wraps numpy's sinc function so that fp16 values are promoted to fp32\n # before sinc is invoked. 
Context: numpy's sinc returns NaN when evaluated\n # at 0 for fp16.\n if x.dtype == np.float16:\n return np.sinc(x.astype(np.float32))\n else:\n return np.sinc(x)\n\ndef sample_inputs_broadcast_to(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = (\n ((S, 1, 1), (S, S, S)),\n ((S, 1, S), (S, S, S)),\n ((S, 1), (S, S, S)),\n ((1,), (S, S, S)),\n ((1, S), (1, 1, S)),\n ((), ()),\n ((), (1, 3, 2)),\n )\n\n return tuple(\n SampleInput(\n make_tensor(size, device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(shape,)) for size, shape in test_cases)\n\ndef sample_inputs_cdist(op_info, device, dtype, requires_grad, **kwargs):\n small_S = 2\n test_cases = (\n ((S, S, 2), (S, S + 1, 2)),\n ((S, S), (S, S)),\n ((S, S, S), (S, S, S)),\n ((3, 5), (3, 5)),\n ((2, 3, 5), (2, 3, 5)),\n ((1, 2, 3), (1, 2, 3)),\n ((1, 1), (S, 1)),\n ((0, 5), (4, 5)),\n ((4, 5), (0, 5)),\n ((0, 4, 5), (3, 5)),\n ((4, 5), (0, 3, 5)),\n ((0, 4, 5), (1, 3, 5)),\n ((1, 4, 5), (0, 3, 5)),\n # Using S here would make this one test take 9s\n ((small_S, small_S, small_S + 1, 2), (small_S, small_S, small_S + 2, 2)),\n ((small_S, 1, 1, small_S), (1, small_S, small_S)),\n ((1, 1, small_S), (small_S, 1, small_S, small_S)),\n )\n\n samples = []\n for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']:\n for p in [0, 1, 2, 3, 0.5, 1.5, 2.5, float(\"inf\")]:\n for t1_size, t2_size in test_cases:\n # The args should never be non-contiguous as this is not supported in the backward\n samples.append(SampleInput(\n make_tensor(t1_size, device, dtype, requires_grad=requires_grad, noncontiguous=False),\n args=(make_tensor(t2_size, device, dtype, requires_grad=requires_grad, noncontiguous=False), p, cm)))\n\n return samples\n\ndef sample_inputs_comparison_ops(self, device, dtype, requires_grad, **kwargs):\n test_cases = (\n ((S, S, S), (S, S, S), False),\n ((S, S, S), (), False),\n ((S, S, S), (1,), False),\n ((S,), (1,), False),\n ((), (), False),\n )\n test_cases_lhs_broadcasting = (\n ((S, 1, S), (S, S, S), True),\n ((1,), (S, S, S), True),\n ((1, S), (1, 1, S), True),\n ((), (0,), True),\n ((), (S, S, S), True),\n )\n cases = test_cases + test_cases_lhs_broadcasting\n sample_inputs = list(SampleInput(make_tensor(first_shape, device, dtype,\n requires_grad=requires_grad),\n args=(make_tensor(second_shape, device, dtype,\n requires_grad=requires_grad),),\n broadcasts_input=broadcasts_input)\n for first_shape, second_shape, broadcasts_input in cases)\n equal_tensors_non_bool = (\n ([[[-8, 6], [9, 0]], [[0, 5], [5, 7]]]),\n ([[[6, 5]], [[1, -5]]]),\n ([[2], [-1]]),\n ([0, -6]),\n ([3],),\n )\n equal_tensors_bool = (\n ([[[1, 0], [0, 0]], [[0, 1], [1, 0]]]),\n ([[[1, 1]], [[1, 0]]]),\n ([[1], [0]]),\n ([0, 1]),\n ([1],),\n )\n more_cases = equal_tensors_bool if dtype is torch.bool else equal_tensors_non_bool\n more_inputs = list(SampleInput(torch.tensor(elements, device=device, dtype=dtype,\n requires_grad=requires_grad),\n args=(torch.tensor(elements, device=device, dtype=dtype,\n requires_grad=requires_grad),))\n for elements in more_cases)\n sample_inputs = [*sample_inputs, *more_inputs]\n return tuple(sample_inputs)\n\ndef sample_inputs_div(self, device, dtype, requires_grad, rounding_mode=None, **kwargs):\n a = make_tensor((S, S, S), device, dtype, low=None, high=None, requires_grad=requires_grad)\n is_integral = not dtype.is_floating_point and not dtype.is_complex\n b = make_tensor((S, S, S), device, dtype, low=1 if is_integral else 0.1, high=None,\n requires_grad=requires_grad)\n\n kwargs = 
None # type: ignore[assignment]\n if rounding_mode is not None:\n kwargs = dict(rounding_mode=rounding_mode)\n\n return (\n SampleInput(a, args=(b,), kwargs=kwargs),\n SampleInput(a, args=(2,)),\n )\n\ndef sample_inputs_stack(op_info, device, dtype, requires_grad, **kwargs):\n tensors = [\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n ]\n\n return (SampleInput(tensors, args=(0,)),)\n\ndef sample_inputs_hstack_dstack_vstack(op_info, device, dtype, requires_grad, **kwargs):\n tensors = [\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n ]\n\n return (SampleInput(tensors),)\n\ndef sample_inputs_hypot(op_info, device, dtype, requires_grad):\n input = make_tensor((S, S), device, dtype, requires_grad=requires_grad)\n args = make_tensor((S, S), device, dtype, requires_grad=requires_grad)\n\n return (\n SampleInput(input, args=(args,)),\n )\n\ndef sample_inputs_gather(op_info, device, dtype, requires_grad, **kwargs):\n return (\n SampleInput(\n make_tensor((M, S), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(0, gather_variable((S, S), 1, M, True, device=device))),\n SampleInput(\n make_tensor((M, S), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(1, gather_variable((M, S // 2), 0, S, True, device=device))),\n SampleInput(\n make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(0, torch.tensor([0], dtype=torch.int64, device=device))),\n SampleInput(\n make_tensor((S,), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(0, torch.tensor(0, dtype=torch.int64, device=device))),\n SampleInput(\n make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(0, torch.tensor(0, dtype=torch.int64, device=device))),\n )\n\n\ndef sample_inputs_take_along_dim(op_info, device, dtype, requires_grad, **kwargs):\n return (SampleInput(make_tensor((S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(gather_variable((S, S), 1, S, True, device=device), 0)),\n\n # `indices` broadcast\n SampleInput(make_tensor((S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(gather_variable((1, S // 2), 0, S, True, device=device), 1)),\n\n # `self` broadcast\n SampleInput(make_tensor((1, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(gather_variable((S, S // 2), 0, S, True, device=device), 1)),\n\n # without `dim` arg\n SampleInput(make_tensor((S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(gather_variable((S, S // 2), 0, S, True, device=device), )),\n SampleInput(make_tensor((S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(gather_variable((S, S // 2), 0, S, True, device=device),)),\n )\n\ndef sample_inputs_amax_amin(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = (\n ((S, S, S), ()),\n ((S, S, S), (1,)),\n ((S, S, S), ((1, 2,),)),\n ((S, S, S), (1, True,)),\n ((), (0,)),\n ((), ()),\n ((), (0, True,)),\n )\n return tuple(SampleInput((make_tensor(size, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad)),\n args=args)\n for size, args in test_cases)\n\ndef 
sample_inputs_argmax_argmin(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = (\n ((2, 2, 2), ()),\n ((2, 2, 2), (0,)),\n ((2, 2, 2), (1,)),\n ((2, 2, 2), (2,)),\n ((2, 2, 2), (2, True,)),\n ((2, 2, 2), (None,)),\n ((), (0,)),\n ((), ()),\n ((), (None, True,)),\n ((1,), ()),\n ((1,), (0,)),\n ((1,), (0, True)),\n ((2,), ()),\n ((2,), (0,)),\n ((2,), (0, True)),\n ((2, 2, 3), ()),\n ((2, 2, 3), (0,)),\n ((2, 2, 3), (1,)),\n ((2, 2, 3), (None, True)),\n )\n return tuple(SampleInput((make_tensor(size, device, dtype,\n requires_grad=requires_grad)),\n args=args)\n for size, args in test_cases)\n\ndef sample_inputs_diff(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = (\n ((1,), 0, None, None),\n ((S,), 0, None, None),\n ((S, 1), 0, None, None),\n ((S, 1), 1, None, None),\n ((S, S), 0, None, None),\n ((S, S), 1, None, None),\n ((S, S), 0, (1, S), (2, S)),\n ((S, S), 0, None, (2, S)),\n ((S, S, S), 1, None, None),\n ((S, S, S), 1, (S, 1, S), (S, 1, S)),)\n\n sample_inputs = []\n for size, dim, size_prepend, size_append in test_cases:\n args = (make_tensor(size, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad), 1, dim,\n make_tensor(size_prepend, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad) if size_prepend else None,\n make_tensor(size_append, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad) if size_append else None)\n sample_inputs.append(SampleInput(args[0], args=(args[1], args[2])))\n\n return tuple(sample_inputs)\n\ndef sample_inputs_gradient(op_info, device, dtype, requires_grad):\n sample_inputs = []\n test_cases_float = (\n ((S,), None, None),\n ((S,), 2., None),\n ((S, S), None, None),\n ((S, S), [2.0, 2.1], None),\n ((S, S), [2.0, 2.1], (0, 1)),\n ((4, 4, 4), [2., 1.], (0, 1)),\n )\n for size, spacing, dim in test_cases_float:\n t = make_tensor(size, device, dtype, low=None, high=None, requires_grad=requires_grad)\n sample_inputs.append(SampleInput(t, kwargs=dict(dim=dim, spacing=spacing)))\n\n test_cases_tensor = (\n ((3, 3, 3), ((1.1, 2.0, 3.5), (4.0, 2, 6.0)), (0, -1)),\n ((3, 3, 3), ((1.0, 3.0, 2.0), (8.0, 6.0, 1.0)), (0, 1)),\n )\n for size, coordinates, dim in test_cases_tensor:\n t = make_tensor(size, device, dtype, low=None, high=None, requires_grad=requires_grad)\n coordinates_tensor_list = []\n for coords in coordinates:\n a = torch.tensor(coords, dtype=dtype, device=device)\n coordinates_tensor_list.append(a)\n sample_inputs.append(SampleInput(t, kwargs=dict(dim=dim, spacing=coordinates_tensor_list)))\n\n return tuple(sample_inputs)\n\ndef sample_inputs_index_select(op_info, device, dtype, requires_grad):\n return (\n SampleInput(\n make_tensor((S, S, S), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(0, index_variable(2, S, device=device))),\n SampleInput(\n make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(0, torch.tensor([0], dtype=torch.int64, device=device))),\n SampleInput(\n make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(0, torch.tensor(0, dtype=torch.int64, device=device))),\n )\n\ndef sample_inputs_getitem(op_info, device, dtype, requires_grad, **kwargs):\n test_args = [\n (dont_convert([1, 2]),),\n (slice(0, 3),),\n (dont_convert([slice(0, 3), 1]),),\n (dont_convert([[0, 2, 3], [1, 3, 3], [0, 0, 2]]),),\n (dont_convert([[0, 0, 3], [1, 1, 3], [0, 0, 2]]),),\n (dont_convert([slice(None), slice(None), [0, 3]]),),\n (dont_convert([slice(None), [0, 3], slice(None)]),),\n 
(dont_convert([[0, 3], slice(None), slice(None)]),),\n        (dont_convert([[0, 3], [1, 2], slice(None)]),),\n        (dont_convert([[0, 3], ]),),\n        (dont_convert([[0, 3], slice(None)]),),\n        (dont_convert([[0, 3], Ellipsis]),),\n        (dont_convert([[0, 2, 3], [1, 3, 3], torch.LongTensor([0, 0, 2])]),),\n        (index_variable(2, S, device=device),),\n        (mask_not_all_zeros((S,)),),\n    ]\n\n    return tuple(SampleInput(\n        make_tensor((S, S, S), device, dtype, low=None, high=None, requires_grad=requires_grad),\n        args=args)\n        for args in test_args)\n\ndef sample_inputs_index_put(op_info, device, dtype, requires_grad, **kwargs):\n    inputs = []\n    for accumulate in [False, True]:\n        # Test with indices arg\n        inputs.append(SampleInput(\n            make_tensor((S, S,), device, dtype, low=None, high=None, requires_grad=requires_grad),\n            args=(\n                (index_variable(2, S, device=device), ),\n                make_tensor((2, S), device, dtype, low=None, high=None)),\n            kwargs=dict(accumulate=accumulate)))\n\n        # Test with mask arg\n        mask = torch.zeros(S, dtype=torch.bool) if accumulate else mask_not_all_zeros((S,))\n        inputs.append(SampleInput(\n            make_tensor((S, S), device, dtype, low=None, high=None, requires_grad=requires_grad),\n            args=(\n                (mask, ),\n                make_tensor((S,), device, dtype, low=None, high=None),),\n            kwargs=dict(accumulate=accumulate)))\n\n    return inputs\n\n# Missing to test the nondeterminism of the operation\n# https://github.com/pytorch/pytorch/issues/53352\ndef sample_inputs_index_add(op_info, device, dtype, requires_grad, **kwargs):\n    # These tests are pretty much the same as those from index_copy.\n    # Perhaps merge?\n    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n    t = make_arg((S, S))\n    s = make_arg((S, S))\n    # non-contiguous target\n    t_nonctg = t.transpose(0, 1)\n    # non-contiguous source\n    s_nonctg = s.transpose(0, 1)\n\n    idx = make_arg((S,), dtype=torch.int64, low=0, high=S)\n    idx_nonctg = make_arg((S,), dtype=torch.int64, low=0, high=S, noncontiguous=True)\n    samples = [SampleInput(tensor, args=(1, idx, source))\n               for tensor, idx, source in product([t, t_nonctg], [idx, idx_nonctg], [s, s_nonctg])]\n    samples.extend(SampleInput(tensor, args=(1, idx, source), kwargs=dict(alpha=a))\n                   for tensor, idx, source, a in product([t, t_nonctg], [idx, idx_nonctg], [s, s_nonctg], [-1, 0, 2]))\n\n    # Add scalar cases\n    scalar_sizes = [(), (1,)]\n    # Use lists rather than generators: both extends below iterate these\n    # sequences, and a generator would already be exhausted by the first one.\n    ts = [make_arg(size) for size in scalar_sizes]\n    idxs = [make_arg(size, dtype=torch.int64, low=0, high=1) for size in scalar_sizes]\n    ss = [make_arg(size) for size in scalar_sizes]\n\n    samples.extend(SampleInput(t, args=(0, idx, s)) for t, idx, s in product(ts, idxs, ss))\n    samples.extend(SampleInput(t, args=(0, idx, s), kwargs=dict(alpha=a)) for t, idx, s, a in product(ts, idxs, ss, [-1, 0, 2]))\n    return samples\n\ndef sample_inputs_sort(op_info, device, dtype, requires_grad, **kwargs):\n    def apply_grad(t):\n        if dtype in floating_types_and(torch.float16, torch.bfloat16):\n            t.requires_grad_(requires_grad)\n\n    def small_3d_unique(dtype, device):\n        res = torch.randperm(S * S * S, dtype=torch.int64, device=device).view(S, S, S)\n        res = res.to(dtype)\n        apply_grad(res)\n        return res\n\n    def large_1d_unique(dtype, device):\n        res = torch.randperm(L * L * L, dtype=torch.int64, device=device)\n        res = res.to(dtype)\n        apply_grad(res)\n        return res\n\n    samples = []\n    # Test case for large tensor.\n    largesample = SampleInput(large_1d_unique(dtype, device))\n    samples.append(largesample)\n\n    # Test cases for small 3d tensors.\n    # Imitates legacy tests from test/test_torch.py\n    
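# Unique values keep the sorted order (and therefore autograd's routing of\n    # gradients through sort) deterministic.\n    t = 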
small_3d_unique(dtype, device)\n dims = range(-3, 3)\n flag = [True, False]\n for dim, descending, stable in product(dims, flag, flag):\n # default schema without stable sort\n samples.append(SampleInput(t, args=(dim, descending)))\n # schema with stable sort, no CUDA support yet\n if torch.device(device).type == 'cpu':\n samples.append(\n SampleInput(t, kwargs=dict(dim=dim, descending=descending, stable=stable))\n )\n\n # Test cases for scalar tensor\n scalar = torch.tensor(1, dtype=dtype, device=device)\n apply_grad(scalar)\n samples.append(SampleInput(scalar))\n samples.append(SampleInput(scalar, args=(0,)))\n samples.append(SampleInput(scalar, args=(0, True)))\n # no CUDA support for stable sort yet\n if not device.startswith('cuda'):\n samples.append(SampleInput(scalar, kwargs=dict(stable=True)))\n samples.append(SampleInput(scalar, kwargs=dict(dim=0, stable=True)))\n samples.append(SampleInput(scalar, kwargs=dict(dim=0, descending=True, stable=True)))\n return samples\n\ndef sample_inputs_index_fill(op_info, device, dtype, requires_grad, **kwargs):\n samples = []\n t = make_tensor((S, S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad)\n fill_val = torch.tensor(-1 + 1j if t.is_complex() else -1)\n # non-contiguous input\n t01 = t.transpose(0, 1)\n t02 = t.transpose(0, 2)\n t12 = t.transpose(1, 2)\n idx = index_variable(1, S, device=device)\n # non-contiguous index\n idx_nonctg = torch.empty_strided((S,), (2,), device=device, dtype=torch.int64)\n idx_nonctg.copy_(idx)\n for d in range(t.dim()):\n for tensor in [t, t01, t02, t12]:\n samples.append(SampleInput(tensor, args=(d, idx, fill_val)))\n samples.append(SampleInput(tensor, args=(d, -idx - 1, fill_val)))\n samples.append(SampleInput(tensor, args=(d, idx_nonctg, fill_val)))\n\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n index_tensor = partial(torch.tensor, device=device, dtype=torch.long)\n\n def unique_idx(numel, max_idx):\n # Generate unique random indices vector of `numel`\n # elements in range [0, max_idx).\n indices = random.sample(range(max_idx), numel)\n return index_tensor(indices)\n\n samples.append(SampleInput(make_arg((S, S)), args=(0, unique_idx(2, S), 2)))\n samples.append(SampleInput(make_arg((S, S)), args=(0, unique_idx(2, S), make_arg(()))))\n samples.append(SampleInput(make_arg((S, S)), args=(0, index_tensor(0), 2)))\n samples.append(SampleInput(make_arg(()), args=(0, index_tensor([0]), 2)))\n samples.append(SampleInput(make_arg(()), args=(0, index_tensor(0), 2)))\n\n # Duplicate indices\n samples.append(SampleInput(make_arg((S, S)), args=(0, index_tensor([0, 0]), 2)))\n samples.append(SampleInput(make_arg((S, S)), args=(0, index_tensor([0, 0, 2]), make_arg(()))))\n\n return samples\n\ndef sample_inputs_max_min_binary(op_info, device, dtype, requires_grad, **kwargs):\n inputs = []\n args_for_binary_op = (\n ((S, S, S), (S, S, S),),\n ((S, S, S), (S,),),\n ((S,), (S, S, S),),\n ((S, 1, S), (S, S),),\n ((S, S), (S, S),),\n ((), (),),\n ((S, S, S), (),),\n ((), (S, S, S),),\n )\n inputs = list((SampleInput(make_tensor(input_tensor, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=(make_tensor(other_tensor, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),),))\n for input_tensor, other_tensor in args_for_binary_op)\n return inputs\n\ndef sample_inputs_hardswish(self, device, dtype, requires_grad):\n N = 5\n # make sure we are testing -3 -> 3 range. 
The default is -10 -> 10, so this may be unnecessary.\n    tensors = [SampleInput(make_tensor((N * 2, N * 2), device=device, dtype=dtype,\n               requires_grad=requires_grad, low=-5, high=5)) for _ in range(1, N)]\n    return tensors\n\ndef sample_inputs_max_min_reduction_with_dim(op_info, device, dtype, requires_grad, **kwargs):\n    inputs = []\n    args_for_reduction_with_dim = (\n        ((S, S, S), (1,),),\n        ((S, S, S), (1, True, ),),\n        ((), (0,),),\n        ((), (0, True,),),\n    )\n    inputs = list((SampleInput(make_tensor(input_tensor, device, dtype,\n                                           low=None, high=None,\n                                           requires_grad=requires_grad),\n                               args=args,))\n                  for input_tensor, args in args_for_reduction_with_dim)\n    return inputs\n\ndef sample_inputs_max_min_reduction_no_dim(op_info, device, dtype, requires_grad, **kwargs):\n    inputs = []\n    inputs.append(SampleInput(make_tensor((S, S, S), device, dtype,\n                                          low=None, high=None,\n                                          requires_grad=requires_grad),))\n    inputs.append(SampleInput(make_tensor((), device, dtype,\n                                          low=None, high=None,\n                                          requires_grad=requires_grad),))\n    return inputs\n\n# Generates input tensors for testing reduction ops\ndef _generate_reduction_inputs(device, dtype, requires_grad):\n    yield make_tensor((), device, dtype, requires_grad=requires_grad)\n    yield make_tensor((2,), device, dtype, requires_grad=requires_grad)\n    yield make_tensor((2, 3), device, dtype, requires_grad=requires_grad, noncontiguous=True)\n    yield make_tensor((3, 2, 1, 2, 2), device, dtype, requires_grad=requires_grad)\n\n# Generates a subset of possible dim and keepdim kwargs for a tensor\n# with ndim dims appropriate for testing. If supports_multiple_dims\n# is True (default) then dim kwarg can be a list of dims.\ndef _generate_reduction_kwargs(ndim, supports_multiple_dims=True):\n    for keepdim in [True, False]:\n        # Always test reducing inner and outer most dimensions\n        yield {'dim': 0, 'keepdim': keepdim}\n        yield {'dim': -1, 'keepdim': keepdim}\n\n        # Also reduce middle dimension\n        if ndim > 2:\n            yield {'dim': ndim // 2, 'keepdim': keepdim}\n\n        if supports_multiple_dims:\n            # Always test reducing all dims\n            yield {'dim': tuple(range(ndim)), 'keepdim': keepdim}\n\n            # Test reducing both first and last dimensions\n            if ndim > 1:\n                yield {'dim': (0, ndim - 1), 'keepdim': keepdim}\n\n            # Test reducing every other dimension starting with the second\n            if ndim > 3:\n                yield {'dim': tuple(range(1, ndim, 2)), 'keepdim': keepdim}\n\n# Wraps sample_inputs_reduction function to provide the additional supports_multiple_dims args\ndef sample_inputs_reduction_wrapper(supports_multiple_dims):\n    # Generates sample inputs for reduction ops that contain the input tensor\n    # and dim and keepdim kwargs. 
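For a 3-d input, for example,\n    # _generate_reduction_kwargs yields dim=0, dim=-1, dim=1, dim=(0, 1, 2) and\n    # dim=(0, 2), each with keepdim in {True, False}. 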
If a reduction op needs to test additional\n # args/kwargs then create a separate sample_inputs function\n def fn(op_info, device, dtype, requires_grad):\n inputs = []\n\n for t in _generate_reduction_inputs(device, dtype, requires_grad):\n # Add case without dim and keepdim kwargs\n inputs.append(SampleInput(t))\n for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims):\n inputs.append(SampleInput(t, kwargs=kwargs))\n\n return inputs\n\n return fn\n\ndef sample_inputs_reduction_quantile(op_info, device, dtype, requires_grad):\n test_quantiles = (0.5, make_tensor((2,), device, dtype, low=0, high=1))\n test_interpolations = ['linear', 'midpoint']\n\n inputs = []\n for quantiles in test_quantiles:\n for t in _generate_reduction_inputs(device, dtype, requires_grad):\n # Add case without dim and keepdim kwargs\n inputs.append(SampleInput(t, args=(quantiles,)))\n for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims=False):\n # Interpolation kwarg for now is only supported when providing both dim and keepdim\n for interpolation in test_interpolations:\n kwargs['interpolation'] = interpolation\n inputs.append(SampleInput(t, args=(quantiles,), kwargs=kwargs))\n\n return inputs\n\ndef sample_inputs_topk(op_info, device, dtype, requires_grad, **kwargs):\n def get_tensor_input(size):\n return make_tensor(size, device, dtype, requires_grad=requires_grad)\n\n inputs = []\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3,)))\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1)))\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2)))\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1, True)))\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2, True)))\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, 1, True, True)))\n inputs.append(SampleInput(get_tensor_input((S, M, S)), args=(3, -2, True, True)))\n\n inputs.append(SampleInput(get_tensor_input(()), args=(1,)))\n inputs.append(SampleInput(get_tensor_input(()), args=(1, 0)))\n inputs.append(SampleInput(get_tensor_input(()), args=(1, -1)))\n inputs.append(SampleInput(get_tensor_input(()), args=(1, 0, True)))\n inputs.append(SampleInput(get_tensor_input(()), args=(1, -1, True)))\n inputs.append(SampleInput(get_tensor_input(()), args=(1, 0, True, True)))\n inputs.append(SampleInput(get_tensor_input(()), args=(1, -1, True, True)))\n\n return inputs\n\ndef sample_inputs_outer(op_info, device, dtype, requires_grad, **kwargs):\n inputs = []\n arg_a = make_tensor((S,), device, dtype, requires_grad=requires_grad)\n arg_b = make_tensor((M,), device, dtype, requires_grad=requires_grad)\n inputs.append(SampleInput(arg_a, args=(arg_b,)))\n return inputs\n\ndef sample_inputs_dist(op_info, device, dtype, requires_grad):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n sizes = ((S, S, S), (S,), (S, 1, S), (), (S, S))\n ps = (2, 4)\n\n def generate_samples():\n for size_x, size_y, p in product(sizes, sizes, ps):\n yield SampleInput(make_arg(size_x), args=(make_arg(size_y), p))\n\n return list(generate_samples())\n\n# Missing to test the nondeterminism of the operation\n# https://github.com/pytorch/pytorch/issues/53352\ndef sample_inputs_index_copy(op_info, device, dtype, requires_grad, **kwargs):\n def make_arg(shape, low=None, high=None, dtype=dtype):\n return make_tensor(shape, device=device, dtype=dtype,\n low=low, high=high,\n requires_grad=requires_grad)\n\n t = make_arg((S, S))\n s = 
make_arg((S, S))\n    # non-contiguous target\n    t01 = t.transpose(0, 1)\n    # non-contiguous source\n    s01 = s.transpose(0, 1)\n\n    # idx is a permutation of 0...S-1 for this function to be deterministic\n    idx = torch.randperm(S, device=device, dtype=torch.int64)\n    # non-contiguous index\n    idx_nonctg = torch.repeat_interleave(idx, 2, dim=-1)[::2]\n    # index_copy_ does not support negative indices\n    # idx_neg = -idx - 1\n    samples = [SampleInput(tensor, args=(1, idx, source))\n               for tensor, idx, source in product([t, t01], [idx, idx_nonctg], [s, s01])]\n\n    # Add scalar cases\n    scalar_sizes = [(), (1,)]\n    ts = (make_arg(size) for size in scalar_sizes)\n    idxs = (make_arg(size, dtype=torch.int64, low=0, high=1) for size in scalar_sizes)\n    ss = (make_arg(size) for size in scalar_sizes)\n\n    samples.extend(SampleInput(t, args=(0, idx, s)) for t, idx, s in product(ts, idxs, ss))\n    return samples\n\ndef sample_inputs_mode(op_info, device, dtype, requires_grad):\n    inputs = []\n    args = (\n        ((S, S, S), (),),\n        ((S, S, S), (1, ),),\n        ((S, S, S), (1, True, ),),\n        ((), (),),\n        ((), (0,),),\n        ((), (0, True,),),\n    )\n    inputs = list((SampleInput(make_tensor(input_tensor, device, dtype,\n                                           low=None, high=None,\n                                           requires_grad=requires_grad),\n                               args=args,))\n                  for input_tensor, args in args)\n    return inputs\n\n# Missing to test the nondeterminism of the operation\n# https://github.com/pytorch/pytorch/issues/53352\ndef sample_inputs_put(op_info, device, dtype, requires_grad):\n    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n    make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False)\n\n    S = 3\n\n    def gen_inputs():\n        # Generic inputs\n        tgt_gen = (make_arg((S, S), noncontiguous=not ctg) for ctg in (True, False))\n        src_gen = (make_arg((S,), noncontiguous=not ctg) for ctg in (True, False))\n        idx = torch.randperm(S * S, device=device, dtype=torch.int64)[:S]\n        idx_nonctg = torch.repeat_interleave(idx, 2, dim=-1)[::2]\n        idx_neg = -idx - 1\n        idx_list = [idx, idx_nonctg, idx_neg]\n        for tgt, idx, src, acc in product(tgt_gen, idx_list, src_gen, (True, False)):\n            yield SampleInput(input=tgt, args=(idx, src, acc))\n\n        # Scalar cases\n        scalar_sizes = [(), (1,)]\n        tgt_gen = (make_arg(size) for size in scalar_sizes)\n        idx_gen = (make_idx(size, high=1) for size in scalar_sizes)\n        src_gen = (make_arg(size) for size in scalar_sizes)\n        for tgt, idx, src, acc in product(tgt_gen, idx_gen, src_gen, (True, False)):\n            yield SampleInput(input=tgt, args=(idx, src, acc))\n\n        # Empty cases\n        tgt_sizes = [(0,), (), (1,), (3, 2)]\n        tgt_gen = (make_arg(size) for size in tgt_sizes)\n        idx = make_idx((0,), high=1)\n        src = make_arg((0,))\n        for tgt, acc in product(tgt_gen, (True, False)): # iterate the generator, not the stale `tgt` binding\n            yield SampleInput(input=tgt, args=(idx, src, acc))\n\n    return list(gen_inputs())\n\ndef sample_inputs_take(op_info, device, dtype, requires_grad):\n    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n    make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False)\n\n    S = 3\n\n    def gen_inputs():\n        # Generic inputs: take S elements out of S * S\n        src_gen = (make_arg((S, S), noncontiguous=not ctg) for ctg in (True, False))\n        idx = make_idx((S,), high=S * S)\n        idx_nonctg = make_idx((S,), high=S * S, noncontiguous=True)\n        idx_neg = -idx - 1\n        idx_list = [idx, idx_nonctg, idx_neg]\n        for src, idx in product(src_gen, idx_list):\n            yield SampleInput(input=src, args=(idx,))\n\n        # Scalar cases\n        scalar_sizes = [(), (1,)]\n        
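# torch.take reads from the flattened source and accepts negative\n        # (wrap-around) indices, which is why idx_neg is exercised above.\n        src_gen = 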
(make_arg(size) for size in scalar_sizes)\n idx_gen = (make_idx(size, high=1) for size in scalar_sizes)\n for src, idx in product(src_gen, idx_gen):\n yield SampleInput(input=src, args=(idx,))\n\n # Empty cases\n src_sizes = [(0,), (), (1,), (3, 2)]\n src_gen = (make_arg(size) for size in src_sizes)\n idx = make_idx((0,), high=1)\n for src in src_gen:\n yield SampleInput(input=src, args=(idx,))\n\n return list(gen_inputs())\n\ndef sample_movedim_moveaxis(op_info, device, dtype, requires_grad):\n return (\n SampleInput(\n make_tensor((4, 3, 2, 1), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=((0, 1, 2, 3), (3, 2, 1, 0))),\n SampleInput(\n make_tensor((4, 3, 2, 1), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=((0, -1, -2, -3), (-3, -2, -1, -0)))\n )\n\n\ndef sample_repeat_tile(op_info, device, dtype, requires_grad, **kwargs):\n rep_dims = ((), (0, ), (1, ), (0, 2), (1, 1), (2, 3), (2, 3, 2), (0, 2, 3), (2, 1, 1, 1),)\n shapes = ((), (0,), (2,), (3, 0), (3, 2), (3, 0, 1))\n\n if requires_grad:\n # Tests for variant_consistency_jit, grad, gradgrad\n # are slower. Use smaller bags of `rep_dims` and `shapes`\n # in this case.\n rep_dims = ((), (0, ), (0, 2), (1, 1), (2, 3), (1, 3, 2), (3, 1, 1)) # type: ignore[assignment]\n shapes = ((), (0,), (2,), (3, 2)) # type: ignore[assignment]\n\n tensors = [make_tensor(shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad) for shape in shapes]\n\n samples = []\n for rep_dim, tensor in product(rep_dims, tensors):\n for t in (tensor, tensor.T):\n if op_info.name == 'repeat' and len(rep_dim) >= t.dim():\n # `torch.repeat` errors for `len(rep_dims) < t.dim()`,\n # so we filter such combinations.\n samples.append(SampleInput(t, args=(rep_dim,),))\n elif op_info.name == 'tile':\n samples.append(SampleInput(t, args=(rep_dim,),))\n\n return samples\n\n\ndef sample_inputs_narrow(op_info, device, dtype, requires_grad, **kwargs):\n shapes_and_args = (\n ((S, S, S), (1, 2, 2)),\n ((S, S, S), (-1, 2, 2)),\n ((S, S, S), (1, 0, 0)),\n ((S, S, S), (-1, 0, 0)),\n )\n\n def generator():\n for shape, args in shapes_and_args:\n tensor = make_tensor(shape, device, dtype, low=None, high=None,\n requires_grad=requires_grad)\n yield SampleInput(tensor, args=args)\n\n return list(generator())\n\n\ndef sample_unsqueeze(op_info, device, dtype, requires_grad, **kwargs):\n shapes_and_axes = [\n ((3, 4, 5), 0),\n ((3, 4, 5), 1),\n ((3, 4, 5), 3),\n ((3, 4, 5), -1),\n ((3, 4, 5), -3),\n ((), 0)\n ]\n\n samples = []\n for shape, axis in shapes_and_axes:\n tensor = make_tensor(shape, device, dtype, low=None, high=None,\n requires_grad=requires_grad)\n samples.append(SampleInput(tensor, args=(axis,),))\n\n return samples\n\n\ndef sample_inputs_squeeze(op_info, device, dtype, requires_grad, **kwargs):\n shapes_and_args = (\n ((S, 1, S, 1), ()),\n ((1, 1, 1, 1), ()),\n ((S, 1, S, 1), (1,)),\n ((S, 1, S, 1), (-1,)),\n ((S, 1, S, 1), (2,)),\n ((S, 1, S, 1), (-2,)),\n ((), (0, )),\n )\n\n def generator():\n for shape, args in shapes_and_args:\n tensor = make_tensor(shape, device, dtype, low=None, high=None,\n requires_grad=requires_grad)\n\n yield SampleInput(tensor, args=args)\n\n return list(generator())\n\n\n# TODO: reconcile with torch.linalg.det and torch.linalg.slogdet\n# Creates matrices with a positive nonzero determinant\ndef sample_inputs_logdet(op_info, device, dtype, requires_grad, **kwargs):\n def make_nonzero_det(A, *, sign=1, min_singular_value=0.1, **kwargs):\n u, s, vh = torch.linalg.svd(A, 
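\n        # (annotation, derived from the statements below) the clamp keeps every\n        # singular value at least min_singular_value, so the matrix rebuilt from\n        # u, s, vh has a determinant bounded away from zero\n        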
full_matrices=False)\n        s.clamp_(min=min_singular_value)\n        A = (u * s.unsqueeze(-2)) @ vh\n        det = A.det()\n        if sign is not None:\n            if A.dim() == 2:\n                if (det < 0) ^ (sign < 0):\n                    A[0, :].neg_()\n            else:\n                cond = ((det < 0) ^ (sign < 0)).nonzero()\n                if cond.size(0) > 0:\n                    for i in range(cond.size(0)):\n                        A[list(cond[i])][0, :].neg_()\n        return A\n\n    samples = []\n\n    # cases constructed using make_tensor()\n    tensor_shapes = (\n        (S, S),\n        (1, 1),\n        (3, 3, S, S),\n        (3, 3, 1, 1)\n    )\n\n    for shape in tensor_shapes:\n        t = make_tensor(shape, device=device, dtype=dtype)\n        d = make_nonzero_det(t).requires_grad_(requires_grad)\n        samples.append(SampleInput(d))\n\n    # cases constructed using:\n    # 1) make_symmetric_matrices\n    # 2) make_symmetric_pd_matrices\n    # 3) make_fullrank_matrices_with_distinct_singular_values\n    symmetric_shapes = (\n        (S, S),\n        (3, S, S),\n    )\n\n\n    def _helper(constructor, *shape, **kwargs):\n        t = constructor(*shape, device=device, dtype=dtype)\n        d = make_nonzero_det(t, **kwargs).requires_grad_(requires_grad)\n        samples.append(SampleInput(d))\n\n    for shape in symmetric_shapes:\n        _helper(make_symmetric_matrices, *shape)\n        _helper(make_symmetric_pd_matrices, *shape)\n        _helper(make_fullrank_matrices_with_distinct_singular_values, *shape, min_singular_value=0)\n\n    return tuple(samples)\n\ndef np_unary_ufunc_integer_promotion_wrapper(fn):\n    # Wrapper that passes PyTorch's default scalar\n    # type as an argument to the wrapped NumPy\n    # unary ufunc when given an integer input.\n    # This mimics PyTorch's integer->floating point\n    # type promotion.\n    #\n    # This is necessary when NumPy promotes\n    # integer types to double, since PyTorch promotes\n    # integer types to the default scalar type.\n\n    # Helper to determine if promotion is needed\n    def is_integral(dtype):\n        return dtype in [np.bool_, bool, np.uint8, np.int8, np.int16, np.int32, np.int64]\n\n    # NOTE: Promotion in PyTorch is from integer types to the default dtype\n    np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()]\n\n    @wraps(fn)\n    def wrapped_fn(x):\n        if is_integral(x.dtype):\n            return fn(x, dtype=np_dtype)\n        return fn(x)\n\n    return wrapped_fn\n\n\n# Metadata class for Fast Fourier Transforms in torch.fft.\nclass SpectralFuncInfo(OpInfo):\n    \"\"\"Operator information for torch.fft transforms.
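 The ndimensional flag records whether the transform's dim argument accepts a tuple of dimensions (as for fftn-style transforms) rather than a single int; when no ref is given, it defaults to the NumPy function of the same name. 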
\"\"\"\n\n def __init__(self,\n name, # the string name of the function\n *,\n ref=None, # Reference implementation (probably in np.fft namespace)\n dtypes=floating_and_complex_types(),\n ndimensional: bool, # Whether dim argument can be a tuple\n decorators=None,\n **kwargs):\n decorators = list(decorators) if decorators is not None else []\n decorators += [\n skipCPUIfNoMkl,\n skipCUDAIfRocm,\n # gradgrad is quite slow\n DecorateInfo(slowTest, 'TestGradients', 'test_fn_gradgrad'),\n ]\n\n super().__init__(name=name,\n dtypes=dtypes,\n decorators=decorators,\n **kwargs)\n self.ref = ref if ref is not None else _getattr_qual(np, name)\n self.ndimensional = ndimensional\n\n\n def sample_inputs(self, device, dtype, requires_grad=False, **kwargs):\n nd_tensor = make_tensor((S, S + 1, S + 2), device, dtype, low=None, high=None,\n requires_grad=requires_grad)\n tensor = make_tensor((31,), device, dtype, low=None, high=None,\n requires_grad=requires_grad)\n\n if self.ndimensional:\n return [\n SampleInput(nd_tensor, kwargs=dict(s=(3, 10), dim=(1, 2), norm='ortho')),\n SampleInput(nd_tensor, kwargs=dict(norm='ortho')),\n SampleInput(nd_tensor, kwargs=dict(s=(8,))),\n SampleInput(tensor),\n\n *(SampleInput(nd_tensor, kwargs=dict(dim=dim))\n for dim in [-1, -2, -3, (0, -1)]),\n ]\n else:\n return [\n SampleInput(nd_tensor, kwargs=dict(n=10, dim=1, norm='ortho')),\n SampleInput(nd_tensor, kwargs=dict(norm='ortho')),\n SampleInput(nd_tensor, kwargs=dict(n=7)),\n SampleInput(tensor),\n\n *(SampleInput(nd_tensor, kwargs=dict(dim=dim))\n for dim in [-1, -2, -3]),\n ]\n\n\nclass ShapeFuncInfo(OpInfo):\n \"\"\"Early version of a specialized OpInfo for Shape manipulating operations like tile and roll\"\"\"\n def __init__(self,\n name, # the string name of the function\n *,\n ref, # a reference function\n dtypes=floating_types(),\n dtypesIfCPU=None,\n dtypesIfCUDA=None,\n dtypesIfROCM=None,\n sample_inputs_func=None,\n **kwargs):\n super(ShapeFuncInfo, self).__init__(name,\n dtypes=dtypes,\n dtypesIfCPU=dtypesIfCPU,\n dtypesIfCUDA=dtypesIfCUDA,\n dtypesIfROCM=dtypesIfROCM,\n sample_inputs_func=sample_inputs_func,\n **kwargs)\n self.ref = ref\n\ndef sample_inputs_foreach(self, device, dtype, N):\n tensors = [make_tensor((N, N), device, dtype) for _ in range(N)]\n return tensors\n\n\ndef get_foreach_method_names(name):\n # get torch inplace reference function\n method_name = \"_foreach_\" + name\n method_name_inplace = \"_foreach_\" + name + \"_\"\n\n method = getattr(torch, method_name, None)\n method_inplace = getattr(torch, method_name_inplace, None)\n\n ref = getattr(torch.Tensor, name, None)\n\n return method, method_inplace, ref\n\nclass ForeachUnaryFuncInfo(OpInfo):\n \"\"\"Early version of a specialized OpInfo for foreach unary functions\"\"\"\n def __init__(self,\n name,\n dtypes=floating_and_complex_types(),\n dtypesIfCPU=all_types_and_complex(),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half),\n dtypesIfROCM=None,\n safe_casts_outputs=True,\n sample_inputs_func=sample_inputs_foreach,\n **kwargs):\n super(ForeachUnaryFuncInfo, self).__init__(\"_foreach_\" + name,\n dtypes=dtypes,\n dtypesIfCPU=dtypesIfCPU,\n dtypesIfCUDA=dtypesIfCUDA,\n dtypesIfROCM=dtypesIfROCM,\n safe_casts_outputs=safe_casts_outputs,\n sample_inputs_func=sample_inputs_func,\n **kwargs)\n\n foreach_method, foreach_method_inplace, torch_ref_method = get_foreach_method_names(name)\n self.method_variant = foreach_method\n self.inplace_variant = foreach_method_inplace\n self.ref = torch_ref_method\n\n\ndef 
sample_inputs_linalg_cholesky_inverse(op_info, device, dtype, requires_grad=False):\n    # Generate Cholesky factors of positive-definite (non-singular) Hermitian (symmetric) matrices\n    from torch.testing._internal.common_utils import random_hermitian_pd_matrix\n    inputs = (\n        torch.zeros(0, 0, dtype=dtype, device=device),  # 0x0 matrix\n        torch.zeros(0, 2, 2, dtype=dtype, device=device),  # zero batch of matrices\n        random_hermitian_pd_matrix(S, dtype=dtype, device=device),  # single matrix\n        random_hermitian_pd_matrix(S, 2, dtype=dtype, device=device),  # batch of matrices\n    )\n    test_cases = (torch.linalg.cholesky(a) for a in inputs)\n    out = []\n    for a in test_cases:\n        a.requires_grad = requires_grad\n        out.append(SampleInput(a))\n        out.append(SampleInput(a, kwargs=dict(upper=True)))\n    return out\n\ndef sample_inputs_linalg_lstsq(op_info, device, dtype, requires_grad=False, **kwargs):\n    from torch.testing._internal.common_utils import random_well_conditioned_matrix\n    out = []\n    for batch in ((), (3,), (3, 3)):\n        shape = batch + (3, 3)\n        # NOTE: inputs are not marked with `requires_grad` since\n        # linalg_lstsq is not differentiable\n        a = random_well_conditioned_matrix(*shape, dtype=dtype, device=device)\n        b = make_tensor(shape, device, dtype, low=None, high=None)\n        out.append(SampleInput(a, args=(b,)))\n    return out\n\ndef sample_inputs_householder_product(op_info, device, dtype, requires_grad, **kwargs):\n    \"\"\"\n    This function generates input for torch.linalg.householder_product (torch.orgqr).\n    The first argument is a matrix or a batch of matrices, and the second argument is a vector or a batch of vectors.\n    Empty, square, rectangular, batched square and batched rectangular inputs are generated.\n    \"\"\"\n    # Each column of the matrix is multiplied many times, leading to very large values for\n    # the Jacobian matrix entries and making the finite-difference result of grad check less accurate.\n    # That's why gradcheck with the default range [-9, 9] fails and [-2, 2] is used here.\n    samples = (\n        SampleInput(make_tensor((S, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),\n                    args=(make_tensor((S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),\n\n        SampleInput(make_tensor((S + 1, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),\n                    args=(make_tensor((S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),\n\n        SampleInput(make_tensor((2, 1, S, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),\n                    args=(make_tensor((2, 1, S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),\n\n        SampleInput(make_tensor((2, 1, S + 1, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),\n                    args=(make_tensor((2, 1, S,), device, dtype, low=-2, high=2, requires_grad=requires_grad),)),\n\n        SampleInput(make_tensor((0, 0), device, dtype, low=None, high=None, requires_grad=requires_grad),\n                    args=(make_tensor((0,), device, dtype, low=None, high=None, requires_grad=requires_grad),)),\n\n        SampleInput(make_tensor((S, S), device, dtype, low=-2, high=2, requires_grad=requires_grad),\n                    args=(make_tensor((0,), device, dtype, low=None, high=None, requires_grad=requires_grad),)),\n    )\n\n    return samples\n\ndef sample_inputs_ormqr(op_info, device, dtype, requires_grad):\n    # create a helper function wrapping `make_tensor`\n    make_input = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n    def gen_inputs():\n        batches = [(), (0, ), (2, ), (2, 1)]\n        ns = [5, 2, 0]\n        tf = [True, False]\n        for batch, (m, 
n), left, transpose in product(batches, product(ns, ns), tf, tf):\n            reflectors = make_input((*batch, m, n))\n            tau = make_input((*batch, min(m, n)))\n            other_matrix_shape = (m, n) if left else (n, m)\n            other = make_input((*batch, *other_matrix_shape))\n            kwargs = {\"left\": left, \"transpose\": transpose}\n            yield SampleInput(reflectors, args=(tau, other,), kwargs=kwargs)\n\n    return tuple(gen_inputs())\n\ndef sample_inputs_linalg_cholesky(op_info, device, dtype, requires_grad=False, **kwargs):\n    \"\"\"\n    This function always generates positive-definite input for torch.linalg.cholesky using\n    random_hermitian_pd_matrix.\n    The input is generated as the itertools.product of 'batches' and 'ns'.\n    In total this function generates 8 SampleInputs\n    'batches' cases include:\n        () - single input,\n        (0,) - zero batched dimension,\n        (2,) - batch of two matrices,\n        (1, 1) - 1x1 batch of matrices\n    'ns' gives 0x0 and 5x5 matrices.\n    Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.\n    \"\"\"\n    from torch.testing._internal.common_utils import random_hermitian_pd_matrix\n\n    batches = [(), (0, ), (2, ), (1, 1)]\n    ns = [5, 0]\n    out = []\n    for batch, n in product(batches, ns):\n        a = random_hermitian_pd_matrix(n, *batch, dtype=dtype, device=device)\n        a.requires_grad = requires_grad\n        out.append(SampleInput(a))\n    return out\n\ndef sample_inputs_symeig(op_info, device, dtype, requires_grad=False):\n    out = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)\n\n    for o in out:\n        o.kwargs = {\"upper\": bool(np.random.choice([True, False])),\n                    \"eigenvectors\": True}\n        # A gauge-invariant function\n        o.output_process_fn_grad = lambda output: (output[0], abs(output[1]))\n    return out\n\n\ndef sample_inputs_linalg_eigh(op_info, device, dtype, requires_grad=False, **kwargs):\n    \"\"\"\n    This function generates input for torch.linalg.eigh/eigvalsh with UPLO=\"U\" or \"L\" keyword argument.\n    \"\"\"\n    def out_fn(output):\n        if isinstance(output, tuple):\n            # eigh function\n            return output[0], abs(output[1])\n        else:\n            # eigvalsh function\n            return output\n\n    samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)\n    for sample in samples:\n        sample.kwargs = {\"UPLO\": np.random.choice([\"L\", \"U\"])}\n        sample.output_process_fn_grad = out_fn\n\n    return samples\n\n\ndef sample_inputs_linalg_slogdet(op_info, device, dtype, requires_grad=False):\n    def out_fn(output):\n        return output[1]\n\n    samples = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad)\n    for sample in samples:\n        sample.output_process_fn_grad = out_fn\n\n    return samples\n\n\ndef sample_inputs_linalg_pinv_hermitian(op_info, device, dtype, requires_grad=False, **kwargs):\n    \"\"\"\n    This function generates input for torch.linalg.pinv with hermitian=True keyword argument.\n    \"\"\"\n    out = sample_inputs_linalg_invertible(op_info, device, dtype, requires_grad, **kwargs)\n    for o in out:\n        o.kwargs = {\"hermitian\": True}\n    return out\n\ndef sample_inputs_linalg_solve(op_info, device, dtype, requires_grad=False, vector_rhs_allowed=True, **kwargs):\n    \"\"\"\n    This function always generates solvable input for torch.linalg.solve\n    Using random_fullrank_matrix_distinct_singular_value gives non-singular (=invertible, =solvable) matrices 'a'.\n    The first input to torch.linalg.solve is generated as the itertools.product of 'batches' and 'ns'.\n    The second input is generated as the product of 'batches', 'ns' and 'nrhs'.\n    In total this function generates 18 
SampleInputs\n    'batches' cases include:\n        () - single input,\n        (0,) - zero batched dimension,\n        (2,) - batch of two matrices.\n    'ns' gives 0x0 and 5x5 matrices,\n    and 'nrhs' controls the number of vectors to solve for:\n        () - using 1 as the number of vectors implicitly\n        (1,) - same as () but explicit\n        (3,) - solve for 3 vectors.\n    Zeros in dimensions are edge cases in the implementation and important to test for in order to avoid unexpected crashes.\n    'vector_rhs_allowed' controls whether to include nrhs = () in the list of SampleInputs.\n    torch.solve / triangular_solve / cholesky_solve (as opposed to torch.linalg.solve) do not allow\n    1D tensors (vectors) as the right-hand side.\n    Once torch.solve / triangular_solve / cholesky_solve and their testing are removed,\n    'vector_rhs_allowed' may be removed here as well.\n    \"\"\"\n    from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value\n\n    batches = [(), (0, ), (2, )]\n    ns = [5, 0]\n    if vector_rhs_allowed:\n        nrhs = [(), (1,), (3,)]\n    else:\n        nrhs = [(1,), (3,)]\n    out = []\n    for n, batch, rhs in product(ns, batches, nrhs):\n        a = random_fullrank_matrix_distinct_singular_value(n, *batch, dtype=dtype, device=device)\n        a.requires_grad = requires_grad\n        b = torch.randn(*batch, n, *rhs, dtype=dtype, device=device)\n        b.requires_grad = requires_grad\n        out.append(SampleInput(a, args=(b,)))\n    return out\n\n\ndef sample_inputs_legacy_solve(op_info, device, dtype, requires_grad=False, **kwargs):\n    \"\"\"\n    This function always generates solvable input for legacy solve functions\n    (the ones that are not in torch.linalg module).\n    The difference from sample_inputs_linalg_solve is that here the right-hand side of the A x = b equation\n    should have b.ndim >= 2; vectors are not allowed.\n    Also, the argument order is swapped.\n    \"\"\"\n    out = sample_inputs_linalg_solve(\n        op_info, device, dtype, requires_grad=requires_grad, vector_rhs_allowed=False\n    )\n\n    # Reverses tensor order\n    for sample in out:\n        sample.input, sample.args = sample.args[0], (sample.input,)\n\n    return out\n\n\ndef sample_inputs_lu(op_info, device, dtype, requires_grad=False, **kwargs):\n    # not needed once OpInfo tests support Iterables\n    def generate_samples():\n        batch_shapes = ((), (3,), (3, 3))\n        for batch_shape, get_infos in product(batch_shapes, (True, False)):\n            shape = batch_shape + (S, S)\n            input = make_tensor(shape, device, dtype, requires_grad=requires_grad, low=None, high=None)\n            yield SampleInput(input, args=(True, get_infos))\n\n    return list(generate_samples())\n\n\ndef sample_inputs_lu_unpack(op_info, device, dtype, requires_grad=False, **kwargs):\n    # not needed once OpInfo tests support Iterables\n    def generate_samples():\n        for lu_sample in sample_inputs_lu(op_info, device, dtype, requires_grad, **kwargs):\n            lu_data, pivots = lu_sample.input.lu()\n            yield SampleInput(lu_data, args=(pivots,))\n\n            # generate rectangular inputs\n            lu_data_shape = lu_data.shape\n            batch_shape = lu_data_shape[:-2]\n            n = lu_data_shape[-2]\n\n            for shape_inc in ((1, 0), (0, 1)):\n                lu_data, pivots = make_tensor(\n                    batch_shape + (n + shape_inc[0], n + shape_inc[1]),\n                    device, dtype,\n                    requires_grad=False,\n                    low=None, high=None\n                ).lu()\n                lu_data.requires_grad_(requires_grad)\n                yield SampleInput(lu_data, args=(pivots,))\n\n    return list(generate_samples())\n\n\ndef sample_inputs_roll(op_info, device, dtype, requires_grad=False, **kwargs):\n    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n    args = ((0, 0), (1, 2), (0, 2), (2, 0), (-1, 
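\n            # (annotation) each entry is an argument tuple for torch.roll (shifts,\n            # then optional dims); the 10000 case exercises shifts that wrap far past\n            # the dimension's size, and the last case rolls three dims at once\n            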
0), (10000, 1), (2,), ((1, 2, -1), (0, 1, 2)))\n\n    def generator():\n        for arg in args:\n            yield SampleInput(make_arg((S, S, S)), args=arg)\n\n    return list(generator())\n\n\ndef sample_inputs_rot90(op_info, device, dtype, requires_grad=False, **kwargs):\n    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n    args = ((1, (0, 1),),\n            (1, (1, 2),),\n            (1, (1, -1),),\n            ())\n\n    def generator():\n        for arg in args:\n            yield SampleInput(make_arg((S, S, S)), args=arg)\n\n    return list(generator())\n\n\ndef sample_inputs_std_var(op_info, device, dtype, requires_grad, **kwargs):\n    tensor_nd = make_tensor((S, S, S), device=device, dtype=dtype,\n                            low=None, high=None, requires_grad=requires_grad)\n    tensor_1d = make_tensor((S,), device=device, dtype=dtype,\n                            low=None, high=None, requires_grad=requires_grad)\n\n    return [\n        SampleInput(tensor_nd),\n        SampleInput(tensor_nd, kwargs=dict(dim=1)),\n        SampleInput(tensor_nd, kwargs=dict(dim=1, unbiased=True, keepdim=True)),\n        SampleInput(tensor_1d, kwargs=dict(dim=0, unbiased=True, keepdim=True)),\n        SampleInput(tensor_1d, kwargs=dict(dim=0, unbiased=False, keepdim=False)),\n\n        SampleInput(tensor_nd, kwargs=dict(dim=(1,), correction=S // 2)),\n        SampleInput(tensor_nd, kwargs=dict(dim=None, correction=0, keepdim=True)),\n    ]\n\n\ndef _sample_inputs_svd(op_info, device, dtype, requires_grad=False, is_linalg_svd=False):\n    \"\"\"\n    This function generates input for torch.svd with distinct singular values so that autograd is always stable.\n    Matrices of different size:\n        square matrix - S x S size\n        tall matrix - S x (S-2)\n        wide matrix - (S-2) x S\n    and batched variants of the above are generated.\n    Each SampleInput has a function 'output_process_fn_grad' attached to it that is applied to the output of torch.svd\n    It is needed for autograd checks, because backward of svd doesn't work for an arbitrary loss function.\n    \"\"\"\n    from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value\n\n    # svd and linalg.svd return V and V.conj().T, respectively. So we need to slice\n    # along different dimensions when needed (this is used by\n    # test_cases2:wide_all and wide_all_batched below)\n    if is_linalg_svd:\n        def slice_V(v):\n            return v[..., :(S - 2), :]\n\n        def uv_loss(usv):\n            u00 = usv[0][0, 0]\n            v00_conj = usv[2][0, 0]\n            return u00 * v00_conj\n    else:\n        def slice_V(v):\n            return v[..., :, :(S - 2)]\n\n        def uv_loss(usv):\n            u00 = usv[0][0, 0]\n            v00_conj = usv[2][0, 0].conj()\n            return u00 * v00_conj\n\n    test_cases1 = (  # some=True (default)\n        # loss functions for complex-valued svd have to be \"gauge invariant\",\n        # i.e. 
loss functions shouldn't change when the sign of the singular vectors changes.\n        # the simplest choice to satisfy this requirement is to apply 'abs'.\n        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device),\n            lambda usv: usv[1]),  # 'check_grad_s'\n        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device),\n            lambda usv: abs(usv[0])),  # 'check_grad_u'\n        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device),\n            lambda usv: abs(usv[2])),  # 'check_grad_v'\n        # this test is important as it checks the additional term that is non-zero only for complex-valued inputs\n        # and when the loss function depends both on 'u' and 'v'\n        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device),\n            uv_loss),  # 'check_grad_uv'\n        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device)[:(S - 2)],\n            lambda usv: (abs(usv[0]), usv[1], abs(usv[2][..., :, :(S - 2)]))),  # 'wide'\n        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device)[:, :(S - 2)],\n            lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))),  # 'tall'\n        (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device),\n            lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))),  # 'batched'\n        (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device)[..., :(S - 2), :],\n            lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))),  # 'wide_batched'\n        (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device)[..., :, :(S - 2)],\n            lambda usv: (abs(usv[0]), usv[1], abs(usv[2]))),  # 'tall_batched'\n    )\n    test_cases2 = (  # some=False\n        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device)[:(S - 2)],\n            lambda usv: (abs(usv[0]), usv[1], abs(slice_V(usv[2])))),  # 'wide_all'\n        (random_fullrank_matrix_distinct_singular_value(S, dtype=dtype).to(device)[:, :(S - 2)],\n            lambda usv: (abs(usv[0][:, :(S - 2)]), usv[1], abs(usv[2]))),  # 'tall_all'\n        (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device)[..., :(S - 2), :],\n            lambda usv: (abs(usv[0]), usv[1], abs(slice_V(usv[2])))),  # 'wide_all_batched'\n        (random_fullrank_matrix_distinct_singular_value(S, 2, dtype=dtype).to(device)[..., :, :(S - 2)],\n            lambda usv: (abs(usv[0][..., :, :(S - 2)]), usv[1], abs(usv[2]))),  # 'tall_all_batched'\n    )\n\n    out = []\n    for a, out_fn in test_cases1:\n        a.requires_grad = requires_grad\n        if is_linalg_svd:\n            kwargs = {'full_matrices': False}\n        else:\n            kwargs = {'some': True}\n        out.append(SampleInput(a, kwargs=kwargs, output_process_fn_grad=out_fn))\n\n    for a, out_fn in test_cases2:\n        a.requires_grad = requires_grad\n        if is_linalg_svd:\n            kwargs = {'full_matrices': True}\n        else:\n            kwargs = {'some': False}\n        out.append(SampleInput(a, kwargs=kwargs, output_process_fn_grad=out_fn))\n\n    return out\n\n\ndef sample_inputs_permute(op_info, device, dtype, requires_grad, **kwargs):\n    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n    cases = [((1, 2, 3, 4), (0, 2, 3, 1)),\n             ((1, 2, 3, 4), (0, -2, -1, 1)),\n             ((), ()),\n             ((1, 2, 3, 4), (2, 1, 3, 0))]\n\n    def generator():\n        for shape, args in cases:\n            yield SampleInput(make_arg(shape), args=(args,))\n\n    return list(generator())\n\n\n# Based on erstwhile method_tests tests & some tensor_op_tests for pow\ndef sample_inputs_pow(op_info, device, dtype, requires_grad, **kwargs):\n    samples = []\n\n    if dtype in [torch.float16, torch.bfloat16, torch.float32, torch.float64]:\n        test_cases = (\n            ((2, 2), 0, 5, 1e-3, requires_grad, (2, 2), 0, 1, 0.1, 
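\n            # (annotation) each case unpacks below as: shape_b, low_b, high_b,\n            # additive_b, b_grad, shape_e, low_e, high_e, additive_e, e_grad,\n            # broadcasts_input\n            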
requires_grad, False),\n ((2, 2), 0, 5, 1e-3, requires_grad, (1,), 0, 1, 0.1, requires_grad, False),\n ((), 1e-3, 1e-3 + 1, 0, True, (), 0.1, 1.1, 0, False, False),\n ((2, 2), 0, 5, 1e-3, requires_grad, (), 0.1, 1.1, 1, False, False),\n )\n tests_require_resizing = (\n ((1,), 0, 5, 1e-3, requires_grad, (2, 2), 0, 1, 0.1, requires_grad, True),\n ((2, 1, 2), 0, 5, 1e-3, requires_grad, (1, 2, 1), 0, 1, 0.1, requires_grad, True),\n ((), 1e-3, 1e-3 + 1, 0, True, (1, S, 1), 0, 1, 0.1, requires_grad, True),\n )\n cases = test_cases + tests_require_resizing\n samples = list(SampleInput(make_tensor(shape_b, low=low_b, high=high_b,\n requires_grad=b_grad, device=device,\n dtype=dtype) + additive_b,\n args=(make_tensor(shape_e, low=low_e, high=high_e,\n requires_grad=e_grad, device=device,\n dtype=dtype) + additive_e,),\n broadcasts_input=broadcasts_input)\n for shape_b, low_b, high_b, additive_b, b_grad, shape_e, low_e,\n high_e, additive_e, e_grad, broadcasts_input in cases)\n tensor_scalar_inputs = (\n ((2, 2), 0, 5, 1e-3, requires_grad, (3.14,)),\n ((), 1e-3, 1e-3 + 1, 0, True, (3.14,))\n )\n more_samples = list(SampleInput(make_tensor(shape, dtype=dtype, device=device,\n high=high, low=low,\n requires_grad=b_grad) + additive,\n args=exp)\n for shape, low, high, additive, b_grad, exp in tensor_scalar_inputs)\n samples = [*samples, *more_samples]\n elif dtype in [torch.complex64, torch.complex128]:\n args_tuple = (\n ((2, 2), 0, 5, requires_grad, (3.14,)),\n ((), 0, 1, True, (3.14,)),\n ((), 0, 1, True, (3.14j,))\n )\n samples = list(SampleInput(make_tensor(shape, dtype=dtype, device=device,\n high=high, low=low,\n requires_grad=b_grad) + 1e-3 * (1 + 1j),\n args=arg)\n for shape, low, high, b_grad, arg in args_tuple)\n elif dtype == torch.bool:\n arg_tuple = (0, 1, 1., 2.3)\n samples = list(SampleInput(make_tensor((2, 2), device=device, dtype=dtype,\n requires_grad=requires_grad),\n args=(arg,))\n for arg in arg_tuple)\n dtypes_list = [torch.float64, torch.float32, torch.int64, torch.int32]\n more_samples = list(SampleInput(make_tensor((2, 2), device, dtype=torch.bool,\n requires_grad=requires_grad),\n args=(make_tensor((2, 2), device, dtype=dtype,\n requires_grad=requires_grad),))\n for dtype in dtypes_list)\n samples = [*samples, *more_samples]\n samples.append(SampleInput(make_tensor((2, 2, 2), device, dtype=torch.bool,\n requires_grad=requires_grad),\n args=(make_tensor((2, 1), device, dtype=torch.float64,\n requires_grad=requires_grad),)))\n else:\n exp_tuple = (1, 2, 3)\n samples = list(SampleInput(make_tensor((2, 2), device, dtype,\n requires_grad=requires_grad),\n args=(arg,))\n for arg in exp_tuple)\n samples.append(SampleInput(make_tensor((2, 2), device, dtype,\n requires_grad=requires_grad),\n args=(make_tensor((2, 2), device, dtype,\n requires_grad=requires_grad),)))\n return tuple(samples)\n\ndef sample_inputs_svd(op_info, device, dtype, requires_grad=False, **kwargs):\n return _sample_inputs_svd(op_info, device, dtype, requires_grad, is_linalg_svd=False)\n\ndef sample_inputs_linalg_svd(op_info, device, dtype, requires_grad=False, **kwargs):\n return _sample_inputs_svd(op_info, device, dtype, requires_grad, is_linalg_svd=True)\n\ndef sample_inputs_linalg_svdvals(op_info, device, dtype, requires_grad=False, **kwargs):\n batches = [(), (0, ), (2, ), (1, 1)]\n ns = [5, 2, 0]\n samples = []\n for batch, (m, n) in product(batches, product(ns, ns)):\n a = make_tensor((*batch, m, n), device, dtype, low=None, high=None, requires_grad=requires_grad)\n samples.append(SampleInput(a))\n return 
samples\n\ndef sample_inputs_eig(op_info, device, dtype, requires_grad=False, **kwargs):\n    eigvecs = make_tensor((S, S), device=device, dtype=dtype,\n                          low=None, high=None)\n    eigvals = make_tensor((S,), device=device, dtype=dtype,\n                          low=None, high=None)\n    # we produce only diagonalizable inputs which do not have\n    # complex eigenvalues for real inputs, as there is no\n    # backward implementation for real inputs with complex\n    # eigenvalues yet.\n    input = (eigvecs * eigvals.unsqueeze(-2)) @ eigvecs.inverse()\n    input.requires_grad_(requires_grad)\n\n    def process_output(eigpair):\n        eigvals, eigvecs = eigpair\n        if dtype.is_complex:\n            # eig produces eigenvectors which are normalized to norm 1.\n            # Note that if v is an eigenvector, so is v * e^{i \\phi},\n            # and |v| = |v * e^{i \\phi}| = 1.\n            # This, however, makes the eigenvector backward computation process\n            # rather unstable unless the objective function is gauge-invariant,\n            # that is if f(z) == f(|z|), for example.\n            # Hence for complex inputs we ignore the phases and return only\n            # the absolute values.\n            return eigvals, eigvecs.abs()\n        else:\n            return eigvals, eigvecs\n\n    return [\n        SampleInput(\n            input,\n            kwargs=dict(eigenvectors=True),\n            output_process_fn_grad=process_output\n        ),\n    ]\n\n\ndef sample_inputs_einsum(op_info, device, dtype, requires_grad=False, **kwargs):\n    x = make_tensor((3,), device, dtype, requires_grad=requires_grad)\n    y = make_tensor((4,), device, dtype, requires_grad=requires_grad)\n    A = make_tensor((2, 3,), device, dtype, requires_grad=requires_grad, noncontiguous=True)\n    B = make_tensor((1, 3,), device, dtype, requires_grad=requires_grad)\n    C = make_tensor((1, 2, 3,), device, dtype, requires_grad=requires_grad)\n    D = make_tensor((1, 3, 4,), device, dtype, requires_grad=requires_grad, noncontiguous=True)\n    E = make_tensor((4, 4,), device, dtype, requires_grad=requires_grad)\n    H = make_tensor((3, 3,), device, dtype, requires_grad=requires_grad, noncontiguous=True)\n    I = make_tensor((1, 3, 1,), device, dtype, requires_grad=requires_grad)\n\n    inputs = []\n\n    # Vector operations\n    inputs.append(SampleInput([x], args=('i->',)))  # sum\n    inputs.append(SampleInput([x, y], args=('i,j->ij',)))  # outer\n\n    # Matrix operations\n    inputs.append(SampleInput([A], args=(\"ij->i\",)))  # col sum\n    inputs.append(SampleInput([A, B], args=(\"ij,kj->ik\",)))  # matmul\n    inputs.append(SampleInput([A, E], args=(\"ij,Ab->ijAb\",)))  # matrix outer product\n\n    # Tensor operations\n    inputs.append(SampleInput([C, D], args=(\"aij,ajk->aik\",)))  # batch matmul\n    inputs.append(SampleInput([D, E], args=(\"aij,jk->aik\",)))  # tensor matrix contraction\n    inputs.append(SampleInput([C, B], args=(\"ijk,ik->j\",)))  # non-contiguous\n\n    # Test diagonals\n    inputs.append(SampleInput([I], args=('iji->j',)))  # non-contiguous trace\n\n    # Test ellipsis\n    inputs.append(SampleInput([H], args=(\"i...->...\",)))\n    inputs.append(SampleInput([C, x], args=('...ik, ...j -> ij',)))\n\n    return inputs\n\n\ndef sample_inputs_linalg_qr(op_info, device, dtype, requires_grad=False, **kwargs):\n    \"\"\"\n    This function generates input for torch.linalg.qr.\n    The input is generated as the itertools.product of 'batches' and 'ns'.\n    \"\"\"\n    batches = [(), (0,), (2, ), (1, 1)]\n    ns = [5, 2, 0]\n    out = []\n    for batch, (m, n) in product(batches, product(ns, ns)):\n        a = torch.randn(*batch, m, n, dtype=dtype, device=device, requires_grad=requires_grad)\n        out.append(SampleInput(a))\n    return out\n\ndef sample_inputs_geqrf(op_info, device, dtype, requires_grad=False):\n    batches = [(), (0, ), (2, 
), (1, 1)]\n ns = [5, 2, 0]\n samples = []\n for batch, (m, n) in product(batches, product(ns, ns)):\n # TODO: CUDA path doesn't work with batched or empty inputs\n if torch.device(device).type == 'cuda' and (batch != () or m == 0 or n == 0):\n continue\n a = make_tensor((*batch, m, n), device, dtype, low=None, high=None, requires_grad=requires_grad)\n samples.append(SampleInput(a))\n return samples\n\ndef sample_inputs_flip(op_info, device, dtype, requires_grad, **kwargs):\n tensors = (\n make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((S, 0, M), device, dtype, low=None, high=None, requires_grad=requires_grad)\n )\n\n dims = ((0, 1, 2), (0,), (0, 2), (-1,), ())\n\n samples = [SampleInput(tensor, kwargs={'dims': dim}) for tensor, dim in product(tensors, dims)]\n\n return samples\n\ndef sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad, **kwargs):\n tensors = (\n make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((S, 0, M), device, dtype, low=None, high=None, requires_grad=requires_grad)\n )\n return [SampleInput(tensor) for tensor in tensors]\n\n# TODO: clamp shares tensors among its sample inputs --- we should prohibit this!\ndef sample_inputs_clamp(op_info, device, dtype, requires_grad, **kwargs):\n x = make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad)\n lb = make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad)\n ub = make_tensor((S, M, S), device, dtype, low=None, high=None, requires_grad=requires_grad)\n\n def detach(tensor):\n return tensor.clone().detach_().requires_grad_(requires_grad)\n\n return [\n SampleInput(detach(x), args=(lb, ub)),\n SampleInput(detach(x), args=(detach(lb[0]), detach(ub[0]))),\n SampleInput(detach(x), args=(detach(lb[:, :1]),)),\n ]\n\ndef sample_inputs_clamp_scalar(op_info, device, dtype, requires_grad):\n tensors = (\n make_tensor((2, 3, 2), device, dtype, low=None, high=None, requires_grad=requires_grad),\n make_tensor((2, 0, 3), device, dtype, low=None, high=None, requires_grad=requires_grad),\n )\n if dtype is torch.uint8:\n min_max_vals = ((2, 5), (3, 7))\n else:\n min_max_vals = ((0, 1), (-1, 1))\n output = [SampleInput(tensor, args=vals) for tensor, vals in product(tensors, min_max_vals)]\n output += [SampleInput(tensors[0], args=(0.5, None)), SampleInput(tensors[0], args=(None, 0.5))]\n empty_tensor = make_tensor((), device=device, dtype=dtype, low=None, high=None, requires_grad=requires_grad)\n output += [SampleInput(empty_tensor, args=(0.0, 1.0)), ]\n return output\n\ndef sample_kwargs_clamp_scalar(device, dtype, input):\n if dtype is torch.uint8:\n min_val, max_val = (random.randint(1, 3), random.randint(4, 8))\n elif dtype.is_floating_point:\n min_val, max_val = (random.uniform(-8, 0), random.uniform(1, 8)) # type: ignore[assignment]\n else:\n min_val, max_val = (random.randint(-8, 0), random.randint(1, 8))\n return {'min': min_val, 'max': max_val}, {'a_min': min_val, 'a_max': max_val}\n\ndef sample_inputs_cumprod(op_info, device, dtype, requires_grad, **kwargs):\n def make_arg(shape):\n # shrink values to be in the interval [-1, +1] for better precision in gradgradcheck\n return make_tensor(shape, device, dtype, low=-1, high=+1, requires_grad=requires_grad)\n\n def prod_zeros(dim_select):\n assert len(dim_select) == 2\n result = make_arg(3 * (S,))\n with torch.no_grad():\n result.narrow(dim_select[0], 0, 1).narrow(dim_select[1], 1, 1).zero_()\n 
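# (annotation) together with the line above, this plants zeros at three\n            # coordinates so prod/cumprod gradients are exercised on slices containing\n            # one zero and on a slice containing two zeros\n            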
result.narrow(dim_select[0], 2, 1).narrow(dim_select[1], 3, 1).zero_()\n            result.narrow(dim_select[0], 4, 1).narrow(dim_select[1], 3, 1).zero_()\n        return result\n\n    # will not be needed once OpInfo tests support Iterables\n    def sample_generator():\n        for dim in range(3):\n            yield SampleInput(make_arg((S, S, S)), args=(dim,))\n        # Scalar tensors and empty tensor\n        for size in [(), (1,), (0,)]:\n            yield SampleInput(make_arg(size), args=(0,))\n\n        yield SampleInput(prod_zeros([0, 1]), args=(1,))\n        yield SampleInput(prod_zeros([0, 2]), args=(1,))\n        yield SampleInput(prod_zeros([1, 2]), args=(1,))\n\n        # test dtype kwarg\n        yield SampleInput(prod_zeros([1, 2]), args=(1,), kwargs={'dtype': dtype})\n\n    return list(sample_generator())\n\ndef sample_inputs_view_as_complex(op_info, device, dtype, requires_grad, **kwargs):\n    return [SampleInput(make_tensor((S, 2), device, dtype, requires_grad=requires_grad),)]\n\ndef sample_inputs_view_as_real(op_info, device, dtype, requires_grad, **kwargs):\n    tensors = (\n        make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n        make_tensor((), device, dtype, requires_grad=requires_grad)\n    )\n    return [SampleInput(tensor) for tensor in tensors]\n\ndef sample_inputs_copysign(op_info, device, dtype, requires_grad, **kwargs):\n    def _make_tensor(*shape, low=None, high=None):\n        return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n    cases = [\n        # no broadcast\n        ((S, S, S), (S, S, S), False),\n        # broadcast rhs\n        ((S, S, S), (S, S), False),\n\n        # scalar\n        ((S, S), 3.14, False),\n        # scalar positive zero\n        ((S, S), 0.0, False),\n        # scalar negative zero\n        ((S, S), -0.0, False),\n    ]\n\n    # broadcast lhs\n    cases.append(((S, S), (S, S, S), True))\n    # broadcast all\n    cases.append(((S, 1, S), (M, S), True))\n\n    def generator():\n        for input_shape, arg_val, broadcasts_input in cases:\n            if isinstance(arg_val, tuple):\n                arg = _make_tensor(*arg_val)\n            else:\n                # arg_val is scalar\n                arg = arg_val\n\n            yield SampleInput(_make_tensor(*input_shape), args=(arg, ), broadcasts_input=broadcasts_input)\n\n    return list(generator())\n\ndef sample_inputs_prod(op_info, device, dtype, requires_grad):\n    def make_arg(shape):\n        # shrink values to be in the interval [-1, +1] for better precision in gradgradcheck\n        return make_tensor(shape, device, dtype, low=-1, high=+1, requires_grad=requires_grad)\n\n    def prod_single_zero():\n        result = make_arg(2 * (S,))\n        with torch.no_grad():\n            result[0, 1] = 0\n        return result\n\n    # will not be needed once OpInfo tests support Iterables\n    def sample_generator():\n        for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad):\n            yield SampleInput(sample.input)  # only Tensor, ignore other inputs\n            yield sample\n            sample.kwargs['keepdim'] = True\n            yield sample\n        yield SampleInput(prod_single_zero())\n        yield SampleInput(make_arg((3, 3, 3)), args=(1,))\n        yield SampleInput(make_arg((3, 3, 3)), args=(1,), kwargs={'keepdim': True})\n\n        # test zero scalar tensor\n        zero = make_arg(())\n        with torch.no_grad():\n            zero.zero_()\n        yield SampleInput(zero)\n        yield SampleInput(zero, args=(0,))\n        yield SampleInput(zero, args=(0,), kwargs={'keepdim': True})\n\n    return list(sample_generator())\n\ndef sample_inputs_diag(op_info, device, dtype, requires_grad, **kwargs):\n    vec_sample = SampleInput(make_tensor((M, ), device, dtype, low=None, high=None, requires_grad=requires_grad))\n\n    tensors = (\n        make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),\n        make_tensor((3, 5), device, dtype, low=None, high=None, 
requires_grad=requires_grad),\n make_tensor((5, 3), device, dtype, low=None, high=None, requires_grad=requires_grad),\n )\n\n args = ((), (2,), (-2,), (1,), (2,))\n\n samples = []\n for tensor, arg in product(tensors, args):\n samples.append(SampleInput(tensor, args=arg))\n\n return samples + [vec_sample]\n\ndef sample_inputs_logit(op_info, device, dtype, requires_grad, **kwargs):\n low, high = op_info.domain\n\n # Note: Operator is very sensitive at points near the\n # start and end of domain and leads to NaN for float16\n # if domain_eps is 1e-5.\n domain_eps = op_info._domain_eps if dtype != torch.float16 else 3e-2\n\n low = low + domain_eps\n high = high - domain_eps\n\n samples = (\n SampleInput(make_tensor((S, S, S), device, dtype, low=low, high=high, requires_grad=requires_grad)),\n SampleInput(make_tensor((S, S, S), device, dtype, low=low,\n high=high, requires_grad=requires_grad), args=(0.2,)),\n SampleInput(make_tensor((), device, dtype, low=low, high=high, requires_grad=requires_grad)),\n SampleInput(make_tensor((), device, dtype, low=low,\n high=high, requires_grad=requires_grad), args=(0.2,)),\n )\n\n return samples\n\ndef sample_inputs_floor_divide(op_info, device, dtype, requires_grad, **kwargs):\n lhs = make_tensor((S, S, S), device, dtype, low=None, high=None, requires_grad=requires_grad)\n rhs = make_tensor((S, S, S), device, dtype, low=None, high=None, requires_grad=requires_grad)\n # Avoid integer divide by 0\n if not (dtype.is_floating_point or dtype.is_complex):\n rhs[rhs == 0] = 1\n\n return [\n SampleInput(lhs, args=(rhs,)),\n SampleInput(lhs, args=(rhs[0],)),\n SampleInput(lhs, args=(3.14,)),\n ]\n\n\ndef sample_inputs_masked_scatter(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n def samples_generator():\n yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg((S, S))))\n yield SampleInput(make_arg((S, S)), args=(torch.randn((S,), device=device) > 0, make_arg((S, S))))\n yield SampleInput(make_arg((S, S)), args=(bernoulli_scalar().to(device), make_arg((S, S))))\n yield SampleInput(make_arg((S,)),\n args=(torch.randn(S, S, device=device) > 0, make_arg((S, S))),\n broadcasts_input=True)\n\n samples = tuple(samples_generator())\n return samples\n\n\ndef sample_inputs_masked_fill(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n def sample_generator():\n yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, 10))\n yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg(())))\n yield SampleInput(make_arg((S, S)), args=(torch.randn(S, device=device) > 0, 10))\n yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, 10))\n yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, make_arg(())))\n yield SampleInput(make_arg((S, S)), args=(torch.randn((), device=device) > 0, 10))\n\n yield SampleInput(make_arg((S,)),\n args=(torch.randn(S, S, device=device) > 0, make_arg(())),\n broadcasts_input=True)\n yield SampleInput(make_arg((S,)),\n args=(torch.randn(S, S, device=device) > 0, 10),\n broadcasts_input=True)\n\n samples = tuple(sample_generator())\n return samples\n\ndef sample_inputs_masked_select(op_info, device, dtype, requires_grad, **kwargs):\n samples = (\n SampleInput(make_tensor((M, M), device, dtype, low=None, high=None, 
requires_grad=requires_grad),\n args=(torch.randn(M, M, device=device) > 0,)),\n\n SampleInput(make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(torch.randn((M,), device=device) > 0,)),\n\n SampleInput(make_tensor((M,), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(torch.randn((M, M), device=device) > 0,)),\n\n SampleInput(make_tensor((M, 1, M), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(torch.randn((M, M), device=device) > 0,)),\n\n SampleInput(make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(torch.tensor(1, device=device, dtype=torch.bool),)),\n\n SampleInput(make_tensor((M, M), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(torch.tensor(1, device=device, dtype=torch.bool),)),\n\n SampleInput(make_tensor((), device, dtype, low=None, high=None, requires_grad=requires_grad),\n args=(torch.randn((M, M), device=device) > 0,)),\n )\n\n return samples\n\ndef sample_inputs_matrix_exp(op_info, device, dtype, requires_grad, **kwargs):\n samples = (\n SampleInput(make_tensor((S, S), device, dtype, requires_grad=requires_grad)),\n SampleInput(make_tensor((S, S, S), device, dtype, requires_grad=requires_grad)),\n )\n\n return samples\n\ndef sample_inputs_matmul(op_info, device, dtype, requires_grad):\n test_cases = (((L,), (L,)),\n ((S, M), (M,)),\n ((M,), (M, S)),\n ((S, M), (M, S)),\n ((S, S, M), (M,)),\n ((S, S, M), (M, S)),\n ((M,), (S, M, S)),\n ((S, M), (S, M, S)),\n ((S, S, M, M), (S, S, M, S)),\n ((S, S, M, M), (M,)),\n ((M,), (S, S, M, S)))\n sample_inputs = []\n for lhs_shape, rhs_shape in test_cases:\n lhs = make_tensor(lhs_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)\n rhs = make_tensor(rhs_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)\n sample_inputs.append(SampleInput(lhs, args=(rhs,)))\n return tuple(sample_inputs)\n\n\ndef sample_inputs_polar(op_info, device, dtype, requires_grad, **kwargs):\n def _make_tensor_helper(shape, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n samples = (\n SampleInput(_make_tensor_helper((S, S), low=0), args=(_make_tensor_helper((S, S)),)),\n SampleInput(_make_tensor_helper((), low=0), args=(_make_tensor_helper(()),)),\n )\n\n return samples\n\ndef sample_inputs_complex(op_info, device, dtype, requires_grad, **kwargs):\n def _make_tensor_helper(shape):\n return make_tensor(shape, device, dtype, requires_grad=requires_grad)\n\n samples = (\n SampleInput(_make_tensor_helper((S, S)), args=(_make_tensor_helper((S, S)),)),\n SampleInput(_make_tensor_helper(()), args=(_make_tensor_helper(()),)),\n )\n\n return samples\n\n\ndef sample_inputs_polygamma(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n tensor_shapes = ((S, S), ())\n ns = (1, 2, 3, 4, 5)\n\n def generator():\n for shape, n in product(tensor_shapes, ns):\n yield SampleInput(make_arg(shape), args=(n,))\n\n return list(generator())\n\n\ndef sample_inputs_mvlgamma(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n tensor_shapes = ((S, S), ())\n ns = (1, 2, 3, 4, 5)\n\n # Since the accepted lower bound for input\n # to mvlgamma depends on `p` argument,\n # the following function computes the lower bound\n # which we pass to 
`make_tensor`.\n def compute_min_val(p):\n return (p - 1.) / 2\n\n def generator():\n for shape, n in product(tensor_shapes, ns):\n min_val = compute_min_val(n)\n yield SampleInput(make_arg(shape, low=min_val), args=(n,))\n\n return list(generator())\n\n\n# Since `mvlgamma` has multiple entries,\n# there are multiple common skips for the additional\n# entries. Following function is a helper to that end.\ndef skips_mvlgamma(skip_redundant=False):\n skips = (\n # outside domain values are hard error for mvlgamma op.\n SkipInfo('TestUnaryUfuncs', 'test_float_domains'),\n )\n if not skip_redundant:\n # Redundant tests\n skips = skips + ( # type: ignore[assignment]\n SkipInfo('TestGradients'),\n SkipInfo('TestOpInfo'),\n SkipInfo('TestCommon'),\n )\n return skips\n\n\n# To test reference numerics against multiple values of argument `p`,\n# we make multiple OpInfo entries with each entry corresponding to different value of p.\n# We run the op tests from test_ops.py only for `p=1` to avoid redundancy in testing.\n# Class `MvlGammaInfo` already contains the basic information related to the operator,\n# it only takes arguments like `domain`, `skips` and `sample_kwargs`, which\n# differ between the entries.\nclass MvlGammaInfo(UnaryUfuncInfo):\n def __init__(self, variant_test_name, domain, skips, sample_kwargs):\n super(MvlGammaInfo, self).__init__(\n 'mvlgamma',\n ref=reference_mvlgamma if TEST_SCIPY else _NOTHING,\n variant_test_name=variant_test_name,\n domain=domain,\n decorators=(precisionOverride({torch.float16: 5e-2}),),\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.half),\n sample_inputs_func=sample_inputs_mvlgamma,\n supports_out=False,\n skips=skips,\n sample_kwargs=sample_kwargs)\n\n\ndef sample_inputs_entr(op_info, device, dtype, requires_grad, **kwargs):\n low, _ = op_info.domain\n\n if requires_grad:\n low = 0 + op_info._domain_eps\n\n return (SampleInput(make_tensor((L,), device, dtype,\n low=low,\n requires_grad=requires_grad)),\n SampleInput(make_tensor((), device, dtype,\n low=low,\n requires_grad=requires_grad)))\n\ndef sample_inputs_rsub(op_info, device, dtype, requires_grad, variant='tensor', **kwargs):\n def _make_tensor_helper(shape, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n def _samples_with_alpha_helper(args, alphas, filter_fn=lambda arg_alpha: True):\n filtered_product = filter(filter_fn, product(args, alphas)) # type: ignore[var-annotated]\n return (SampleInput(input, args=(arg,), kwargs=dict(alpha=alpha))\n for (input, arg), alpha in filtered_product)\n\n int_alpha, float_alpha, complex_alpha = 2, 0.1, 1 + 0.6j\n\n if variant == 'tensor':\n samples = (\n SampleInput(_make_tensor_helper((S, S)), args=(_make_tensor_helper((S, S)),)),\n SampleInput(_make_tensor_helper((S, S)), args=(_make_tensor_helper((S,)),)),\n SampleInput(_make_tensor_helper((S,)), args=(_make_tensor_helper((S, S)),)),\n SampleInput(_make_tensor_helper(()), args=(_make_tensor_helper(()),)),\n SampleInput(_make_tensor_helper(()), args=(_make_tensor_helper((S,)),)),\n SampleInput(_make_tensor_helper((S,)), args=(_make_tensor_helper(()),)),\n )\n\n if dtype.is_complex:\n alphas = [int_alpha, float_alpha, complex_alpha]\n elif dtype.is_floating_point:\n alphas = [int_alpha, float_alpha]\n else:\n alphas = [int_alpha]\n\n args = ((_make_tensor_helper((S, S)), _make_tensor_helper((S, S))),\n (_make_tensor_helper((S, S)), _make_tensor_helper((S,))),\n (_make_tensor_helper(()), _make_tensor_helper(())))\n samples += 
tuple(_samples_with_alpha_helper(args, alphas)) # type: ignore[assignment]\n elif variant == 'scalar':\n # Scalar Other\n samples = (SampleInput(_make_tensor_helper((S, S)), args=(0.5,)),\n SampleInput(_make_tensor_helper(()), args=(0.5,)),\n SampleInput(_make_tensor_helper((S, S)), args=(1.5j,)),\n SampleInput(_make_tensor_helper(()), args=(1.5j,)),\n SampleInput(_make_tensor_helper((S, S)), args=(0.4 + 1.2j,)),\n SampleInput(_make_tensor_helper(()), args=(1.2 + 1.76j,)))\n\n scalar_args = [(_make_tensor_helper((S, S)), 0.5), (_make_tensor_helper(()), 0.5),\n (_make_tensor_helper((S, S)), 2.7j), (_make_tensor_helper(()), 2.7j),\n (_make_tensor_helper((S, S)), 1 - 2.7j), (_make_tensor_helper(()), 1 + 2.7j)]\n\n alphas = [int_alpha, float_alpha, complex_alpha]\n\n def filter_fn(arg_alpha):\n arg, alpha = arg_alpha\n if isinstance(alpha, complex):\n if dtype.is_complex or isinstance(arg[1], complex):\n return True\n else:\n # complex alpha is valid only if either `self` or `other` is complex\n return False\n\n # Non-Complex Alpha\n return True\n\n # Samples with alpha (scalar version) covers the following cases\n # self | other | alpha\n # -----------------------------------------\n # real | real | real (int and float)\n # real | complex | real and complex\n # complex | real | real and complex\n # complex | complex | real and complex\n #\n # It does not cover\n # real | real | complex\n # x = torch.randn(2, requires_grad=True, dtype=torch.float64)\n # torch.rsub(x, 1, alpha=1. + 1.6j)\n # RuntimeError: value cannot be converted to type double without overflow: (-1,-1.6)\n\n samples += tuple(_samples_with_alpha_helper(scalar_args, alphas, filter_fn=filter_fn)) # type: ignore[assignment]\n else:\n raise Exception(\"Invalid variant!\")\n\n return samples\n\ndef sample_inputs_cumulative_ops(op_info, device, dtype, requires_grad, supports_dtype_kwargs=True, **kwargs):\n def _make_tensor_helper(shape, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n samples = [\n SampleInput(_make_tensor_helper((S, S, S)), args=(0,)),\n SampleInput(_make_tensor_helper((S, S, S)), args=(1,)),\n SampleInput(_make_tensor_helper(()), args=(0,)),\n ]\n\n if supports_dtype_kwargs:\n # NOTE: if `dtype` is not same as input, then inplace variants fail with\n # `provided dtype must match the dtype of self tensor in cumsum`\n samples.append(SampleInput(_make_tensor_helper((S, S, S)), args=(1,), kwargs={'dtype': dtype}))\n\n return samples\n\n\ndef sample_inputs_unfold(op_info, device, dtype, requires_grad, **kwargs):\n test_cases = (\n ((), (0, 1, 1)),\n ((S, S, S, S), (0, 3, 1)),\n ((S, S, S, S), (1, 3, 1)),\n ((S, S, S, S), (2, 3, 1)),\n ((S, S, S, S), (3, 3, 1)),\n ((S, S, S, S), (0, 3, 2)),\n ((S, S, S, S), (1, 3, 2)),\n ((S, S, S, S), (2, 3, 2)),\n ((S, S, S, S), (3, 3, 2)),\n ((S, S, S, S), (0, 4, 1)),\n ((S, S, S, S), (1, 4, 1)),\n ((S, S, S, S), (2, 4, 1)),\n ((S, S, S, S), (3, 4, 1)),\n ((M,), (0, 3, 1)),\n ((M,), (0, 3, 2)),\n ((M,), (0, 3, 3)),\n ((1000,), (0, 3, 11)),\n ((1000,), (0, 2, 27)),\n ((10, 10), (0, 1, 2)),\n ((10, 10), (1, 2, 3)),\n ((10, 10), (1, 2, 2)),\n ((S, S, S), (2, 3, 2)),\n )\n\n sample_inputs = []\n for shape, arguments in test_cases:\n sample_inputs += [SampleInput(make_tensor(shape, device, dtype,\n low=None, high=None,\n requires_grad=requires_grad),\n args=arguments)]\n return sample_inputs\n\n\ndef sample_inputs_atan2(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, device=device, 
dtype=dtype, requires_grad=requires_grad)\n cases = (\n ((S, S, S), (S, S, S), False),\n ((), (), False),\n ((S, S, S), (S,), False),\n ((S,), (S, S, S), True),\n ((S, 1, S), (S, S), True),\n )\n\n def generator():\n for x_shape, y_shape, broadcasts_input in cases:\n yield SampleInput(make_arg(x_shape), args=(make_arg(y_shape),),\n broadcasts_input=broadcasts_input)\n\n return list(generator())\n\ndef sample_inputs_msort(op_info, device, dtype, requires_grad):\n def apply_grad(t):\n if dtype in floating_types_and(torch.float16, torch.bfloat16):\n t.requires_grad_(requires_grad)\n\n def large_1d_unique(dtype, device):\n res = torch.randperm(L * L * L, dtype=torch.int64, device=device)\n res = res.to(dtype)\n apply_grad(res)\n return res\n\n samples = []\n # Test case for large tensor.\n largesample = SampleInput(large_1d_unique(dtype, device))\n\n sample = SampleInput(make_tensor((S, M, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad))\n\n return [largesample, sample]\n\ndef sample_inputs_lerp(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n samples = (\n # no broadcast\n SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 0.4)),\n # broadcast rhs\n SampleInput(make_arg((S, S)), args=(make_arg((S,)), 0.4)),\n # scalar tensor\n SampleInput(make_arg(()), args=(make_arg(()), 0.4)),\n # broadcast rhs scalar-tensor\n SampleInput(make_arg((S, S)), args=(make_arg(()), 0.4)),\n # broadcast rhs with weight tensor\n SampleInput(make_arg((S, S)), args=(make_arg((S,)), make_arg((S, S)))),\n # broadcast rhs and weight tensor\n SampleInput(make_arg((S, S)), args=(make_arg((S, 1)), make_arg((S,)))),\n # broadcast_lhs\n SampleInput(make_arg((S,)), args=(make_arg((S, S)), 0.4), broadcasts_input=True),\n # scalar broadcast_lhs\n SampleInput(make_arg(()), args=(make_arg((S, S)), 0.4), broadcasts_input=True),\n # broadcast all\n SampleInput(make_arg((S, 1)), args=(make_arg((S, S)), 0.4), broadcasts_input=True),\n # tensor broadcast all\n SampleInput(make_arg((S, 1)), args=(make_arg((S, S)), make_arg((S, 1))),\n broadcasts_input=True),\n )\n\n if dtype.is_complex:\n samples = samples + ( # type: ignore[assignment]\n # no broadcast\n SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 0.4j)),\n SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 1.2 + 0.1j)),\n # broadcast rhs\n SampleInput(make_arg((S, S)), args=(make_arg((S,)), 0.4j)),\n SampleInput(make_arg((S, S)), args=(make_arg((S, S)), 5.4 + 9j)),\n # scalar tensor\n SampleInput(make_arg(()), args=(make_arg(()), 0.4j)),\n SampleInput(make_arg(()), args=(make_arg(()), 6.1 + 0.004j)),\n # broadcast rhs scalar-tensor\n SampleInput(make_arg((S, S)), args=(make_arg(()), 0.4j)),\n SampleInput(make_arg((S, S)), args=(make_arg(()), 1 + 2j)),\n )\n\n return samples\n\ndef sample_inputs_tensordot(self, device, dtype, requires_grad, **kwargs):\n cases = (\n ((2, 2, 2), (2, 2, 2), (2)),\n ((2, 2, 1), (2, 1, 2), ([0, 1], [2, 0])),\n )\n samples = []\n for first_shape, second_shape, dims in cases:\n samples.append(SampleInput(make_tensor(first_shape, device, dtype,\n requires_grad=requires_grad),\n args=(make_tensor(second_shape, device, dtype,\n requires_grad=requires_grad),),\n kwargs=dict(dims=dims,)))\n return tuple(samples)\n\ndef sample_inputs_kron(op_info, device, dtype, requires_grad):\n test_cases = (\n ((S, S), (M, L)),\n )\n\n sample_inputs = []\n for input_shape, other_shape in test_cases:\n input = make_tensor(input_shape, device, 
dtype, low=None, high=None, requires_grad=requires_grad)\n other = make_tensor(other_shape, device, dtype, low=None, high=None, requires_grad=requires_grad)\n sample = SampleInput(input, args=(other,))\n sample_inputs.append(sample)\n return tuple(sample_inputs)\n\ndef sample_inputs_inner(self, device, dtype, requires_grad, **kwargs):\n return (\n SampleInput(\n make_tensor((S, ), device, dtype, requires_grad=requires_grad),\n args=(\n make_tensor((S, ), device, dtype, requires_grad=requires_grad),\n )\n ),\n SampleInput(\n make_tensor((), device, dtype, requires_grad=requires_grad),\n args=(\n make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n )\n ),\n )\n\n# Tests for scatter when passing the reduce argument are missing\n# Reference: https://github.com/pytorch/pytorch/issues/56464\ndef sample_inputs_scatter(op_info, device, dtype, requires_grad):\n def _tensor(shape, dtype=dtype, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n def _gather(shape, index_dim, max_indices):\n return gather_variable(shape, index_dim, max_indices, device=device)\n\n zero = torch.tensor(0, dtype=torch.long, device=device)\n test_cases = (\n (_tensor((M, S)), (0, _gather((S, S), 1, M), _tensor((S, S)))),\n (_tensor((M, S)), (1, _gather((S, S), 0, S), _tensor((S, S)))),\n (_tensor((M, S)), (-1, _gather((S, S), 0, S), _tensor((S, S)))),\n (_tensor((M, S)), (0, _gather((M, S // 2), 1, M), _tensor((M, S // 2)))),\n (_tensor((M, S)), (1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),\n (_tensor((M, S)), (-1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),\n (_tensor(()), (0, zero.clone().detach(), _tensor(()))),\n (_tensor(()), (0, zero.clone().detach(), 2.5)),\n )\n\n return [SampleInput(tensor, args=args) for tensor, args in test_cases]\n\ndef sample_inputs_scatter_add(op_info, device, dtype, requires_grad):\n def _tensor(shape, dtype=dtype, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n def _gather(shape, index_dim, max_indices):\n return gather_variable(shape, index_dim, max_indices, device=device)\n\n zero = torch.tensor(0, dtype=torch.long, device=device)\n test_cases = (\n (_tensor((M, S)), (0, _gather((S, S), 1, M), _tensor((S, S)))),\n (_tensor((M, S)), (1, _gather((S, S), 0, S), _tensor((S, S)))),\n (_tensor((M, S)), (-1, _gather((S, S), 0, S), _tensor((S, S)))),\n (_tensor((M, S)), (0, _gather((M, S // 2), 1, M), _tensor((M, S // 2)))),\n (_tensor((M, S)), (1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),\n (_tensor((M, S)), (-1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))),\n (_tensor(()), (0, zero.clone().detach(), _tensor(()))),\n )\n\n return [SampleInput(tensor, args=args) for tensor, args in test_cases]\n\n\ndef sample_inputs_ravel(op_info, device, dtype, requires_grad, **kwargs):\n samples = (SampleInput(make_tensor((S, S, S), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad)),\n SampleInput(make_tensor((), device, dtype,\n low=None, high=None,\n requires_grad=requires_grad)),)\n\n return samples\n\n\ndef sample_inputs_view_reshape(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n cases = (((S, S, S), (S * S, S)),\n ((S * S, S), (S, S, S)),\n ((S,), (S,)),\n ((), ()),\n ((), (1,)))\n\n def generator():\n for case in cases:\n shape, args = case\n yield(SampleInput(make_arg(shape), args=(args, )))\n\n return 
list(generator())\n\n\ndef sample_inputs_view_as_reshape_as(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device)\n\n cases = (((S, S, S), (S * S, S)),\n ((), ()),\n ((), (1, 1)),\n )\n\n def generator():\n for case in cases:\n shape, shape_other = case\n yield(SampleInput(make_arg(shape, requires_grad=requires_grad),\n args=(make_arg(shape_other, requires_grad=False), )))\n\n return list(generator())\n\n\ndef sample_inputs_select(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n cases = (((S, S, S), (1, 2)),\n ((S, S, S), (-1, 2)),\n ((S, S, S), (-1, -1)),\n ((S, S, S), (1, -1)),\n ((S,), (0, 2))\n )\n\n def generator():\n for shape, args in cases:\n yield SampleInput(make_arg(shape), args=args)\n\n return list(generator())\n\n\ndef sample_inputs_rbinops(op_info, device, dtype, requires_grad, supports_dtype_kwargs=True, **kwargs):\n def _make_tensor_helper(shape, low=None, high=None):\n return make_tensor(shape, device, dtype, low=low, high=high, requires_grad=requires_grad)\n\n scalar: Union[int, float, complex] = 3\n\n if dtype.is_floating_point:\n scalar = 3.14\n elif dtype.is_complex:\n scalar = 3.14j\n\n samples = [\n SampleInput(_make_tensor_helper((S, S, S)), args=(scalar,)),\n SampleInput(_make_tensor_helper(()), args=(scalar,)),\n ]\n\n return samples\n\n\ndef sample_inputs_expand(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)\n\n cases = (((S, 1, 1), (S, S, S)),\n ((S, 1, S), (S, S, S)),\n ((S, 1), (S, S, S)),\n ((1,), (S, S, S)),\n ((1, S), (1, 1, S)),\n ((), ()),\n ((), (1, 3, 2)),\n )\n\n def generator():\n for case in cases:\n shape, args = case\n yield(SampleInput(make_arg(shape), args=(args, )))\n\n return list(generator())\n\n\ndef sample_inputs_expand_as(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device)\n\n cases = (((S, 1, 1), (S, S, S)),\n ((), ()),\n ((), (1, 1)),\n )\n\n def generator():\n for case in cases:\n shape, shape_other = case\n yield(SampleInput(make_arg(shape, requires_grad=requires_grad),\n args=(make_arg(shape_other, requires_grad=False), )))\n\n return list(generator())\n\n\ndef sample_inputs_chunk(op_info, device, dtype, requires_grad, **kwargs):\n make_arg = partial(make_tensor, dtype=dtype, device=device)\n\n cases = (((S, S, S), (2,)),\n ((S, S, S), (S, 1)),\n ((S, S, S), (S, -1)))\n\n def generator():\n for case in cases:\n shape, args = case\n yield(SampleInput(make_arg(shape, requires_grad=requires_grad), args=args))\n\n return list(generator())\n\n\nforeach_unary_op_db: List[OpInfo] = [\n ForeachUnaryFuncInfo('exp'),\n ForeachUnaryFuncInfo('acos'),\n ForeachUnaryFuncInfo('asin'),\n ForeachUnaryFuncInfo('atan'),\n ForeachUnaryFuncInfo('cos'),\n ForeachUnaryFuncInfo('cosh'),\n ForeachUnaryFuncInfo('log'),\n ForeachUnaryFuncInfo('log10'),\n ForeachUnaryFuncInfo('log2'),\n ForeachUnaryFuncInfo('tan'),\n ForeachUnaryFuncInfo('tanh'),\n ForeachUnaryFuncInfo('sin'),\n ForeachUnaryFuncInfo('sinh'),\n\n ForeachUnaryFuncInfo('neg',\n dtypes=all_types_and_complex(),\n dtypesIfCPU=all_types_and_complex(),\n dtypesIfCUDA=all_types_and_complex(),\n sample_inputs_func=sample_inputs_foreach,\n safe_casts_outputs=False),\n\n ForeachUnaryFuncInfo('sqrt',\n dtypes=floating_types(),\n dtypesIfCPU=floating_and_complex_types_and(torch.bfloat16),\n 
dtypesIfCUDA=floating_and_complex_types_and(torch.half)),

    ForeachUnaryFuncInfo('ceil',
                         dtypes=floating_types(),
                         dtypesIfCPU=floating_types_and(torch.bfloat16),
                         dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16)),

    ForeachUnaryFuncInfo('erf',
                         dtypes=floating_types(),
                         dtypesIfCPU=floating_types_and(torch.bfloat16),
                         dtypesIfCUDA=floating_types_and(torch.half)),

    ForeachUnaryFuncInfo('erfc',
                         dtypes=floating_types(),
                         dtypesIfCPU=floating_types_and(torch.bfloat16),
                         dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16)),

    ForeachUnaryFuncInfo('expm1',
                         dtypes=floating_types(),
                         dtypesIfCPU=floating_types_and(torch.bfloat16),
                         dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16)),

    ForeachUnaryFuncInfo('floor',
                         dtypes=floating_types(),
                         dtypesIfCPU=floating_types_and(torch.bfloat16),
                         dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16)),

    ForeachUnaryFuncInfo('log1p',
                         dtypes=floating_types(),
                         dtypesIfCPU=floating_types_and(torch.bfloat16),
                         dtypesIfCUDA=floating_types_and(torch.half)),

    ForeachUnaryFuncInfo('round',
                         dtypes=floating_types(),
                         dtypesIfCPU=floating_types_and(torch.bfloat16),
                         dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16)),

    ForeachUnaryFuncInfo('frac',
                         dtypes=floating_types(),
                         dtypesIfCPU=floating_types_and(torch.bfloat16),
                         dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16)),

    ForeachUnaryFuncInfo('reciprocal',
                         dtypes=floating_types(),
                         dtypesIfCPU=floating_types_and(torch.bfloat16),
                         dtypesIfCUDA=floating_types_and(torch.half)),

    ForeachUnaryFuncInfo('sigmoid',
                         dtypes=floating_types(),
                         dtypesIfCPU=floating_types_and(torch.bfloat16),
                         dtypesIfCUDA=floating_types_and(torch.half)),

    ForeachUnaryFuncInfo('trunc',
                         dtypes=floating_types(),
                         dtypesIfCPU=floating_types_and(torch.bfloat16),
                         dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16)),

    ForeachUnaryFuncInfo('abs',
                         dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
                         dtypesIfCPU=all_types_and_complex_and(torch.bfloat16, torch.half),
                         dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
                         safe_casts_outputs=False)
]

def reference_sign(x):
    if x.dtype == np.bool_:
        # `np.sign` doesn't support `bool`.
        # >>> np.sign(True)
        # ufunc 'sign' did not contain a loop
        # with signature matching types dtype('bool') -> dtype('bool')
        return np.sign(x, dtype=np.uint8).astype(np.bool_)
    return np.sign(x)


def reference_sgn(x):
    # NumPy doesn't have an equivalent to `torch.sgn` when the dtype is complex.
    # For complex inputs, `np.sign` returns sign(x.real) + 0j if x.real != 0, else sign(x.imag) + 0j,
    # while `torch.sgn` returns 0 if abs(input) == 0, else input/abs(input).
    if x.dtype not in [np.complex64, np.complex128]:
        return reference_sign(x)

    out = (x / np.abs(x))
    if out.ndim == 0:
        # Handle x == 0 case
        if (x == 0):
            # Can't assign into a 0-d complex array, so make a new one.
            return np.array(complex(0, 0), dtype=x.dtype)
        return out

    # Handle x == 0 case
    mask = (x == 0)
    out[mask] = complex(0, 0)
    return out


def reference_sigmoid(x):
    # `scipy.special.expit` does not support complex inputs.
    if x.dtype in [np.complex64, np.complex128]:
        return (1 / (1 + np.exp(-x)))
    return scipy.special.expit(x)


def reference_lgamma(x):
    # scipy.special.gammaln returns `-inf` when the input is `-inf`,
    # while PyTorch, C, and C++ all return `inf` for that input.
    # Reference:
    # https://en.cppreference.com/w/cpp/numeric/math/lgamma
    # https://en.cppreference.com/w/c/numeric/math/lgamma

    # To handle the above discrepancy,
    # we replace -inf with inf so values
    # that were originally -inf map to inf as expected
    if x.dtype.kind == 'f':
        x = np.where(x == float('-inf'), np.array(float('inf'), dtype=x.dtype), x)

    out = scipy.special.gammaln(x)

    if x.dtype == np.float16:
        # `scipy.special.gammaln` returns float32 output when the input is float16,
        # while `torch.lgamma` preserves `float16`. Due to the smaller range of float16,
        # the PyTorch version outputs `inf` where SciPy returns finite values.
        out = out.astype(np.float16)

    return out

def reference_polygamma(x, n):
    # WEIRD `scipy.special.polygamma` behavior
    # >>> scipy.special.polygamma(0, np.array(501, dtype=np.float32)).dtype
    # dtype('float64')
    # >>> scipy.special.polygamma(0, np.array([501], dtype=np.float32)).dtype
    # dtype('float32')
    #
    # Thus we cast the output to the default torch dtype.
    np_dtype = torch_to_numpy_dtype_dict[torch.get_default_dtype()]
    return scipy.special.polygamma(n, x).astype(np_dtype)


def reference_mvlgamma(x, d):
    if x.dtype == np.float16:
        return scipy.special.multigammaln(x, d).astype(np.float16)

    return scipy.special.multigammaln(x, d)


def gradcheck_wrapper_hermitian_input(op, input, *args, **kwargs):
    """Gradcheck wrapper for functions that take Hermitian matrices as input.

    They require a modified function because the finite-difference algorithm
    for calculating derivatives does not preserve the Hermitian property of the input.
    """
    return op(input + input.conj().transpose(-2, -1), *args, **kwargs)


def gradcheck_wrapper_triangular_input(op, input, *args, upper=False, **kwargs):
    """Gradcheck wrapper for functions that take lower or upper triangular matrices as input.

    They require a modified function because the finite-difference algorithm
    for calculating derivatives does not preserve the triangular property of the input.
    """
    return op(input.triu() if upper else input.tril(), upper)


# Operator database (sorted alphabetically)
op_db: List[OpInfo] = [
    UnaryUfuncInfo('abs',
                   aliases=('absolute', ),
                   ref=np.abs,
                   dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
                   dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
                   skips=(
                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
                                device_type='cpu', dtypes=[torch.cfloat]),
                       # Reference: https://github.com/pytorch/pytorch/issues/49224
                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
                                dtypes=[torch.int8], active_if=TEST_WITH_ASAN),
                       # TODO: Fix test_out_arg_all_dtypes as torch.empty_like(expected_output) where expected_output=op(input)
                       # We can break the logic of the loop over all possible types but it is OK.
                       # https://github.com/pytorch/pytorch/blob/master/test/test_unary_ufuncs.py#L440-L449
                       SkipInfo('TestUnaryUfuncs', 'test_out_arg_all_dtypes',
                                dtypes=[torch.cfloat, torch.cdouble]),
                   ),
                   supports_inplace_autograd=False,
                   assert_autodiffed=True),
    # NOTE: CPU complex acos produces incorrect outputs (https://github.com/pytorch/pytorch/issues/42952)
    UnaryUfuncInfo('acos',
                   aliases=('arccos', ),
                   ref=np.arccos,
                   domain=(-1, 1),
                   handles_complex_extremals=False,
                   dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n # \"rsqrt_cpu\" not implemented for 'BFloat16'\n backward_dtypesIfCPU=all_types_and_complex_and(torch.bool),\n assert_autodiffed=True,\n decorators=(precisionOverride({torch.float16: 1e-2,\n torch.bfloat16: 1e-1,\n torch.complex64: 1e-2}),),\n safe_casts_outputs=True,\n skips=(\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestGradients', 'test_fn_grad',\n dtypes=[torch.cdouble], active_if=IS_WINDOWS),\n SkipInfo('TestGradients', 'test_method_grad',\n dtypes=[torch.cdouble], active_if=IS_WINDOWS),\n SkipInfo('TestGradients', 'test_inplace_grad',\n dtypes=[torch.cdouble], active_if=IS_WINDOWS),\n )),\n # NOTE: the derivative for inplace acosh is not implemented\n UnaryUfuncInfo('acosh',\n aliases=('arccosh', ),\n ref=np.arccosh,\n domain=(1, float('inf')),\n dtypes=all_types_and_complex_and(torch.bool),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n # \"rsqrt_cuda\" not implemented for 'BFloat16'\n backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n decorators=(precisionOverride({torch.bfloat16: 5e-2}),),\n supports_inplace_autograd=False,\n skips=(\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cuda', dtypes=[torch.cdouble],\n active_if=IS_WINDOWS),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cuda', dtypes=[torch.cdouble],\n active_if=IS_WINDOWS),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cuda', dtypes=[torch.cdouble],\n active_if=IS_WINDOWS),\n # Reference: https://github.com/pytorch/pytorch/issues/50692\n SkipInfo('TestGradients', 'test_fn_grad',\n device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),\n SkipInfo('TestGradients', 'test_method_grad',\n device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),\n )),\n OpInfo('add',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n assert_autodiffed=True,\n sample_inputs_func=partial(sample_inputs_binary_pwise, alpha=2),\n supports_inplace_autograd=False),\n OpInfo('mul',\n aliases=('multiply',),\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool),\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_binary_pwise),\n OpInfo('sub',\n aliases=('subtract',),\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16),\n assert_autodiffed=True,\n sample_inputs_func=partial(sample_inputs_binary_pwise, alpha=2),\n supports_inplace_autograd=False),\n OpInfo('addmm',\n # This addmm OpInfo is for when alpha and beta are not both equal to 1.\n # alpha=beta=1 is tested in the following opinfo, because that special case will\n # trigger addmm being decomposed by a jit pass.\n dtypes=floating_and_complex_types_and(torch.float16),\n dtypesIfCPU=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n assert_autodiffed=True,\n supports_inplace_autograd=False,\n 
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n sample_inputs_func=sample_inputs_addmm),\n OpInfo('addmm',\n # When alpha=beta=1 as compile-time constants, JIT will decompose addmm into mm and add.\n variant_test_name='decomposed',\n dtypes=floating_and_complex_types_and(torch.float16),\n dtypesIfCPU=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n assert_autodiffed=True,\n supports_inplace_autograd=False,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n autodiff_nonfusible_nodes=['aten::add', 'aten::mm'],\n sample_inputs_func=partial(sample_inputs_addmm, alpha=1, beta=1)),\n OpInfo('addmv',\n dtypes=floating_types(),\n dtypesIfCPU=all_types_and_complex_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128,\n *[torch.bfloat16] if CUDA11OrLater else []),\n dtypesIfROCM=floating_types_and(torch.half),\n supports_inplace_autograd=False,\n skips=(\n # issue may fix: https://github.com/pytorch/pytorch/issues/55589\n # AssertionError: UserWarning not triggered : Resized a non-empty tensor but did not warn about it.\n SkipInfo('TestCommon', 'test_out', dtypes=(torch.float32,)),\n # Reference: https://github.com/pytorch/pytorch/issues/55589\n SkipInfo('TestCommon', 'test_variant_consistency_eager'),\n ),\n sample_inputs_func=sample_inputs_addmv),\n OpInfo('addbmm',\n dtypes=floating_types(),\n dtypesIfCPU=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n dtypesIfROCM=floating_types_and(torch.half),\n skips=(\n # addbmm does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),\n # cuda gradchecks are slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),\n # https://github.com/pytorch/pytorch/issues/55907\n SkipInfo('TestCommon', 'test_variant_consistency_eager'),\n SkipInfo('TestOpInfo', 'test_supported_backward', dtypes=(torch.bfloat16, ),\n device_type='cuda', active_if=not SM53OrLater)),\n sample_inputs_func=sample_inputs_addbmm),\n OpInfo('baddbmm',\n dtypes=floating_types_and(torch.half),\n dtypesIfCPU=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128,\n *[torch.bfloat16] if CUDA11OrLater else []),\n skips=(\n # baddbmm does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),\n # cuda gradchecks are slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),\n SkipInfo('TestOpInfo', 'test_supported_backward', dtypes=(torch.bfloat16, ),\n device_type='cuda', active_if=not SM53OrLater)),\n sample_inputs_func=sample_inputs_baddbmm),\n OpInfo('dot',\n dtypes=all_types_and_complex_and(torch.float16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_dot_vdot),\n OpInfo('vdot',\n dtypes=all_types_and_complex_and(torch.float16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n sample_inputs_func=sample_inputs_dot_vdot),\n 
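    # A minimal, illustrative sketch of how entries in this database are typically
    # consumed by the generated tests (hypothetical usage, not the test-suite code
    # itself; assumes the standard `OpInfo.sample_inputs` / `OpInfo.op` helpers):
    # >>> op = next(op for op in op_db if op.name == 'bmm')
    # >>> for sample in op.sample_inputs('cpu', torch.float32, requires_grad=True):
    # ...     result = op.op(sample.input, *sample.args, **sample.kwargs)
    # ...     result.sum().backward()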
OpInfo('bmm',
           dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16),
           dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
           assert_autodiffed=True,
           skips=(
               # bmm does not correctly warn when resizing out= inputs
               SkipInfo('TestCommon', 'test_out'),
               # cuda gradchecks are slow
               # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775
               SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),
               SkipInfo('TestOpInfo', 'test_supported_backward', dtypes=(torch.bfloat16, ),
                        device_type='cuda', active_if=not SM53OrLater)),
           sample_inputs_func=sample_inputs_bmm),
    OpInfo('mv',
           dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16),
           dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
           skips=(
               # mv does not correctly warn when resizing out= inputs
               SkipInfo('TestCommon', 'test_out'),
               SkipInfo('TestOpInfo', 'test_supported_backward', dtypes=(torch.float16,)),
               # mv calls into addmv which doesn't fully support float16
               # RuntimeError: "addmv_impl_cpu" not implemented for 'Half'
               SkipInfo('TestOpInfo', 'test_supported_dtypes', dtypes=(torch.float16,)),),
           assert_autodiffed=True,
           sample_inputs_func=sample_inputs_mv),
    OpInfo('addr',
           dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
           backward_dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
           backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool),
           # Reference: https://github.com/pytorch/pytorch/issues/50747
           supports_inplace_autograd=False,
           skips=(
               # Reference: https://github.com/pytorch/pytorch/issues/50747
               SkipInfo('TestCommon', 'test_variant_consistency_eager',
                        dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16)),),
           sample_inputs_func=sample_inputs_addr,
           gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
    OpInfo('addcmul',
           dtypes=all_types_and_complex(),
           dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),
           assert_autodiffed=True,
           supports_inplace_autograd=False,
           skips=(
               # TODO: update sample inputs with for_inplace_variant kwarg to support this test
               SkipInfo('TestCommon', 'test_variant_consistency_eager'),),
           sample_inputs_func=sample_inputs_addcmul_addcdiv),
    OpInfo('addcdiv',
           dtypes=floating_and_complex_types(),
           dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
           supports_inplace_autograd=False,
           skips=(
               # TODO: update sample inputs with for_inplace_variant kwarg to support this test
               SkipInfo('TestCommon', 'test_variant_consistency_eager'),),
           sample_inputs_func=sample_inputs_addcmul_addcdiv),
    OpInfo('amax',
           dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
           sample_inputs_func=sample_inputs_amax_amin,),
    OpInfo('amin',
           dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),
           sample_inputs_func=sample_inputs_amax_amin),
    OpInfo('argmax',
           dtypes=all_types_and(torch.float16, torch.bfloat16),
           supports_autograd=False,
           sample_inputs_func=sample_inputs_argmax_argmin,),
    OpInfo('argmin',
           dtypes=all_types_and(torch.float16, torch.bfloat16),
           supports_autograd=False,
           sample_inputs_func=sample_inputs_argmax_argmin,),
    UnaryUfuncInfo('asin',
                   aliases=('arcsin', ),
                   ref=np.arcsin,
                   domain=(-1, 1),
                   supports_sparse=True,
                   decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
                   safe_casts_outputs=True,
                   dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n # \"rsqrt_cpu\" not implemented for 'BFloat16'\n backward_dtypesIfCPU=all_types_and_complex_and(torch.bool),\n assert_autodiffed=True,\n skips=(\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cuda', dtypes=[torch.cdouble],\n active_if=IS_WINDOWS),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cuda', dtypes=[torch.cdouble],\n active_if=IS_WINDOWS)\n )),\n # NOTE: derivative for inplace asinh is not implemented\n UnaryUfuncInfo('asinh',\n aliases=('arcsinh', ),\n ref=np.arcsinh,\n dtypes=all_types_and_complex_and(torch.bool),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n # \"rsqrt_cuda\" not implemented for 'BFloat16'\n backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n decorators=(precisionOverride({torch.bfloat16: 5e-2}),),\n supports_inplace_autograd=False,\n skips=(\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cuda', dtypes=[torch.cdouble],\n active_if=IS_WINDOWS),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cuda', dtypes=[torch.cdouble],\n active_if=IS_WINDOWS),\n )),\n UnaryUfuncInfo('atan',\n aliases=('arctan', ),\n ref=np.arctan,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n decorators=(precisionOverride({torch.bfloat16: 1e-2}),),\n safe_casts_outputs=True,\n skips=(\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],\n active_if=IS_WINDOWS),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],\n active_if=IS_WINDOWS),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],\n active_if=IS_WINDOWS),\n )),\n OpInfo('atan2',\n dtypes=all_types_and(torch.bool),\n dtypesIfCPU=all_types_and(torch.bool),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_atan2,\n ),\n UnaryUfuncInfo('atanh',\n aliases=('arctanh', ),\n ref=np.arctanh,\n domain=(-1, 1),\n dtypes=all_types_and_complex_and(torch.bool),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n 
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
                   supports_inplace_autograd=False,
                   skips=(
                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
                                active_if=IS_WINDOWS),
                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
                                device_type='cuda', dtypes=[torch.cfloat],
                                active_if=IS_WINDOWS),
                   )),
    OpInfo('broadcast_to',
           dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
           supports_out=False,
           sample_inputs_func=sample_inputs_broadcast_to),
    UnaryUfuncInfo('bitwise_not',
                   ref=np.bitwise_not,
                   dtypes=integral_types_and(torch.bool),
                   supports_autograd=False),
    OpInfo('cdist',
           dtypes=floating_types(),
           supports_out=False,
           supports_gradgrad=False,
           sample_inputs_func=sample_inputs_cdist),
    UnaryUfuncInfo('ceil',
                   ref=np.ceil,
                   dtypes=floating_types_and(torch.bfloat16),
                   dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
                   assert_autodiffed=True),
    OpInfo('cholesky',
           dtypes=floating_and_complex_types(),
           check_batched_gradgrad=False,
           sample_inputs_func=sample_inputs_linalg_cholesky,
           gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
           decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
           skips=(
               # cuda gradchecks are slow
               # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775
               SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),
    OpInfo('cholesky_inverse',
           dtypes=floating_and_complex_types(),
           backward_dtypes=floating_types(),
           # TODO: RuntimeError: cholesky_inverse does not support automatic differentiation for outputs
           # with complex dtype.
           check_batched_gradgrad=False,
           sample_inputs_func=sample_inputs_linalg_cholesky_inverse,
           gradcheck_wrapper=gradcheck_wrapper_triangular_input,
           decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
           skips=(
               # cholesky_inverse does not correctly warn when resizing out= inputs
               SkipInfo('TestCommon', 'test_out'),)),
    OpInfo('chunk',
           dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
           sample_inputs_func=sample_inputs_chunk,
           supports_out=False),
    OpInfo('symeig',
           dtypes=floating_and_complex_types(),
           check_batched_gradgrad=False,
           sample_inputs_func=sample_inputs_symeig,
           gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
           decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
           skips=(
               # cuda gradchecks are slow
               # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775
               SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)
           ),
    # NOTE: clamp has separate opinfos for scalar min/max (unary op) vs. tensors
    OpInfo('clamp',
           aliases=('clip',),
           dtypes=all_types_and(torch.half, torch.bfloat16),
           dtypesIfCPU=all_types_and(torch.bfloat16),
           dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
           assert_autodiffed=True,
           sample_inputs_func=sample_inputs_clamp),
    UnaryUfuncInfo('clamp',
                   variant_test_name='scalar',
                   aliases=('clip', ),
                   decorators=(precisionOverride({torch.bfloat16: 7e-2, torch.float16: 1e-2}),),
                   ref=np.clip,
                   dtypes=all_types_and(torch.bfloat16),
                   dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
                   assert_autodiffed=True,
                   skips=(
                       # Reference: https://github.com/pytorch/pytorch/issues/54841
                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                device_type='cpu', dtypes=[torch.bfloat16]),
                   ),
                   sample_kwargs=sample_kwargs_clamp_scalar,
                   sample_inputs_func=sample_inputs_clamp_scalar),
    UnaryUfuncInfo('positive',
                   ref=np.positive,
                   dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
                   supports_out=False,
                   ),
    UnaryUfuncInfo('conj',
                   ref=np.conj,
                   dtypes=all_types_and_complex_and(torch.bool,
                                                    torch.bfloat16, torch.half),
                   skips=(
                       # File "test_unary_ufuncs.py", line 289, in test_reference_numerics
                       #  if not torch.can_cast(numpy_to_torch_dtype_dict[expected.dtype.type], dtype):
                       # KeyError: <class 'numpy.intc'>
                       # The following error occurs in Windows CI
                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
                                dtypes=[torch.int],
                                active_if=IS_WINDOWS),
                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
                                dtypes=[torch.int],
                                active_if=IS_WINDOWS),
                   )),
    OpInfo('view_as_real',
           dtypes=complex_types(),
           sample_inputs_func=sample_inputs_view_as_real,
           ),
    OpInfo('view_as_complex',
           dtypes=floating_types_and(torch.half),
           supports_out=False,
           skips=(
               # "sum_cpu/sum_cuda" not implemented for 'ComplexHalf'
               SkipInfo('TestOpInfo', 'test_supported_backward', dtypes=(torch.half,)),
           ),
           sample_inputs_func=sample_inputs_view_as_complex),
    OpInfo('complex',
           dtypes=floating_types(),
           sample_inputs_func=sample_inputs_complex,
           ),
    OpInfo('copysign',
           dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
           sample_inputs_func=sample_inputs_copysign,
           supports_inplace_autograd=False,
           ),
    UnaryUfuncInfo('cos',
                   ref=np.cos,
                   dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
                   dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
                   backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),
                   assert_autodiffed=True,
                   handles_large_floats=False,
                   safe_casts_outputs=True,
                   decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
                   skips=(
                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu',
                                dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
                   )),
    UnaryUfuncInfo('cosh',
                   ref=np_unary_ufunc_integer_promotion_wrapper(np.cosh),
                   dtypes=all_types_and_complex_and(torch.bool),
                   dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
                   safe_casts_outputs=True,
                   assert_autodiffed=True,
                   skips=(
                       # Reference: https://github.com/pytorch/pytorch/issues/48641
                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
                                device_type='cpu', dtypes=[torch.int8]),
                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
                       SkipInfo('TestUnaryUfuncs',
'test_reference_numerics_hard',\n dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu',\n dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard', device_type='cpu',\n dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),\n )),\n OpInfo('cumsum',\n dtypesIfCPU=all_types_and_complex_and(torch.bool),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half),\n skips=(\n # \"cumsum_out_{cpu,cuda}\" not implemented for 'Bool'\n SkipInfo('TestOpInfo', 'test_supported_dtypes',\n dtypes=(torch.bool,)),\n # cumsum does not handle correctly out= dtypes\n SkipInfo('TestCommon', 'test_out'),\n ),\n sample_inputs_func=sample_inputs_cumulative_ops),\n OpInfo('cumprod',\n dtypes=all_types_and_complex_and(torch.bool),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16),\n skips=(\n # \"cumprod_out_{cpu, cuda}\" not implemented for 'Bool'\n SkipInfo('TestOpInfo', 'test_supported_dtypes',\n dtypes=(torch.bool,)),\n # cumprod does not handle correctly out= dtypes\n SkipInfo('TestCommon', 'test_out',\n dtypes=[torch.float32]),\n ),\n # gradgradcheck fails in fast_mode=True: #56275\n sample_inputs_func=sample_inputs_cumprod,\n gradcheck_fast_mode=False),\n OpInfo('cummax',\n dtypesIfCPU=all_types_and(torch.bool),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n OpInfo('cummin',\n dtypesIfCPU=all_types_and(torch.bool),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False),\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n UnaryUfuncInfo('deg2rad',\n ref=np.radians,\n decorators=(precisionOverride({torch.bfloat16: 7e-1,\n torch.float16: 7e-1}),),\n dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/pull/51283#issuecomment-770614273\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.bfloat16]),\n ),\n safe_casts_outputs=True),\n OpInfo('diff',\n op=torch.diff,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_diff),\n OpInfo('div',\n variant_test_name='no_rounding_mode',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_div,\n skips=(SkipInfo('TestOpInfo', 'test_duplicate_method_tests'),),\n assert_autodiffed=True),\n OpInfo('div',\n variant_test_name='true_rounding',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_div, rounding_mode=None),\n skips=(SkipInfo('TestOpInfo', 'test_duplicate_method_tests'),),\n assert_autodiffed=True),\n OpInfo('div',\n variant_test_name='trunc_rounding',\n dtypes=all_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_div, rounding_mode='trunc'),\n skips=(SkipInfo('TestOpInfo', 'test_duplicate_method_tests'),),\n assert_autodiffed=True),\n OpInfo('div',\n variant_test_name='floor_rounding',\n dtypes=all_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=partial(sample_inputs_div, rounding_mode='floor'),\n skips=(SkipInfo('TestOpInfo', 'test_duplicate_method_tests'),),\n assert_autodiffed=True),\n UnaryUfuncInfo('exp',\n 
ref=np_unary_ufunc_integer_promotion_wrapper(np.exp),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/pull/50093#pullrequestreview-561791547\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.bfloat16]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard', dtypes=[torch.bfloat16]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal', dtypes=[torch.bfloat16]),\n # Reference: https://github.com/pytorch/pytorch/issues/48010\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n ),\n assert_autodiffed=True,\n safe_casts_outputs=True),\n OpInfo('expand',\n op=lambda self, shape: self.expand(shape),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_expand,\n skips=(\n # Because expand does not have a function variant.\n SkipInfo('TestCommon', 'test_variant_consistency_jit'),),\n supports_out=False),\n OpInfo('expand_as',\n op=lambda self, other: self.expand_as(other),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_expand_as,\n skips=(\n # Because expand_as does not have a function variant.\n SkipInfo('TestCommon', 'test_variant_consistency_jit'),),\n supports_out=False),\n OpInfo('diag',\n dtypes=all_types_and_complex_and(torch.bool),\n dtypesIfCPU=all_types_and_complex_and(torch.bool),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_diag),\n OpInfo('eq',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_comparison_ops),\n OpInfo('fmax',\n op=torch.fmax,\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_max_min_binary,),\n OpInfo('fmin',\n op=torch.fmin,\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_max_min_binary,),\n UnaryUfuncInfo('frac',\n ref=lambda x: np.modf(x)[0],\n dtypes=floating_types_and(torch.bfloat16, torch.float16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n assert_autodiffed=True,\n # Reference for disabling extremals\n # https://github.com/pytorch/pytorch/issues/51948\n handles_extremals=False),\n SpectralFuncInfo('fft.fft',\n aten_name='fft_fft',\n ref=np.fft.fft,\n ndimensional=False,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types()),\n SpectralFuncInfo('fft.fftn',\n aten_name='fft_fftn',\n ref=np.fft.fftn,\n ndimensional=True,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n decorators=[precisionOverride(\n {torch.float: 1e-4, torch.cfloat: 1e-4})],),\n SpectralFuncInfo('fft.hfft',\n aten_name='fft_hfft',\n ref=np.fft.hfft,\n ndimensional=False,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False),\n SpectralFuncInfo('fft.rfft',\n aten_name='fft_rfft',\n ref=np.fft.rfft,\n ndimensional=False,\n dtypes=all_types_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n 
check_batched_grad=False,\n check_batched_gradgrad=False),\n SpectralFuncInfo('fft.rfftn',\n aten_name='fft_rfftn',\n ref=np.fft.rfftn,\n ndimensional=True,\n dtypes=all_types_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_grad=False,\n check_batched_gradgrad=False,\n decorators=[precisionOverride({torch.float: 1e-4})],),\n SpectralFuncInfo('fft.ifft',\n aten_name='fft_ifft',\n ref=np.fft.ifft,\n ndimensional=False,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types()),\n SpectralFuncInfo('fft.ifftn',\n aten_name='fft_ifftn',\n ref=np.fft.ifftn,\n ndimensional=True,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n decorators=[\n DecorateInfo(\n precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),\n 'TestFFT', 'test_reference_nd')],\n ),\n SpectralFuncInfo('fft.ihfft',\n aten_name='fft_ihfft',\n ref=np.fft.ihfft,\n ndimensional=False,\n dtypes=all_types_and(torch.bool),\n default_test_dtypes=floating_types(),\n check_batched_grad=False),\n SpectralFuncInfo('fft.irfft',\n aten_name='fft_irfft',\n ref=np.fft.irfft,\n ndimensional=False,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False),\n SpectralFuncInfo('fft.irfftn',\n aten_name='fft_irfftn',\n ref=np.fft.irfftn,\n ndimensional=True,\n dtypes=all_types_and_complex_and(torch.bool),\n default_test_dtypes=floating_and_complex_types(),\n check_batched_gradgrad=False,\n decorators=[\n DecorateInfo(\n precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4}),\n 'TestFFT', 'test_reference_nd')],\n ),\n UnaryUfuncInfo('floor',\n ref=np.floor,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n assert_autodiffed=True),\n OpInfo('flip',\n op=torch.flip,\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_flip,\n supports_out=False),\n OpInfo('fliplr',\n op=torch.fliplr,\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_fliplr_flipud,\n supports_out=False),\n OpInfo('flipud',\n op=torch.flipud,\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_fliplr_flipud,\n supports_out=False),\n UnaryUfuncInfo('i0',\n ref=np.i0,\n decorators=(precisionOverride({torch.bfloat16: 3e-1,\n torch.float16: 5e-1}),),\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n supports_autograd=False),\n UnaryUfuncInfo('special.i0e',\n aten_name='special_i0e',\n ref=scipy.special.i0e if TEST_SCIPY else _NOTHING,\n decorators=(precisionOverride({torch.bfloat16: 3e-1,\n torch.float16: 3e-1}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n supports_autograd=False,\n safe_casts_outputs=True),\n OpInfo('floor_divide',\n dtypes=all_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_floor_divide,\n decorators=[_wrap_warn_once(\"floor_divide is deprecated, and will be removed\")],\n skips=(\n # `test_duplicate_method_tests` doesn't raise any warning, as it doesn't actually\n # call the operator.\n SkipInfo('TestOpInfo', 'test_duplicate_method_tests'),),\n supports_autograd=False,\n ),\n UnaryUfuncInfo('frexp',\n op=torch.frexp,\n ref=np.frexp,\n 
dtypes=floating_types_and(torch.half),
                   # Skip testing torch.frexp as it is not supported on the ROCm platform yet
                   decorators=[skipCUDAIfRocm],
                   supports_out=False,
                   skips=(
                       # Skip the tests below as torch.frexp returns a tuple-like (mantissa, exponent) output,
                       # while these tests currently require the output to be a single tensor.
                       SkipInfo('TestUnaryUfuncs', 'test_batch_vs_slicing'),
                       SkipInfo('TestUnaryUfuncs', 'test_contig_vs_every_other'),
                       SkipInfo('TestUnaryUfuncs', 'test_contig_vs_transposed'),
                       SkipInfo('TestUnaryUfuncs', 'test_non_contig_expand'),
                       SkipInfo('TestUnaryUfuncs', 'test_variant_consistency'),

                       # Skip test_reference_numerics due to an error in Windows CI:
                       # np.frexp returns the exponent as np.intc dtype on Windows,
                       # and np.intc does not have a corresponding torch dtype
                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
                                active_if=IS_WINDOWS),
                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
                                active_if=IS_WINDOWS),
                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                active_if=IS_WINDOWS),
                   )),
    OpInfo('ge',
           aliases=('greater_equal',),
           dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
           supports_autograd=False,
           sample_inputs_func=sample_inputs_comparison_ops),
    OpInfo('geqrf',
           dtypes=floating_and_complex_types(),
           dtypesIfCPU=floating_and_complex_types(),
           supports_autograd=False,
           sample_inputs_func=sample_inputs_geqrf,
           decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],),
    OpInfo('gt',
           aliases=('greater',),
           dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
           supports_autograd=False,
           sample_inputs_func=sample_inputs_comparison_ops),
    UnaryUfuncInfo('imag',
                   ref=np.imag,
                   dtypes=complex_types(),
                   supports_out=False,
                   supports_autograd=False,
                   skips=(
                       # Skip since real and imag don't have out variants.
                       SkipInfo('TestUnaryUfuncs', 'test_out_arg_all_dtypes'),
                   )),
    OpInfo('gradient',
           dtypes=floating_and_complex_types_and(torch.int8, torch.int16,
                                                 torch.int32, torch.int64,
                                                 torch.bfloat16, torch.half),
           supports_out=False,
           skips=(
               # The following tests give a runtime error with an undefined value tensor;
               # see discussion: https://github.com/pytorch/pytorch/issues/56660
               SkipInfo('TestCommon', 'test_variant_consistency_jit', dtypes=(torch.float32, torch.complex64)),
           ),
           supports_inplace_autograd=False,
           sample_inputs_func=sample_inputs_gradient),
    OpInfo('inverse',
           op=torch.inverse,
           dtypes=floating_and_complex_types(),
           check_batched_gradgrad=False,
           gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
           sample_inputs_func=sample_inputs_linalg_invertible,
           decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
           skips=(
               # cuda gradchecks are slow
               # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775
               SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),
    OpInfo('le',
           aliases=('less_equal',),
           dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),
           supports_autograd=False,
           sample_inputs_func=sample_inputs_comparison_ops),
    OpInfo('linalg.det',
           op=torch.linalg.det,
           aliases=('det', ),
           dtypes=floating_and_complex_types(),
           # det doesn't support complex autograd, https://github.com/pytorch/pytorch/issues/57358
           backward_dtypes=floating_types(),
           aten_name='linalg_det',
           sample_inputs_func=sample_inputs_linalg_det,
           decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
           supports_inplace_autograd=False,
           skips=(
               # The following tests fail only on ROCm. This is probably
               # related to the fact that the current linalg.det backward is
               # unstable if the matrix has repeated singular values, see
               # https://github.com/pytorch/pytorch/issues/53364
               SkipInfo('TestGradients', 'test_fn_grad', device_type='cuda',
                        dtypes=(torch.float64,), active_if=TEST_WITH_ROCM),
               SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda',
                        dtypes=(torch.float64,), active_if=TEST_WITH_ROCM),
               SkipInfo('TestCommon', 'test_variant_consistency_jit', device_type='cuda',
                        dtypes=(torch.float64, torch.float32), active_if=TEST_WITH_ROCM),
           )),
    OpInfo('linalg.cholesky',
           aten_name='linalg_cholesky',
           dtypes=floating_and_complex_types(),
           # TODO: RuntimeError: While computing batched gradients,
           # got: vmap: Calling Tensor.as_strided is not supported
           # unless the batch dims being vmapped over are at the front of the tensor (in memory layout).
           check_batched_gradgrad=False,
           sample_inputs_func=sample_inputs_linalg_cholesky,
           gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
           decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
           skips=(
               # cuda gradchecks are slow
               # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775
               SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)
           ),
    OpInfo('linalg.cholesky_ex',
           aten_name='linalg_cholesky_ex',
           dtypes=floating_and_complex_types(),
           check_batched_gradgrad=False,
           sample_inputs_func=sample_inputs_linalg_cholesky,
           gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
           decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
           skips=(
               # cuda gradchecks are slow
               # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775
               SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)
           ),
    OpInfo('linalg.eig',
           aten_name='linalg_eig',
           op=torch.linalg.eig,
           dtypes=floating_and_complex_types(),
           supports_autograd=False,
           sample_inputs_func=sample_inputs_linalg_invertible,
           decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
    OpInfo('linalg.eigvals',
           aten_name='linalg_eigvals',
           op=torch.linalg.eigvals,
           dtypes=floating_and_complex_types(),
           supports_autograd=False,
           sample_inputs_func=sample_inputs_linalg_invertible,
           decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),
    OpInfo('linalg.eigh',
           aten_name='linalg_eigh',
           dtypes=floating_and_complex_types(),
           check_batched_gradgrad=False,
           sample_inputs_func=sample_inputs_linalg_eigh,
           gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
           decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],
           skips=(
               # cuda gradchecks are slow
               # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775
               SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)
           ),
    OpInfo('linalg.eigvalsh',
           aten_name='linalg_eigvalsh',
           dtypes=floating_and_complex_types(),
           check_batched_gradgrad=False,
           sample_inputs_func=sample_inputs_linalg_eigh,
           gradcheck_wrapper=gradcheck_wrapper_hermitian_input,
           decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],),
    OpInfo('linalg.householder_product',
           aten_name='linalg_householder_product',
           op=torch.linalg.householder_product,
           aliases=('orgqr', ),
           dtypes=floating_and_complex_types(),
           # TODO: backward uses in-place operations that vmap doesn't like
           check_batched_grad=False,
           check_batched_gradgrad=False,
sample_inputs_func=sample_inputs_householder_product,\n decorators=[skipCUDAIfNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack,\n # gradgrad checks are slow\n DecorateInfo(slowTest, 'TestGradients', 'test_fn_gradgrad'), ]),\n OpInfo('linalg.lstsq',\n aten_name='linalg_lstsq',\n op=torch.linalg.lstsq,\n dtypes=floating_and_complex_types(),\n supports_out=True,\n sample_inputs_func=sample_inputs_linalg_lstsq,\n check_batched_grad=False,\n check_batched_gradgrad=False,\n decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],\n skips=(\n # skip because `linalg_lstsq` is not differentiable\n SkipInfo('TestGradients', 'test_fn_grad'),\n SkipInfo('TestCommon', 'test_variant_consistency_jit'),\n )),\n OpInfo('linalg.matrix_power',\n aliases=('matrix_power',),\n aten_name='linalg_matrix_power',\n dtypes=floating_and_complex_types(),\n supports_inplace_autograd=False,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, skipCUDAIfRocm],\n sample_inputs_func=sample_inputs_linalg_matrix_power,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n OpInfo('linalg.multi_dot',\n # Need this lambda because gradcheck does not work with TensorList inputs\n aten_name='linalg_multi_dot',\n dtypes=floating_and_complex_types_and(torch.half),\n dtypesIfCPU=all_types_and_complex_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half, *[torch.bfloat16] if CUDA11OrLater else []),\n supports_inplace_autograd=False,\n # Batched grad checks fail for empty input tensors (see https://github.com/pytorch/pytorch/issues/53407)\n check_batched_grad=False,\n check_batched_gradgrad=False,\n sample_inputs_func=sample_inputs_linalg_multi_dot,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n OpInfo('linalg.norm',\n op=torch.linalg.norm,\n dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],\n sample_inputs_func=sample_inputs_linalg_norm,\n aten_name='linalg_norm',\n skips=(\n # linalg.norm does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),\n )),\n OpInfo('linalg.matrix_norm',\n aten_name='linalg_matrix_norm',\n dtypes=floating_and_complex_types(),\n decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],\n sample_inputs_func=sample_inputs_linalg_matrix_norm,\n skips=(\n # linalg.matrix_norm does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),\n )),\n OpInfo('linalg.qr',\n aten_name='linalg_qr',\n op=torch.linalg.qr,\n dtypes=floating_and_complex_types(),\n # batched gradients do not work for empty inputs\n # https://github.com/pytorch/pytorch/issues/50743#issuecomment-767376085\n check_batched_gradgrad=False,\n sample_inputs_func=sample_inputs_linalg_qr,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # cuda gradchecks are slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),\n OpInfo('linalg.slogdet',\n aten_name='linalg_slogdet',\n op=torch.linalg.slogdet,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_slogdet,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),\n OpInfo('linalg.vector_norm',\n op=torch.linalg.vector_norm,\n dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],\n sample_inputs_func=sample_inputs_linalg_vector_norm,\n aten_name='linalg_vector_norm',\n skips=(\n # linalg.vector_norm 
does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),\n )),\n UnaryUfuncInfo('log',\n ref=np.log,\n domain=(0, float('inf')),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n safe_casts_outputs=True,\n decorators=(precisionOverride({torch.bfloat16: 5e-2}),),\n skips=(\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=IS_WINDOWS),\n )),\n UnaryUfuncInfo('log10',\n ref=np.log10,\n domain=(0, float('inf')),\n decorators=(precisionOverride({torch.bfloat16: 5e-2}),),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n assert_autodiffed=True,\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n skips=(\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=IS_WINDOWS),\n )),\n UnaryUfuncInfo('log1p',\n ref=np.log1p,\n domain=(-1, float('inf')),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n decorators=(precisionOverride({torch.bfloat16: 1e-1}),),\n safe_casts_outputs=True,\n assert_autodiffed=True),\n UnaryUfuncInfo('log2',\n ref=np.log2,\n domain=(0, float('inf')),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n safe_casts_outputs=True,\n decorators=(precisionOverride({torch.bfloat16: 1e-1}),),\n skips=(\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n dtypes=[torch.cfloat, torch.cdouble]),\n )),\n OpInfo('logaddexp',\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.bfloat16),\n dtypesIfROCM=floating_types_and(torch.bfloat16),\n sample_inputs_func=lambda op_info, device, dtype, requires_grad=False, **kwargs:\n (SampleInput(make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n args=(make_tensor((S, S), device, dtype, requires_grad=requires_grad),)),)),\n OpInfo('logaddexp2',\n dtypes=floating_types(),\n dtypesIfCUDA=floating_types_and(torch.bfloat16),\n dtypesIfROCM=floating_types_and(torch.bfloat16),\n sample_inputs_func=lambda op_info, device, dtype, requires_grad=False, **kwargs:\n (SampleInput(make_tensor((S, S), device, dtype, requires_grad=requires_grad),\n args=(make_tensor((S, S), device, dtype, requires_grad=requires_grad),)),)),\n UnaryUfuncInfo('logical_not',\n ref=np.logical_not,\n decorators=(precisionOverride({torch.bfloat16: 7e-1,\n torch.float16: 5e-1}),),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n supports_autograd=False,\n skips=(\n # The function variant always returns BoolTensor\n # while the inplace variant preserves the input dtype.\n # >>> t = torch.randn(3)\n # >>> torch.logical_not(t)\n # tensor([False, False, False])\n # >>> torch.logical_not(t).dtype\n # torch.bool\n # >>> t.logical_not_().dtype\n # torch.float32\n SkipInfo('TestUnaryUfuncs', 'test_variant_consistency',\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)),\n SkipInfo('TestCommon', 'test_variant_consistency_eager',\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)),\n )),\n 
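    # The comparison ops below (lt, and le/gt/ge/eq elsewhere in this database) set
    # supports_autograd=False because they return boolean tensors, which cannot
    # require gradients. An illustrative sketch:
    # >>> a = torch.randn(3, requires_grad=True)
    # >>> torch.lt(a, 0).dtype
    # torch.bool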
OpInfo('lt',\n aliases=('less',),\n dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_comparison_ops),\n OpInfo('lu',\n op=torch.lu,\n dtypes=floating_and_complex_types(),\n supports_inplace_autograd=False,\n check_batched_gradgrad=False,\n supports_out=False,\n sample_inputs_func=sample_inputs_lu,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # cuda gradchecks are slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),\n # we skip jit tests because lu_backward is implemented as autograd.Function,\n # which does not support autograd with scripting\n SkipInfo('TestCommon', 'test_variant_consistency_jit'),\n # Skip operator schema test because this is a functional and not an operator\n SkipInfo('TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n )),\n OpInfo('lu_unpack',\n op=torch.lu_unpack,\n dtypes=floating_and_complex_types(),\n supports_inplace_autograd=False,\n # we use in-place operations which cannot be avoided.\n # This causes vmap failures, hence we skip batched gradient checks\n check_batched_grad=False,\n supports_out=True,\n sample_inputs_func=sample_inputs_lu_unpack,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # cuda gradchecks are slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),\n )),\n OpInfo('masked_fill',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_masked_fill,\n supports_out=False),\n OpInfo('masked_scatter',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_masked_scatter,\n supports_out=False),\n OpInfo('masked_select',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_masked_select),\n OpInfo('matrix_exp',\n dtypesIfCPU=floating_and_complex_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_matrix_exp,\n supports_out=False),\n OpInfo('matmul',\n dtypes=floating_types(),\n dtypesIfCPU=all_types_and_complex(),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n dtypesIfROCM=floating_types_and(torch.half, torch.bfloat16),\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_matmul,\n skips=(\n # matmul does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),\n # https://github.com/pytorch/pytorch/issues/55754\n SkipInfo('TestGradients', 'test_fn_grad',\n device_type='cpu', dtypes=(torch.complex128,)),\n # https://github.com/pytorch/pytorch/issues/55755\n SkipInfo('TestOpInfo', 'test_unsupported_dtypes',\n device_type='cpu', dtypes=(torch.float16,)),\n # Backward for BFloat16 isn't supported because of the error\n # \"RuntimeError: CUDA error: CUBLAS_STATUS_NOT_SUPPORTED when\n # calling cublasGemmStridedBatchedExFix.\"\n SkipInfo('TestOpInfo', 'test_supported_backward',\n device_type='cuda', dtypes=(torch.bfloat16,)),)),\n OpInfo('max',\n op=torch.max,\n variant_test_name='binary',\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_max_min_binary,\n 
assert_autodiffed=True,),\n OpInfo('max',\n op=torch.max,\n variant_test_name='reduction_with_dim',\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_max_min_reduction_with_dim,\n skips=(\n # max does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),)),\n OpInfo('max',\n op=torch.max,\n variant_test_name='reduction_no_dim',\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n supports_out=False,\n sample_inputs_func=sample_inputs_max_min_reduction_no_dim,),\n OpInfo('min',\n op=torch.min,\n variant_test_name='binary',\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_max_min_binary,\n assert_autodiffed=True,),\n OpInfo('min',\n op=torch.min,\n variant_test_name='reduction_with_dim',\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_max_min_reduction_with_dim,\n skips=(\n # min does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),\n )),\n OpInfo('min',\n op=torch.min,\n variant_test_name='reduction_no_dim',\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n supports_out=False,\n sample_inputs_func=sample_inputs_max_min_reduction_no_dim,),\n OpInfo('sum',\n dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool),\n supports_out=False,\n sample_inputs_func=sample_inputs_reduction_wrapper(supports_multiple_dims=True)),\n OpInfo('nansum',\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n dtypesIfCPU=all_types_and(torch.float16, torch.bool),\n supports_out=False,\n sample_inputs_func=sample_inputs_reduction_wrapper(supports_multiple_dims=True)),\n # TODO(@heitorschueroff) Add test for dtype kwarg\n OpInfo('mean',\n dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_reduction_wrapper(supports_multiple_dims=True),\n # Need to skip out test because one of the overloads for mean does not support it\n # TODO(@heitorschueroff) fix this when implementing ReductionInfo\n skips=(SkipInfo('TestCommon', 'test_out'),)),\n OpInfo('quantile',\n dtypes=floating_types(),\n sample_inputs_func=sample_inputs_reduction_quantile),\n OpInfo('nanquantile',\n dtypes=floating_types(),\n sample_inputs_func=sample_inputs_reduction_quantile),\n OpInfo('maximum',\n op=torch.maximum,\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_max_min_binary,),\n OpInfo('minimum',\n op=torch.minimum,\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_max_min_binary,),\n OpInfo('nn.functional.hardswish',\n supports_autograd=True,\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_hardswish,\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n supports_gradgrad=False,\n supports_out=False,\n autodiff_fusible_nodes=[\"aten::hardswish\"]),\n OpInfo('topk',\n dtypes=all_types(),\n dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16),\n sample_inputs_func=sample_inputs_topk,\n skips=(\n # topk does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),\n )),\n OpInfo('mm',\n dtypes=floating_and_complex_types_and(torch.half),\n dtypesIfCPU=all_types_and_complex_and(torch.float16, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n 
assert_autodiffed=True,\n sample_inputs_func=sample_inputs_mm,\n skips=(\n # mm does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),\n )),\n OpInfo('mode',\n op=torch.mode,\n dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_mode,),\n MvlGammaInfo(variant_test_name='mvlgamma_p_1',\n domain=(1e-4, float('inf')),\n skips=skips_mvlgamma(),\n sample_kwargs=lambda device, dtype, input: ({'p': 1}, {'d': 1})),\n MvlGammaInfo(variant_test_name='mvlgamma_p_3',\n domain=(1.1, float('inf')),\n skips=skips_mvlgamma(skip_redundant=True) + (\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard', dtypes=(torch.float16,)),\n ),\n sample_kwargs=lambda device, dtype, input: ({'p': 3}, {'d': 3})),\n MvlGammaInfo(variant_test_name='mvlgamma_p_5',\n domain=(2.1, float('inf')),\n skips=skips_mvlgamma(skip_redundant=True) + (\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard', dtypes=(torch.float16,)),\n ),\n sample_kwargs=lambda device, dtype, input: ({'p': 5}, {'d': 5})),\n OpInfo('ne',\n aliases=('not_equal',),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_comparison_ops),\n OpInfo('narrow',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n sample_inputs_func=sample_inputs_narrow),\n UnaryUfuncInfo('neg',\n aliases=('negative', ),\n ref=np.negative,\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),\n assert_autodiffed=True,),\n OpInfo('dist',\n op=torch.dist,\n dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),\n # \"pow\" not implemented for 'BFloat16' or 'half'\n backward_dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_dist,\n skips=(\n # dist does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),\n )),\n OpInfo('outer',\n op=torch.outer,\n aliases=('ger', ),\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_outer,),\n OpInfo('ormqr',\n op=torch.ormqr,\n dtypes=floating_and_complex_types(),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_ormqr,\n decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack]),\n OpInfo('permute',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_permute),\n OpInfo('pow',\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),\n # Due to AVX2 currently not being fully supported for Float16, log_vml_cpu can't be enabled\n # for Float16, causing this test to fail. 
pow's autograd for Float16 is thus currently\n # unsupported on CPU.\n backward_dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_pow,\n supports_inplace_autograd=False,\n assert_autodiffed=True),\n OpInfo('float_power',\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),\n sample_inputs_func=sample_inputs_pow),\n OpInfo('prod',\n dtypes=all_types_and_complex_and(torch.bool),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n # \"cumprod_cuda\" not implemented for 'BFloat16'\n backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16),\n skips=(\n # prod does not support the (Tensor, *, out) overload\n SkipInfo('TestCommon', 'test_out',\n dtypes=[torch.float32]),\n ),\n sample_inputs_func=sample_inputs_prod,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n OpInfo('qr',\n op=torch.qr,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_qr,\n # batched gradients do not work for empty inputs\n # https://github.com/pytorch/pytorch/issues/50743#issuecomment-767376085\n check_batched_gradgrad=False,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # cuda gradchecks are slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),\n UnaryUfuncInfo('rad2deg',\n ref=np.degrees,\n decorators=(precisionOverride({torch.bfloat16: 7e-1,\n torch.float16: 7e-1}),),\n dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/pull/51283#issuecomment-770614273\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n dtypes=[torch.bfloat16]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.bfloat16]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.bfloat16]),\n ),\n safe_casts_outputs=True),\n UnaryUfuncInfo('real',\n ref=np.real,\n dtypes=complex_types(),\n supports_out=False,\n supports_autograd=False,\n skips=(\n # Skip since real and imag don't have out variants.\n SkipInfo('TestUnaryUfuncs', 'test_out_arg_all_dtypes'),\n )),\n OpInfo('roll',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n supports_out=False,\n sample_inputs_func=sample_inputs_roll),\n OpInfo('rot90',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n supports_out=False,\n sample_inputs_func=sample_inputs_rot90),\n UnaryUfuncInfo('round',\n ref=np.round,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n assert_autodiffed=True,),\n UnaryUfuncInfo('sin',\n ref=np.sin,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n handles_large_floats=False,\n handles_complex_extremals=False,\n safe_casts_outputs=True,\n decorators=(precisionOverride({torch.bfloat16: 1e-2}),)),\n UnaryUfuncInfo('sinc',\n ref=np_sinc_with_fp16_as_fp32,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n handles_large_floats=False,\n handles_complex_extremals=False,\n safe_casts_outputs=True,\n decorators=(precisionOverride({torch.bfloat16: 1e-2,\n torch.float16: 1e-2}),),\n skips=(\n # Reference: 
https://github.com/pytorch/pytorch/issues/49133\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n dtypes=[torch.cfloat]),\n )),\n UnaryUfuncInfo('sinh',\n ref=np_unary_ufunc_integer_promotion_wrapper(np.sinh),\n dtypes=all_types_and_complex_and(torch.bool),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n assert_autodiffed=True,\n decorators=(precisionOverride({torch.float16: 1e-2}),),\n skips=(\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n # Reference: https://github.com/pytorch/pytorch/issues/48641\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.int8]),\n )),\n UnaryUfuncInfo('sign',\n ref=reference_sign,\n dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half),\n dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.half),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/41245\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]),\n )),\n UnaryUfuncInfo('sgn',\n ref=reference_sgn,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/41245\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]),\n # Reference: https://github.com/pytorch/pytorch/issues/53958\n # Test fails in comparison on Nan as the `equal_nan` is True for\n # comparing the CPU tensors.\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.complex64, torch.complex128]),\n # Reference: https://github.com/pytorch/pytorch/issues/48486\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.complex64])\n )),\n OpInfo('__radd__',\n op=torch.Tensor.__radd__,\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n skips=(SkipInfo('TestCommon', 'test_variant_consistency_jit',),),\n assert_autodiffed=True,\n autodiff_nonfusible_nodes=['aten::add'],),\n OpInfo('__rdiv__',\n op=torch.Tensor.__rdiv__,\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n skips=(SkipInfo('TestCommon', 'test_variant_consistency_jit',),),\n assert_autodiffed=True,\n autodiff_nonfusible_nodes=['aten::mul', 'aten::reciprocal'],),\n OpInfo('__rmul__',\n op=torch.Tensor.__rmul__,\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n skips=(SkipInfo('TestCommon', 'test_variant_consistency_jit',),),\n assert_autodiffed=True,\n autodiff_nonfusible_nodes=['aten::mul'],),\n OpInfo('__rpow__',\n op=torch.Tensor.__rpow__,\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/54774\n # \"log2\" \"_vml_cpu\" not implemented for Half\n SkipInfo('TestOpInfo', 'test_supported_backward', 
device_type='cpu',\n dtypes=(torch.float16,)),\n\n SkipInfo('TestCommon', 'test_variant_consistency_jit',),),\n assert_autodiffed=True,\n autodiff_nonfusible_nodes=['aten::pow'],),\n OpInfo('__rsub__',\n op=torch.Tensor.__rsub__,\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),\n sample_inputs_func=sample_inputs_rbinops,\n supports_out=False,\n skips=(SkipInfo('TestCommon', 'test_variant_consistency_jit',),),\n assert_autodiffed=True,\n autodiff_nonfusible_nodes=['aten::rsub'],),\n OpInfo('rsub',\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),\n variant_test_name='rsub_tensor',\n supports_out=False,\n supports_inplace_autograd=False,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/53797\n # JIT doesn't understand complex literals\n SkipInfo('TestCommon', 'test_variant_consistency_jit',\n dtypes=[torch.cfloat, torch.cdouble]),\n ),\n sample_inputs_func=partial(sample_inputs_rsub, variant='tensor'),),\n OpInfo('rsub',\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),\n variant_test_name='rsub_scalar',\n supports_out=False,\n supports_inplace_autograd=False,\n sample_inputs_func=partial(sample_inputs_rsub, variant='scalar'),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/53797\n # JIT doesn't understand complex literals\n SkipInfo('TestCommon', 'test_variant_consistency_jit',\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half)),),\n assert_autodiffed=True,),\n OpInfo('select',\n dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),\n sample_inputs_func=sample_inputs_select,\n supports_out=False),\n UnaryUfuncInfo('signbit',\n ref=np.signbit,\n dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half),\n supports_autograd=False,),\n OpInfo('solve',\n op=torch.solve,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_legacy_solve,\n check_batched_gradgrad=False,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n # cuda gradchecks are slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n skips=(SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),\n OpInfo('std',\n dtypes=floating_and_complex_types_and(torch.half),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),\n # std doesn't support complex autograd, https://github.com/pytorch/pytorch/issues/57358\n backward_dtypesIfCPU=floating_types_and(torch.half),\n backward_dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_std_var,\n # TODO: std does support out in some signatures\n supports_out=False,\n assert_autodiffed=True,\n ),\n UnaryUfuncInfo('tan',\n ref=np.tan,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n safe_casts_outputs=True,\n skips=(\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.bfloat16]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.bfloat16]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.bfloat16]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, 
torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cuda', dtypes=[torch.float64],\n active_if=TEST_WITH_ROCM),\n )),\n UnaryUfuncInfo('tanh',\n ref=np.tanh,\n decorators=(precisionOverride({torch.bfloat16: 1e-2}),),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n # \"tanh_backward_cpu\" not implemented for 'BFloat16'\n backward_dtypesIfCPU=all_types_and_complex_and(torch.bool),\n assert_autodiffed=True,\n safe_casts_outputs=True,\n skips=(\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=(IS_MACOS or IS_WINDOWS)),\n )),\n OpInfo('tensor_split',\n dtypes=all_types_and_complex_and(torch.bool),\n dtypesIfCPU=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n skips=(SkipInfo('TestOpInfo', 'test_duplicate_method_tests'),),\n sample_inputs_func=sample_inputs_tensor_split,),\n OpInfo('hsplit',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n sample_inputs_func=sample_inputs_hsplit,),\n OpInfo('vsplit',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n sample_inputs_func=sample_inputs_vsplit,),\n OpInfo('dsplit',\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n supports_out=False,\n sample_inputs_func=sample_inputs_dsplit,),\n OpInfo('triangular_solve',\n op=torch.triangular_solve,\n dtypes=floating_and_complex_types(),\n supports_out=False,\n sample_inputs_func=sample_inputs_legacy_solve,\n check_batched_gradgrad=False,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n # CUDA gradchecks are slow and triangular solve backward is a composite operation\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n skips=(SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),\n UnaryUfuncInfo('trunc',\n aliases=('fix', ),\n ref=np.trunc,\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16),\n assert_autodiffed=True),\n UnaryUfuncInfo('exp2',\n aliases=('special.exp2', ),\n ref=np_unary_ufunc_integer_promotion_wrapper(np.exp2),\n dtypes=all_types_and(torch.bool, torch.half),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True),\n UnaryUfuncInfo('expm1',\n aliases=('special.expm1', ),\n ref=np_unary_ufunc_integer_promotion_wrapper(np.expm1),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n assert_autodiffed=True,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/pull/48926#issuecomment-739734774\n SkipInfo('TestUnaryUfuncs', 
'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.bfloat16]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.bfloat16]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.bfloat16]),\n )),\n UnaryUfuncInfo('nan_to_num',\n ref=np.nan_to_num,\n dtypes=all_types_and(torch.half, torch.bool),\n dtypesIfCUDA=all_types_and(torch.half, torch.bool, torch.bfloat16),\n # Passing numpy_kwargs via sample_kwargs, as numpy does comparison\n # with BFloat16 in float, since it currently doesn't support BFloat16.\n # Ref: https://github.com/pytorch/pytorch/issues/57982#issuecomment-839150556\n sample_kwargs=lambda device, dtype, input: ({},\n {'posinf': torch.finfo(torch.bfloat16).max,\n 'neginf': torch.finfo(torch.bfloat16).min})\n if dtype is torch.bfloat16 else ({}, {})),\n UnaryUfuncInfo('reciprocal',\n ref=np_unary_ufunc_integer_promotion_wrapper(np.reciprocal),\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n safe_casts_outputs=True,\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/45690\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.cfloat, torch.cdouble]),\n # Reference: https://github.com/pytorch/pytorch/pull/49102#issuecomment-744604601\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.bfloat16]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.bfloat16]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n dtypes=[torch.bfloat16]),\n )),\n UnaryUfuncInfo('rsqrt',\n ref=lambda x: np.reciprocal(np.sqrt(x)),\n domain=(0, float('inf')),\n dtypes=all_types_and_complex_and(torch.bool),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n decorators=(precisionOverride({torch.half: 5e-2}),),\n safe_casts_outputs=True,\n assert_autodiffed=True,\n handles_complex_extremals=False),\n UnaryUfuncInfo('sqrt',\n ref=np.sqrt,\n supports_sparse=True,\n domain=(0, float('inf')),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n decorators=(precisionOverride({torch.bfloat16: 7e-2}),),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/47358\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],\n active_if=IS_MACOS),\n # Reference: https://github.com/pytorch/pytorch/pull/47293#issuecomment-721774436\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.bfloat16])),\n safe_casts_outputs=True,\n handles_complex_extremals=False),\n UnaryUfuncInfo('square',\n ref=np.square,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n decorators=(precisionOverride({torch.complex64: 3e-4, torch.bfloat16: 3e-1}),),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/52549\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.cfloat, torch.cdouble]),\n # >>> t = torch.tensor(complex(-0.01, float(\"inf\")))\n # >>> np.square(t.numpy())\n # (-inf-infj)\n # >>> t.square()\n # tensor(-inf-infj)\n # >>> t.cuda().square()\n # tensor(inf+nanj, device='cuda:0')\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),\n # Reference: 
https://github.com/pytorch/pytorch/pull/52551#issuecomment-782596181\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.bfloat16]),\n ),),\n OpInfo('lerp',\n dtypes=floating_and_complex_types(),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),\n dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_lerp,\n assert_autodiffed=True),\n OpInfo('linalg.inv',\n aten_name='linalg_inv',\n op=torch.linalg.inv,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_invertible,\n check_batched_gradgrad=False,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # linalg_inv does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),\n )),\n UnaryUfuncInfo('angle',\n ref=np.angle,\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool),\n decorators=(precisionOverride({torch.float16: 1e-2,\n torch.bfloat16: 1e-2}),),\n safe_casts_outputs=True,\n supports_complex_to_float=True),\n OpInfo('linalg.solve',\n aten_name='linalg_solve',\n op=torch.linalg.solve,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_solve,\n check_batched_gradgrad=False,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),\n OpInfo('linalg.matrix_rank',\n aten_name='linalg_matrix_rank',\n dtypes=floating_and_complex_types(),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_linalg_invertible,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),\n OpInfo('linalg.matrix_rank',\n aten_name='linalg_matrix_rank',\n variant_test_name='hermitian',\n dtypes=floating_and_complex_types(),\n supports_autograd=False,\n sample_inputs_func=sample_inputs_linalg_pinv_hermitian,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack]),\n OpInfo('linalg.pinv',\n aten_name='linalg_pinv',\n op=torch.linalg.pinv,\n dtypes=floating_and_complex_types(),\n check_batched_grad=False,\n check_batched_gradgrad=False,\n sample_inputs_func=sample_inputs_linalg_invertible,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # cuda gradchecks are slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),\n OpInfo('linalg.pinv',\n aten_name='linalg_pinv',\n variant_test_name='hermitian',\n dtypes=floating_and_complex_types(),\n check_batched_grad=False,\n check_batched_gradgrad=False,\n sample_inputs_func=sample_inputs_linalg_pinv_hermitian,\n gradcheck_wrapper=gradcheck_wrapper_hermitian_input,\n decorators=[skipCUDAIfNoMagma, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # cuda gradchecks are slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),\n OpInfo('eig',\n op=torch.eig,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_eig,\n decorators=[\n skipCUDAIfNoMagma,\n skipCPUIfNoLapack,\n skipCUDAIfRocm\n ],),\n OpInfo('einsum',\n # we need this lambda because SampleInput expects tensor input as the first argument\n # TODO(@heitorschueroff) update SampleInput to handle such cases\n op=lambda tensors, 
equation: torch.einsum(equation, tensors),\n dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half),\n supports_out=False,\n sample_inputs_func=sample_inputs_einsum,\n skips=(\n # test does not work with passing lambda for op\n # there's a test `test_einsum` in `test_jit.py` to handle this case\n SkipInfo('TestCommon', 'test_variant_consistency_jit'),\n # The following dtypes are only supported for some inputs, ideally we should have\n # checked this in the einsum code but to keep BC we'll just skip the tests for now.\n SkipInfo('TestOpInfo', 'test_unsupported_dtypes',\n dtypes=[torch.bool]),\n SkipInfo('TestOpInfo', 'test_unsupported_dtypes',\n device_type='cuda', dtypes=integral_types_and(torch.bfloat16)))),\n OpInfo('svd',\n op=torch.svd,\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_svd,\n decorators=[\n skipCUDAIfNoMagmaAndNoCusolver,\n skipCUDAIfRocm,\n skipCPUIfNoLapack,\n # gradgrad checks are slow\n DecorateInfo(slowTest, 'TestGradients', 'test_fn_gradgrad'),\n ],\n skips=(\n # cuda gradchecks are very slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),\n OpInfo('linalg.svd',\n op=torch.linalg.svd,\n aten_name='linalg_svd',\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_svd,\n decorators=[\n skipCUDAIfNoMagmaAndNoCusolver,\n skipCUDAIfRocm,\n skipCPUIfNoLapack,\n # gradgrad checks are slow\n DecorateInfo(slowTest, 'TestGradients', 'test_fn_gradgrad'),\n ],\n skips=(\n # cuda gradchecks are very slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),\n OpInfo('linalg.svdvals',\n op=torch.linalg.svdvals,\n aten_name='linalg_svdvals',\n dtypes=floating_and_complex_types(),\n sample_inputs_func=sample_inputs_linalg_svdvals,\n check_batched_gradgrad=False,\n decorators=[\n skipCUDAIfNoMagmaAndNoCusolver,\n skipCPUIfNoLapack]),\n OpInfo('polar',\n dtypes=floating_types(),\n sample_inputs_func=sample_inputs_polar),\n # TODO(@kshitij12345): Refactor similar to `mvlgamma` entries.\n # To test reference numerics against multiple values of argument `n`,\n # we make multiple OpInfo entries with each entry corresponding to different value of n (currently 0 to 4).\n # We run the op tests from test_ops.py only for `n=0` to avoid redundancy in testing.\n UnaryUfuncInfo('polygamma',\n op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),\n variant_test_name='polygamma_n_0',\n ref=reference_polygamma if TEST_SCIPY else _NOTHING,\n dtypes=all_types_and(torch.bool),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n sample_inputs_func=sample_inputs_polygamma,\n skips=(\n # Probably related to the way the function is\n # scripted for JIT tests (or maybe not).\n # RuntimeError:\n # Arguments for call are not valid.\n # The following variants are available:\n # aten::polygamma(int n, Tensor self) -> (Tensor):\n # Expected a value of type 'Tensor' for argument 'self' but instead found type 'int'.\n # aten::polygamma.out(int n, Tensor self, *, Tensor(a!) 
out) -> (Tensor(a!)):\n # Expected a value of type 'Tensor' for argument 'self' but instead found type 'int'.\n # The original call is:\n # File \"<string>\", line 3\n # def the_method(i0):\n # return torch.polygamma(i0, 1)\n # ~~~~~~~~~~~~~~~ <--- HERE\n SkipInfo('TestCommon', 'test_variant_consistency_jit'),),\n sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0})),\n UnaryUfuncInfo('polygamma',\n op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),\n variant_test_name='polygamma_n_1',\n ref=reference_polygamma if TEST_SCIPY else _NOTHING,\n dtypes=all_types_and(torch.bool),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n sample_inputs_func=sample_inputs_polygamma,\n skips=(\n # Redundant tests\n SkipInfo('TestGradients'),\n SkipInfo('TestOpInfo'),\n SkipInfo('TestCommon'),\n # Mismatch: https://github.com/pytorch/pytorch/issues/55357\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal'),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard'),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal'),\n ),\n sample_kwargs=lambda device, dtype, input: ({'n': 1}, {'n': 1})),\n UnaryUfuncInfo('polygamma',\n op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),\n variant_test_name='polygamma_n_2',\n ref=reference_polygamma if TEST_SCIPY else _NOTHING,\n dtypes=all_types_and(torch.bool),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n sample_inputs_func=sample_inputs_polygamma,\n skips=(\n # Redundant tests\n SkipInfo('TestGradients'),\n SkipInfo('TestOpInfo'),\n SkipInfo('TestCommon'),\n # Mismatch: https://github.com/pytorch/pytorch/issues/55357\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal'),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n active_if=TEST_WITH_ROCM),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n active_if=TEST_WITH_ROCM),),\n sample_kwargs=lambda device, dtype, input: ({'n': 2}, {'n': 2})),\n UnaryUfuncInfo('polygamma',\n op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),\n variant_test_name='polygamma_n_3',\n ref=reference_polygamma if TEST_SCIPY else _NOTHING,\n dtypes=all_types_and(torch.bool),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n sample_inputs_func=sample_inputs_polygamma,\n skips=(\n # Redundant tests\n SkipInfo('TestGradients'),\n SkipInfo('TestOpInfo'),\n SkipInfo('TestCommon'),\n # Mismatch: https://github.com/pytorch/pytorch/issues/55357\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal'),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n active_if=TEST_WITH_ROCM),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n active_if=TEST_WITH_ROCM),),\n sample_kwargs=lambda device, dtype, input: ({'n': 3}, {'n': 3})),\n UnaryUfuncInfo('polygamma',\n op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),\n variant_test_name='polygamma_n_4',\n ref=reference_polygamma if TEST_SCIPY else _NOTHING,\n decorators=(precisionOverride({torch.float16: 5e-4, torch.float32: 5e-4}),),\n dtypes=all_types_and(torch.bool),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n sample_inputs_func=sample_inputs_polygamma,\n skips=(\n # Redundant tests\n SkipInfo('TestGradients'),\n SkipInfo('TestOpInfo'),\n SkipInfo('TestCommon'),\n # Mismatch: https://github.com/pytorch/pytorch/issues/55357\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal'),\n SkipInfo('TestUnaryUfuncs', 
'test_reference_numerics_hard',\n active_if=TEST_WITH_ROCM),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n active_if=TEST_WITH_ROCM),),\n sample_kwargs=lambda device, dtype, input: ({'n': 4}, {'n': 4})),\n OpInfo('ravel',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_ravel,\n ),\n OpInfo('reshape',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_view_reshape,\n supports_out=False,\n ),\n OpInfo('reshape_as',\n op=lambda x, other: x.reshape_as(other),\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_view_as_reshape_as,\n skips=(\n # Because reshape_as does not have a function variant.\n SkipInfo('TestCommon', 'test_variant_consistency_jit'),),\n supports_out=False,\n ),\n OpInfo('view',\n op=lambda x, shape: x.view(shape),\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n skips=(\n # Because view does not have a function variant.\n SkipInfo('TestCommon', 'test_variant_consistency_jit'),),\n sample_inputs_func=sample_inputs_view_reshape,\n ),\n OpInfo('view_as',\n op=lambda x, other: x.view_as(other),\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n skips=(\n # Because view_as does not have a function variant.\n SkipInfo('TestCommon', 'test_variant_consistency_jit'),),\n sample_inputs_func=sample_inputs_view_as_reshape_as,\n ),\n OpInfo('pinverse',\n op=torch.pinverse,\n dtypes=floating_and_complex_types(),\n check_batched_grad=False,\n check_batched_gradgrad=False,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n supports_out=False,\n sample_inputs_func=sample_inputs_linalg_invertible,\n decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],\n skips=(\n # cuda gradchecks are slow\n # see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775\n SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),)),\n OpInfo('gather',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_gather,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,\n ),\n OpInfo('index_fill',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_inplace_autograd=False,\n skips=(SkipInfo('TestOpInfo', 'test_duplicate_method_tests'),),\n supports_out=False,\n sample_inputs_func=sample_inputs_index_fill),\n OpInfo('index_copy',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_inplace_autograd=False,\n supports_out=False,\n sample_inputs_func=sample_inputs_index_copy,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n OpInfo('index_select',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_index_select,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n OpInfo('index_add',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_inputs_index_add,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n OpInfo('__getitem__',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_inplace_autograd=False,\n op=torch.Tensor.__getitem__,\n 
sample_inputs_func=sample_inputs_getitem,\n skips=(SkipInfo('TestCommon', 'test_variant_consistency_jit'),)),\n OpInfo('index_put',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n supports_inplace_autograd=True,\n sample_inputs_func=sample_inputs_index_put,\n skips=(\n SkipInfo('TestCommon', 'test_variant_consistency_jit'),\n )),\n OpInfo('sort',\n dtypes=all_types_and(torch.bool, torch.float16),\n dtypesIfCUDA=all_types_and(torch.float16),\n dtypesIfROCM=all_types_and(torch.float16),\n sample_inputs_func=sample_inputs_sort,\n skips=(\n # sort does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),\n )),\n OpInfo('put',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n check_batched_gradgrad=False, # vmap complains of the sizes\n sample_inputs_func=sample_inputs_put),\n OpInfo('take',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n check_batched_grad=False, # vmap complains of the sizes\n sample_inputs_func=sample_inputs_take),\n OpInfo('scatter',\n dtypes=all_types_and_complex_and(torch.bool, torch.half),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_scatter,\n supports_out=False),\n OpInfo('scatter_add',\n dtypes=all_types_and_complex_and(torch.bool, torch.half),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_scatter_add,\n supports_out=False),\n OpInfo('stack',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_stack,\n assert_autodiffed=True,\n skips=(\n # stack does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),),),\n OpInfo('hstack',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_hstack_dstack_vstack,\n skips=(\n # hstack does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),),),\n OpInfo('hypot',\n dtypes=floating_types(),\n dtypesIfCPU=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_hypot,\n ),\n OpInfo('vstack',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_hstack_dstack_vstack,\n skips=(\n # vstack does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),),),\n OpInfo('dstack',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n sample_inputs_func=sample_inputs_hstack_dstack_vstack,\n skips=(\n # dstack does not correctly warn when resizing out= inputs\n SkipInfo('TestCommon', 'test_out'),),),\n OpInfo('unfold',\n op=lambda x, *args: x.unfold(*args),\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n check_batched_gradgrad=False,\n skips=(\n # torch.unfold does not exist so we get a RuntimeError.\n SkipInfo('TestCommon', 'test_variant_consistency_jit',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16)),\n # Skip operator schema test because this is a functional and not an operator\n SkipInfo('TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n ),\n sample_inputs_func=sample_inputs_unfold),\n OpInfo('msort',\n dtypes=all_types_and(torch.float16),\n 
check_batched_gradgrad=False,\n skips=(\n # msort does not correctly warn when resizing out= inputs.\n SkipInfo('TestCommon', 'test_out',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16)),\n # msort does not raise expected Runtime Error.\n SkipInfo('TestOpInfo', 'test_unsupported_dtypes', dtypes=[torch.bool]),\n ),\n sample_inputs_func=sample_inputs_msort),\n OpInfo('movedim',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_movedim_moveaxis),\n OpInfo('moveaxis',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_movedim_moveaxis),\n ShapeFuncInfo('repeat',\n op=lambda x, dims: x.repeat(dims),\n ref=np.tile,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n skips=(\n # torch.repeat does not exist so we get a RuntimeError.\n SkipInfo('TestCommon', 'test_variant_consistency_jit',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16)),\n ),\n sample_inputs_func=sample_repeat_tile),\n OpInfo('squeeze',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_squeeze),\n OpInfo('take_along_dim',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_inplace_autograd=False,\n sample_inputs_func=sample_inputs_take_along_dim,\n gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),\n ShapeFuncInfo('tile',\n ref=np.tile,\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n sample_inputs_func=sample_repeat_tile),\n OpInfo('unsqueeze',\n dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),\n supports_out=False,\n assert_autodiffed=True,\n sample_inputs_func=sample_unsqueeze),\n OpInfo('var',\n dtypes=floating_and_complex_types_and(torch.half),\n dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),\n # var doesn't support complex autograd, https://github.com/pytorch/pytorch/issues/57358\n backward_dtypesIfCPU=floating_types_and(torch.half),\n backward_dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_std_var,\n # TODO: revisit, some var signatures do support out (see std, too)\n supports_out=False,\n assert_autodiffed=True,\n ),\n OpInfo('xlogy',\n dtypes=all_types_and(torch.bool),\n dtypesIfCPU=all_types_and(torch.bool, torch.half, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n supports_inplace_autograd=True,\n safe_casts_outputs=True,\n sample_inputs_func=sample_inputs_xlogy),\n OpInfo('special.xlog1py',\n aten_name='special_xlog1py',\n dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n skips=(\n SkipInfo('TestOpInfo', 'test_supported_backward',\n device_type='cpu', dtypes=[torch.float16]),\n ),\n sample_inputs_func=sample_inputs_xlog1py),\n OpInfo('logsumexp',\n dtypes=floating_types_and(torch.bfloat16),\n dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half),\n assert_autodiffed=True,\n sample_inputs_func=sample_inputs_logsumexp),\n OpInfo('trace',\n dtypes=all_types_and_complex(),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_inplace_autograd=False,\n supports_out=False,\n 
sample_inputs_func=sample_inputs_trace),\n OpInfo('transpose',\n aliases=('swapdims', 'swapaxes'),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half),\n supports_out=False,\n sample_inputs_func=sample_inputs_transpose_swapdims),\n OpInfo('kron',\n dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n supports_inplace_autograd=False,\n sample_inputs_func=sample_inputs_kron),\n OpInfo('inner',\n dtypes=floating_and_complex_types_and(torch.half),\n dtypesIfCPU=all_types_and_complex_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_inner),\n OpInfo('tensordot',\n dtypes=floating_and_complex_types_and(torch.half),\n dtypesIfCPU=all_types_and_complex_and(torch.half, torch.bfloat16),\n dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),\n dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),\n safe_casts_outputs=True,\n sample_inputs_func=sample_inputs_tensordot,\n skips=(\n # Currently failing due to an INTERNAL_ASSERT_FAILED error.\n # Reference: https://github.com/pytorch/pytorch/issues/56314\n SkipInfo(\"TestCommon\", \"test_variant_consistency_jit\", dtypes=[torch.float32]),\n # Skip operator schema test because this is a functional and not an operator.\n # Reference: https://github.com/pytorch/pytorch/issues/54574\n SkipInfo('TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),\n )\n ),\n OpInfo('logcumsumexp',\n dtypes=floating_types_and(),\n dtypesIfCUDA=floating_types_and(torch.half),\n skips=(\n # AssertionError: UserWarning not triggered : Resized a non-empty tensor but did not warn about it.\n SkipInfo('TestCommon', 'test_out', dtypes=(torch.float32,), device_type='cuda'),\n # logcumsumexp_backward not implemented for 'Half'\n SkipInfo('TestOpInfo', 'test_supported_backward', dtypes=(torch.float16,), device_type='cuda'),\n ),\n sample_inputs_func=sample_inputs_logcumsumexp),\n UnaryUfuncInfo('sigmoid',\n aliases=('special.expit', ),\n ref=reference_sigmoid if TEST_SCIPY else _NOTHING,\n decorators=(precisionOverride({torch.float16: 1e-2,\n torch.complex64: 1e-1,\n torch.bfloat16: 1e-2}),),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/issues/56012\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cuda', dtypes=[torch.complex64]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cuda', dtypes=[torch.complex64]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.cfloat, torch.cdouble])),\n dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),\n # sigmoid doesn't support complex autograd, https://github.com/pytorch/pytorch/issues/48552\n backward_dtypesIfCPU=all_types_and(torch.bool, torch.bfloat16),\n backward_dtypesIfCUDA=all_types_and(torch.bool, 
torch.bfloat16),\n safe_casts_outputs=True,\n assert_autodiffed=True),\n UnaryUfuncInfo('digamma',\n ref=scipy.special.digamma if TEST_SCIPY else _NOTHING,\n decorators=(precisionOverride({torch.float16: 5e-1}),),\n dtypes=all_types_and(torch.bool),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True),\n UnaryUfuncInfo('special.entr',\n ref=scipy.special.entr if TEST_SCIPY else _NOTHING,\n aten_name='special_entr',\n decorators=(precisionOverride({torch.float16: 1e-1,\n torch.bfloat16: 1e-1}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n skips=(\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.bfloat16, torch.float16]),\n ),\n supports_inplace_autograd=False,\n safe_casts_outputs=True,\n sample_inputs_func=sample_inputs_entr),\n UnaryUfuncInfo('erf',\n ref=scipy.special.erf if TEST_SCIPY else _NOTHING,\n aliases=('special.erf', ),\n decorators=(precisionOverride({torch.float16: 1e-2,\n torch.bfloat16: 1e-2}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n safe_casts_outputs=True),\n UnaryUfuncInfo('erfc',\n ref=scipy.special.erfc if TEST_SCIPY else _NOTHING,\n aliases=('special.erfc', ),\n decorators=(precisionOverride({torch.float16: 1e-2,\n torch.bfloat16: 1e-2}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n assert_autodiffed=True,\n safe_casts_outputs=True),\n UnaryUfuncInfo('erfinv',\n ref=scipy.special.erfinv if TEST_SCIPY else _NOTHING,\n aliases=('special.erfinv', ),\n decorators=(precisionOverride({torch.float16: 1e-2,\n torch.bfloat16: 1e-2,\n torch.float32: 1e-4}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n safe_casts_outputs=True,\n domain=(-1, 1),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/pull/49155#issuecomment-742664611\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < \"1.4.0\"),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < \"1.4.0\"),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < \"1.4.0\"),\n )),\n UnaryUfuncInfo('lgamma',\n ref=reference_lgamma if TEST_SCIPY else _NOTHING,\n aliases=('special.gammaln', ),\n decorators=(precisionOverride({torch.float16: 7e-1}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half),\n # \"digamma\" not implemented for 'BFloat16'\n backward_dtypesIfCPU=all_types_and(torch.bool),\n skips=(\n # Reference: https://github.com/pytorch/pytorch/pull/50140#discussion_r552615345\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.bfloat16]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n device_type='cpu', dtypes=[torch.bfloat16]),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n device_type='cpu', dtypes=[torch.bfloat16]),\n # Reference: https://github.com/pytorch/pytorch/pull/50140#issuecomment-756150214\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',\n dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),\n 
SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',\n dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),\n SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',\n dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),\n ),\n safe_casts_outputs=True),\n OpInfo(\n 'logdet',\n supports_out=False,\n sample_inputs_func=sample_inputs_logdet,\n decorators=(skipCPUIfNoLapack, skipCUDAIfNoMagma, skipCUDAIfRocm)),\n UnaryUfuncInfo('logit',\n ref=scipy.special.logit if TEST_SCIPY else _NOTHING,\n domain=(0, 1),\n aliases=('special.logit', ),\n decorators=(precisionOverride({torch.bfloat16: 5e-1,\n torch.float16: 5e-1}),),\n dtypes=all_types_and(torch.bool, torch.bfloat16),\n dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),\n sample_inputs_func=sample_inputs_logit,\n safe_casts_outputs=True),\n]\n\n# Common operator groupings\nunary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo)]\nspectral_funcs = [op for op in op_db if isinstance(op, SpectralFuncInfo)]\nsparse_unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo) and op.supports_sparse is True]\nshape_funcs = [op for op in op_db if isinstance(op, ShapeFuncInfo)]\n\ndef index_variable(shape, max_indices, device=torch.device('cpu')):\n if not isinstance(shape, tuple):\n shape = (shape,)\n index = torch.rand(*shape, dtype=torch.double, device=device).mul_(max_indices).floor_().long()\n return index\n\n\ndef index_perm_variable(shape, max_indices):\n if not isinstance(shape, tuple):\n shape = (shape,)\n\n index = torch.randperm(max_indices).narrow(0, 0, reduce(mul, shape)).view(shape)\n return index\n\n\ndef gather_variable(shape, index_dim, max_indices, duplicate=False, device=torch.device('cpu')):\n assert len(shape) == 2\n assert index_dim < 2\n batch_dim = 1 - index_dim\n index = torch.zeros(*shape, dtype=torch.long, device=device)\n for i in range(shape[index_dim]):\n index.select(index_dim, i).copy_(\n torch.randperm(max_indices, device=device)[:shape[batch_dim]])\n if duplicate:\n index.select(batch_dim, 0).copy_(index.select(batch_dim, 1))\n return index\n\n\ndef bernoulli_scalar():\n return torch.tensor(0, dtype=torch.bool).bernoulli_()\n\n\ndef mask_not_all_zeros(shape):\n assert len(shape) > 0\n while True:\n result = torch.randn(shape).gt(0)\n if result.sum() > 0:\n return result\n\n\ndef uniform_scalar(offset=0, requires_grad=False):\n v = torch.rand(()) + offset\n v.requires_grad = requires_grad\n return v\n\n\ndef normal_scalar_clamp(amin, amax, requires_grad=False):\n v = torch.randn(()).clamp(amin, amax)\n v.requires_grad = requires_grad\n return v\n\n\ndef prod_zeros(dim_size, dim_select):\n assert len(dim_select) == 2\n result = torch.randn(dim_size, dim_size, dim_size)\n result.narrow(dim_select[0], 0, 1).narrow(dim_select[1], 1, 1).zero_()\n result.narrow(dim_select[0], 2, 1).narrow(dim_select[1], 3, 1).zero_()\n result.narrow(dim_select[0], 4, 1).narrow(dim_select[1], 3, 1).zero_()\n return result\n\n\nnon_differentiable = collections.namedtuple('non_differentiable', ['tensor'])\n\n\nclass dont_convert(tuple):\n pass\n\n\nclass NoArgsClass(object):\n def __iter__(self):\n return self\n\n def __next__(self):\n raise StopIteration()\n next = __next__ # Python 2 compatibility\n\n def __len__(self):\n return 0\n\nNO_ARGS = NoArgsClass()\n\ndef ident(x):\n return x\n\n# Do NOT add to this list. 
Method tests are being DEPRECATED and replaced by OpInfos.\n# See https://github.com/pytorch/pytorch/wiki/Writing-tests-in-PyTorch-1.8\n#\n# (\n# method name,\n# input size/constructing fn,\n# args (tuple represents shape of a tensor arg),\n# test variant name (will be used at test name suffix), // optional\n# (should_autodiff_node[bool], nonfusible_nodes, fusible_nodes) for autodiff, // optional\n# indices for possible dim arg, // optional\n# fn mapping output to part that should be gradcheck'ed, // optional\n# kwargs // optional\n# )\n# Note: some functions have separate schema for (Tensor other) and (Scalar other),\n# and it's possible that we only support AD for Scalar version but not Tensor\n# version, and vice versa.\n# When writing tests, only scalar(float/int) input triggers the Scalar schema.\n# uniform_scalar produces a scalar **Tensor** which won't match Scalar input.\ndef method_tests():\n set_rng_seed(SEED)\n return [\n ('div', (S, S, S), (torch.rand(S, S, S) + 0.1,), '', (True,)),\n ('div', (S, S, S), (torch.rand(S, S) + 0.1,), 'broadcast_rhs', (True,)),\n ('div', (S, S), (torch.rand(S, S, S) + 0.1,), 'broadcast_lhs', (True,)),\n ('div', (S, 1, S), (torch.rand(M, S) + 0.1,), 'broadcast_all', (True,)),\n ('div', (), (uniform_scalar(0.1),), 'scalar', (True,)),\n ('div', (S, S, S), (uniform_scalar(0.1),), 'scalar_broadcast_rhs', (True,)),\n ('div', (), (uniform_scalar(0.1),), 'scalar_broadcast_lhs', (True,)),\n ('div', torch.rand(S, S, S) + 1e-1, (3.14,), 'constant', (True,)),\n ('div', uniform_scalar(1e-1, requires_grad=True), (3.14,), 'scalar_constant', (True,)),\n ('true_divide', (S, S, S), (torch.rand(S, S, S) + 0.1,), '', (True,)),\n ('true_divide', (S, S, S), (torch.rand(S, S) + 0.1,), 'broadcast_rhs', (True,)),\n ('true_divide', (S, S), (torch.rand(S, S, S) + 0.1,), 'broadcast_lhs', (True,)),\n ('true_divide', (S, 1, S), (torch.rand(M, S) + 0.1,), 'broadcast_all', (True,)),\n ('true_divide', (), (uniform_scalar(0.1),), 'scalar', (True,)),\n ('true_divide', (S, S, S), (uniform_scalar(0.1),), 'scalar_broadcast_rhs', (True,)),\n ('true_divide', (), (uniform_scalar(0.1),), 'scalar_broadcast_lhs', (True,)),\n ('true_divide', torch.rand(S, S, S) + 1e-1, (3.14,), 'constant', (True,)),\n ('true_divide', uniform_scalar(1e-1, requires_grad=True), (3.14,), 'scalar_constant', (True,)),\n ('div', (S, S, S), (torch.rand(S, S, S, dtype=torch.cdouble) + 0.1,), 'complex', (True,)),\n ('div', (S, S, S), (torch.rand(S, S, dtype=torch.cdouble) + 0.1,), 'complex_broadcast_rhs', (True,)),\n ('div', (S, S), (torch.rand(S, S, S, dtype=torch.cdouble) + 0.1,), 'complex_broadcast_lhs', (True,)),\n ('div', (S, 1, S), (torch.rand(M, S, dtype=torch.cdouble) + 0.1,), 'complex_broadcast_all', (True,)),\n ('div', (), (uniform_scalar(0.1j),), 'complex_scalar', (True,)),\n ('div', (S, S, S), (uniform_scalar(0.1j),), 'complex_scalar_broadcast_rhs', (True,)),\n ('div', (), (uniform_scalar(0.1j),), 'complex_scalar_broadcast_lhs', (True,)),\n ('div', torch.rand(S, S, S, dtype=torch.cdouble) + 1e-1, (3.14j,), 'complex_constant', (True,)),\n ('div', uniform_scalar(1e-1j, requires_grad=True), (3.14j,), 'complex_scalar_constant', (True,)),\n ('t', (1, 2), NO_ARGS, '', (False,)),\n ('fmod', (S, S, S), (1.5,), '', (True,)),\n ('fmod', (), (1.5,), 'scalar', (True,)),\n ('fmod', (S, S, S), (non_differentiable(torch.rand(S, S, S) + 1.5),), 'tensor'),\n ('fmod', (S,), (non_differentiable(torch.rand(S, S, S) + 1.5),), 'tensor_broadcast_lhs'),\n ('fmod', (S, S, S), (non_differentiable(torch.rand(S) + 1.5),), 
'tensor_broadcast_rhs'),\n ('fmod', (S, 1, S), (non_differentiable(torch.rand(S, S) + 1.5),), 'tensor_broadcast_all'),\n ('fmod', (), (non_differentiable(uniform_scalar(1.5)),), 'scalar_tensor'),\n ('fmod', (), (non_differentiable(torch.rand(S, S, S) + 1.5),), 'scalar_tensor_broadcast_lhs'),\n ('fmod', (S, S, S), (non_differentiable(uniform_scalar(1.5)),), 'scalar_tensor_broadcast_rhs'),\n ('remainder', (S, S, S), (1.5,), '', (True,)),\n ('remainder', (), (1.5,), 'scalar', (True,)),\n ('remainder', (S, S, S), (non_differentiable(torch.rand(S, S, S) + 1.5),), 'tensor'),\n ('remainder', (S,), (non_differentiable(torch.rand(S, S, S) + 1.5),), 'tensor_broadcast_lhs'),\n ('remainder', (S, 1, S), (non_differentiable(torch.rand(S, S) + 1.5),), 'tensor_broadcast_all'),\n ('remainder', (), (non_differentiable(uniform_scalar(1.5)),), 'scalar_tensor'),\n ('remainder', (), (non_differentiable(torch.rand(S, S, S) + 1.5),), 'scalar_tensor_broadcast_lhs'),\n ('kthvalue', (S, S, S), (2,)),\n ('kthvalue', (S, S, S), (2, 1,), 'dim', (), [1]),\n ('kthvalue', (S, S, S), (2, 1, True,), 'keepdim_dim', (), [1]),\n ('kthvalue', (S,), (2, 0,), 'dim_1d', (), [1]),\n ('kthvalue', (S,), (2, 0, True,), 'keepdim_dim_1d', (), [1]),\n ('kthvalue', (), (1,), 'scalar', (), ()),\n ('kthvalue', (), (1, 0,), 'scalar_dim', (), [1]),\n ('kthvalue', (), (1, 0, True), 'scalar_keepdim_dim', (), [1]),\n ('median', (S, S, S), NO_ARGS),\n ('median', (S, S, S), (1,), 'dim', (), [0]),\n ('median', (S, S, S), (1, True,), 'keepdim_dim', (), [0]),\n ('median', (), NO_ARGS, 'scalar'),\n ('median', (), (0,), 'scalar_dim', (), [0]),\n ('median', (), (0, True,), 'scalar_keepdim_dim', (), [0]),\n ('nanmedian', (S, S, S), NO_ARGS),\n ('nanmedian', (S, S, S), (1,), 'dim', (), [0]),\n ('nanmedian', (S, S, S), (1, True,), 'keepdim_dim', (), [0]),\n ('nanmedian', (), NO_ARGS, 'scalar'),\n ('nanmedian', (), (0,), 'scalar_dim', (), [0]),\n ('nanmedian', (), (0, True,), 'scalar_keepdim_dim', (), [0]),\n ('var_mean', (S, S, S), NO_ARGS, ''),\n ('var_mean', (S, S, S), (1,), 'dim', [0]),\n ('var_mean', (S, S, S), (1, True, True), 'keepdim_dim', [0]),\n ('var_mean', (S,), (0,), 'dim_1d', [0]),\n ('var_mean', (S,), (0, True, True), 'keepdim_dim_1d', [0]),\n ('std_mean', (S, S, S), NO_ARGS, ''),\n ('std_mean', (S, S, S), (1,), 'dim', [0]),\n ('std_mean', (S, S, S), (1, True, True), 'keepdim_dim', [0]),\n ('std_mean', (S,), (0,), 'dim_1d', [0]),\n ('std_mean', (S,), (0, True, True), 'keepdim_dim_1d', [0]),\n ('renorm', (S, S, S), (2, 1, 0.5), 'dim', (), [1]),\n ('renorm', (S, S, S), (1, 2, 3), 'norm_1'),\n ('renorm', (S, S, S), (inf, 2, 0.5), 'norm_inf'),\n ('log_softmax', (S, S, S), (1, torch.float64,), 'kwarg_dtype_would_break_jit_loader', (True,)),\n ('zero_', (S, S, S), NO_ARGS),\n ('zero_', (), NO_ARGS, 'scalar'),\n ('norm', (S, S), (), 'default'),\n ('norm', (S, S), (2,), '2'),\n ('norm', (S, S), (0,), '0'),\n ('norm', (S, S), (0.5,), '0_5'),\n ('norm', (S, S), (1,), '1'),\n ('norm', (S, S), (3,), '3'),\n ('norm', (S, S), (inf,), 'inf'),\n ('norm', (S, S), (-inf,), '-inf'),\n ('norm', (S, S), ('fro',), 'fro_default'),\n ('norm', (S, S), ('fro', [0, 1],), 'fro'),\n ('norm', (S, S), ('nuc',), 'nuc', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),\n ('norm', (S, S, S), ('nuc', [1, 2]), 'nuc_batched', (), NO_ARGS, [skipCPUIfNoLapack, skipCUDAIfNoMagma]),\n ('norm', (S, S), (-1,), 'neg_1'),\n ('norm', (S, S), (-2,), 'neg_2'),\n ('norm', (S, S), (-0.5,), 'neg_0_5'),\n ('norm', (S, S), (-1.5,), 'neg_1_5'),\n ('norm', (S, S), (-2, 1,), 'neg_2_2_dim', (), 
[1]),\n ('norm', (S, S), (-1, 1,), 'neg_1_2_dim', (), [1]),\n ('norm', (S, S), (0, 1,), '0_2_dim', (), [1]),\n ('norm', (S, S), (1, 1,), '1_2_dim', (), [1]),\n ('norm', (S, S), (2, 1,), '2_2_dim', (), [1]),\n ('norm', (S, S), (3, 1,), '3_2_dim', (), [1]),\n ('norm', (S, S), (inf, 1,), 'inf_2_dim'),\n ('norm', torch.rand(S, S, S) + 5e-2, (1.5,), '1_5_default'),\n ('norm', (S, S, S), (2, 1), '2_dim', (), [1]),\n ('norm', (S, S, S), (3, 1), '3_dim', (), [1]),\n ('norm', torch.rand(S, S, S) + 5e-2, (1.5, 1), '1_5_dim', (), [1]),\n ('norm', (S, S, S), (2, 1, True), 'keepdim_2_dim', (), [1]),\n ('norm', (S, S, S), (3, 1, True), 'keepdim_3_dim', (), [1]),\n ('norm', torch.rand(S, S, S) + 5e-2, (1.5, 1, True), 'keepdim_1_5_dim', (), [1]),\n ('norm', (), (2, 0), '2_dim_scalar', (), [1]),\n ('norm', (), (3, 0), '3_dim_scalar', (), [1]),\n ('norm', (), (2, 0, True), 'keepdim_2_dim_scalar', (), [1]),\n ('norm', (), (3, 0, True), 'keepdim_3_dim_scalar', (), [1]),\n ('clone', (S, M, S), NO_ARGS),\n ('clone', (), NO_ARGS, 'scalar'),\n ('contiguous', (S, S), NO_ARGS, '', (True,)),\n ('contiguous', torch.randn(S, S).transpose(0, 1), NO_ARGS, 'not_contiguous', (True,)),\n ('diag_embed', (S, S), NO_ARGS),\n ('diagonal', (M, M), NO_ARGS, '2d'),\n ('diagonal', (3, 5), NO_ARGS, '2d_wide'),\n ('diagonal', (3, 5), (2,), '2d_wide_pos'),\n ('diagonal', (3, 5), (-2,), '2d_wide_neg'),\n ('diagonal', (5, 3), NO_ARGS, '2d_tall'),\n ('diagonal', (5, 3), (2,), '2d_tall_pos'),\n ('diagonal', (5, 3), (-2,), '2d_tall_neg'),\n ('diagonal', (M, M), (1,), '2d_1'),\n ('diagonal', (M, M), (2,), '2d_2'),\n ('diagonal', (M, M, M), (1, 1, 2), '3d_1'),\n ('diagonal', (M, M, M), (2, 0, 1), '3d_2'),\n ('diagonal', (M, M, M), (-2, 0, 1), '3d_3'),\n ('tril', (M, M), NO_ARGS),\n ('tril', (M, M), (2,), 'idx'),\n ('tril', (S, M, M), NO_ARGS, 'batched'),\n ('tril', (S, M, M), (2,), 'batched_idx'),\n ('tril', (3, 3, S, S), NO_ARGS, 'more_batched'),\n ('triu', (M, M), NO_ARGS),\n ('triu', (M, M), (2,), 'idx'),\n ('triu', (S, M, M), NO_ARGS, 'batched'),\n ('triu', (S, M, M), (2,), 'batched_idx'),\n ('triu', (3, 3, S, S), NO_ARGS, 'more_batched'),\n ('cross', (S, 3), ((S, 3),)),\n ('cross', (S, 3, S), ((S, 3, S), 1), 'dim'),\n ('fill_', (S, S, S), (1,), 'number'),\n ('fill_', (), (1,), 'number_scalar'),\n ('fill_', (S, S, S), ((),), 'variable'),\n ('split', (S, S, S), (2,), '', (True,)),\n ('split', (S, S, S), (S, 1), 'dim', (True,), [1]),\n ('split', (S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],), 'size_list',\n (True, 'aten::split_with_sizes')),\n ('split', (S, S, S), ([int(S / 2), S - int(S / 2) * 2, int(S / 2)], 2), 'size_list_dim',\n (True, 'aten::split_with_sizes'), [1]),\n ('split_with_sizes', (S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],), '', (True,)),\n ('split_with_sizes', (S, S, S), ([int(S / 3), S - int(S / 3), 0],), 'size_0', (True, )),\n ('split_with_sizes', (S, S, S), ([int(S / 3), S - int(S / 3) * 2, int(S / 3)],), 'dim', (True, ), [1]),\n ('tensor_split', (S, S, S), (3,), 'sections', (False,)),\n ('tensor_split', (S, S, S), (3, 1), 'sections_dim', (False,), [1]),\n ('tensor_split', (S, S, S), ([2, 4],), 'indices', (False,)),\n ('tensor_split', (S, S, S), ([2, 4], 1), 'indices_dim', (False,), [1]),\n ('resize_', (S, S, S), (torch.Size([S * S, S])), 'fewer_dims'),\n ('resize_', (), (dont_convert(()),), 'scalar'),\n ('resize_', (), (torch.Size([1, 1, 1])), 'scalar_to_dims'),\n ('resize_as_', (), (non_differentiable(torch.tensor(5.)),), 'scalar'),\n ('resize_as_', (), (non_differentiable(torch.randn((1, 1, 
1))),), 'scalar_to_dims'),\n ('resize_as_', (S, S, S), (non_differentiable(torch.randn(S * S, S)),)),\n ('where', (M, M), (mask_not_all_zeros((M, M)), (M, M)), '', (True,)),\n ('where', (M, 1, M), (mask_not_all_zeros((M, M)), (M, M, 1)), 'broadcast_all', (True,)),\n ('where', (), (bernoulli_scalar(), ()), 'scalar', (True,)),\n ('where', (M, 1, M), (bernoulli_scalar(), (M, M, 1)), 'scalar_broadcast_mask', (True,)),\n ('where', (), (mask_not_all_zeros((M, M)), ()), 'scalar_broadcast_non_mask', (True,)),\n ('to_sparse', (S, S), (), '', (), (), [], lambda x: x.to_dense())\n ]\n\ndef create_input(call_args, requires_grad=True, non_contiguous=False, call_kwargs=None, dtype=torch.double, device=None):\n if not isinstance(call_args, tuple):\n call_args = (call_args,)\n\n def map_arg(arg):\n def maybe_non_contig(tensor):\n return tensor if not non_contiguous else make_non_contiguous(tensor)\n\n if isinstance(arg, torch.Size) or isinstance(arg, dont_convert):\n return arg\n elif isinstance(arg, tuple) and len(arg) == 0:\n var = torch.randn((), dtype=dtype, device=device)\n var.requires_grad = requires_grad\n return var\n elif isinstance(arg, tuple) and not isinstance(arg[0], torch.Tensor):\n return Variable(maybe_non_contig(torch.randn(*arg, dtype=dtype, device=device)), requires_grad=requires_grad)\n # double check casting\n elif isinstance(arg, non_differentiable):\n if isinstance(arg.tensor, torch.Tensor):\n if arg.tensor.dtype == torch.float:\n return maybe_non_contig(arg.tensor.to(dtype=torch.double, device=device))\n if arg.tensor.dtype == torch.cfloat:\n return maybe_non_contig(arg.tensor.to(dtype=torch.cdouble, device=device))\n return maybe_non_contig(arg.tensor.to(device=device))\n return maybe_non_contig(arg.tensor.to(device=device))\n elif isinstance(arg, torch.Tensor):\n if arg.dtype == torch.float:\n arg = arg.double()\n if arg.dtype == torch.cfloat:\n arg = arg.to(torch.cdouble)\n if arg.is_complex() != dtype.is_complex:\n raise RuntimeError(\"User provided tensor is real for a test that runs with complex dtype, \",\n \"which is not supported for now\")\n # NOTE: We do clone() after detach() here because we need to be able to change size/storage of v afterwards\n v = maybe_non_contig(arg).detach().to(device=device).clone()\n v.requires_grad = requires_grad and (v.is_floating_point() or v.is_complex())\n return v\n elif callable(arg):\n return map_arg(arg(dtype=dtype, device=device))\n else:\n return arg\n args_out = tuple(map_arg(arg) for arg in call_args)\n kwargs_out = {k: map_arg(v) for k, v in call_kwargs.items()} if call_kwargs else {}\n return args_out, kwargs_out\n\n\ndef _compare_trilu_indices(\n self, row, col, offset=0, dtype=torch.long, device='cpu'):\n if row == 0 or col == 0:\n # have to handle this separately as tril and triu does not take\n # empty matrix as input\n self.assertEqual(\n torch.empty(0, 2, dtype=dtype, device=device).transpose(0, 1),\n torch.tril_indices(row, col, offset, dtype=dtype, device=device))\n\n self.assertEqual(\n torch.empty(0, 2, dtype=dtype, device=device).transpose(0, 1),\n torch.triu_indices(row, col, offset, dtype=dtype, device=device))\n\n else:\n # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095\n self.assertEqualIgnoreType(\n torch.ones(row, col, device='cpu')\n .tril(offset).nonzero().to(dtype).transpose(0, 1),\n torch.tril_indices(row, col, offset, dtype=dtype, device=device))\n\n # TODO(#38095): Replace assertEqualIgnoreType. 
See issue #38095\n        self.assertEqualIgnoreType(\n            torch.ones(row, col, device='cpu')\n            .triu(offset).nonzero().to(dtype).transpose(0, 1),\n            torch.triu_indices(row, col, offset, dtype=dtype, device=device))\n\n\ndef _compare_large_trilu_indices(\n        self, row, col, offset=0, dtype=torch.long, device='cpu'):\n    l = torch.ones(row, col, dtype=dtype, device='cpu').tril(offset) \\\n        .nonzero()[-100:-1, :].transpose(0, 1).to(device)\n    torch.cuda.empty_cache()\n\n    r = torch.tril_indices(\n        row, col, offset, dtype=dtype, device=device)[:, -100:-1]\n    self.assertEqual(l, r)\n    torch.cuda.empty_cache()\n\n    l = torch.ones(row, col, dtype=dtype, device='cpu').triu(offset) \\\n        .nonzero()[-100:-1, :].transpose(0, 1).to(device)\n    torch.cuda.empty_cache()\n\n    r = torch.triu_indices(\n        row, col, offset, dtype=dtype, device=device)[:, -100:-1]\n    self.assertEqual(l, r)\n    torch.cuda.empty_cache()\n\n# (\n#   row\n#   col\n#   offset (optional)\n#   dtype (optional)\n# )\ntri_tests_args = [\n    (1, 1),\n    (3, 3),\n    (3, 3, 1),\n    (3, 3, 2),\n    (3, 3, 200),\n    (3, 3, -1),\n    (3, 3, -2),\n    (3, 3, -200),\n    (0, 3, 0),\n    (0, 3, 1),\n    (0, 3, -1),\n    (3, 0, 0),\n    (3, 0, 1),\n    (3, 0, -1),\n    (0, 0, 0),\n    (0, 0, 1),\n    (0, 0, -1),\n    (3, 6, 0),\n    (3, 6, 1),\n    (3, 6, 3),\n    (3, 6, 9),\n    (3, 6, -1),\n    (3, 6, -3),\n    (3, 6, -9),\n    (6, 3, 0),\n    (6, 3, 1),\n    (6, 3, 3),\n    (6, 3, 9),\n    (6, 3, -1),\n    (6, 3, -3),\n    (6, 3, -9),\n    (258, 253, 1, torch.float32),\n    (257, 258, 1, torch.float64),\n    (258, 258, 1, torch.short),\n    (3, 513, 1, torch.long),\n    (513, 3, 1, torch.int),\n    (513, 0, 1, torch.double),\n    (1024, 1024),\n    (1024, 1024, 500, torch.float32),\n    (1024, 1024, 1023),\n    (1024, 1024, -500),\n    (1023, 1025),\n    (1025, 1023, 1022),\n    (1024, 1024, -500),\n    (3, 2028),\n    (3, 2028, 1),\n    (3, 2028, -1),\n    (2028, 3),\n    (2028, 1),\n    (2028, 1, -1)\n]\n\ntri_large_tests_args: List[Tuple[int, ...]] = [\n    # Large test cases below are deliberately commented out to speed up CI\n    # tests and to avoid OOM error. 
When modifying implementations of\n # tril_indices and triu_indices, please enable these tests and make sure\n # they pass.\n #\n # (1, 268435455),\n # (5000, 5000),\n # (10000, 10000),\n # (268435455, 1),\n # (134217727, 2, 1),\n # (2, 134217727, 1),\n # (536870901, 1),\n # (1, 536870901),\n # (268435455, 2, 1),\n # (2, 268435455, 1)\n]\n\n\ndef run_additional_tri_tests(self, device):\n x = torch.ones(\n 3, 3, dtype=torch.long, device=device, layout=torch.strided)\n l = x.tril(0).nonzero().transpose(0, 1)\n u = x.triu(0).nonzero().transpose(0, 1)\n self.assertEqual(l, torch.tril_indices(3, 3, device=device))\n self.assertEqual(\n l, torch.tril_indices(3, 3, device=device, layout=torch.strided))\n\n self.assertEqual(u, torch.triu_indices(3, 3, device=device))\n self.assertEqual(\n u, torch.triu_indices(3, 3, device=device, layout=torch.strided))\n\n self.assertRaises(\n RuntimeError,\n lambda: torch.triu_indices(\n 1, 1, device=device, layout=torch.sparse_coo))\n\n self.assertRaises(\n RuntimeError,\n lambda: torch.tril_indices(\n 1, 1, device=device, layout=torch.sparse_coo))\n\n\ndef unpack_variables(args):\n if isinstance(args, tuple):\n return tuple(unpack_variables(elem) for elem in args)\n else:\n return args\n\n\nEXCLUDE_FUNCTIONAL = {\n 'addmm',\n 'addmm_',\n 'reshape',\n 'where' # argument order\n}\nEXCLUDE_GRADCHECK: Dict[str, Any] = {\n}\nEXCLUDE_GRADGRADCHECK: Dict[str, Any] = {\n}\nEXCLUDE_GRADGRADCHECK_BY_TEST_NAME = {\n # `other` expand_as(self, other) is not used in autograd.\n 'test_expand_as',\n 'test_cdist',\n}\n\n\ndef exclude_tensor_method(name, test_name):\n # there are no tensor equivalents for these (inplace or out)\n exclude_all_tensor_method_by_test_name = {\n 'test_slice',\n 'test_where',\n 'test_where_broadcast_all',\n 'test_where_scalar',\n 'test_where_scalar_broadcast_mask',\n 'test_where_scalar_broadcast_non_mask',\n 'test_var_mean_keepdim_dim_1d',\n 'test_var_mean_keepdim_dim',\n 'test_var_mean_dim_1d',\n 'test_var_mean_dim',\n 'test_var_mean',\n 'test_std_mean_keepdim_dim_1d',\n 'test_std_mean_keepdim_dim',\n 'test_std_mean_dim_1d',\n 'test_std_mean_dim',\n 'test_std_mean',\n }\n # there are no out-of-place tensor equivalents for these\n exclude_outplace_tensor_method = {\n 'index_fill',\n 'scatter',\n 'scatter_add',\n }\n if test_name in exclude_all_tensor_method_by_test_name:\n return True\n is_magic_method = name[:2] == '__' and name[-2:] == '__'\n is_inplace = name[-1] == \"_\" and not is_magic_method\n if not is_inplace and name in exclude_outplace_tensor_method:\n return True\n return False\n" ]
[ [ "torch.empty", "torch.get_default_dtype", "torch.testing._internal.common_utils.random_symmetric_matrix", "torch.rand", "torch.no_grad", "torch.tril_indices", "torch.testing._internal.common_utils.make_tensor", "torch.cuda.empty_cache", "torch.testing._internal.common_utils.random_hermitian_pd_matrix", "torch.testing.floating_types", "torch.randn", "torch.repeat_interleave", "torch.testing._internal.common_utils.is_iterable_of_tensors", "numpy.abs", "numpy.random.choice", "torch.testing._internal.common_utils.random_symmetric_pd_matrix", "numpy.sinc", "torch.testing.floating_types_and", "torch.device", "torch.testing.integral_types_and", "torch.triu_indices", "torch.testing._internal.common_utils._wrap_warn_once", "torch.testing.floating_and_complex_types_and", "torch.ones", "torch.testing._internal.common_utils.random_symmetric_psd_matrix", "numpy.sqrt", "torch.testing.floating_and_complex_types", "torch.testing.make_non_contiguous", "torch.empty_strided", "torch.tensor", "torch.testing._internal.common_utils.set_rng_seed", "torch.linalg.cholesky", "torch.testing.all_types_and_complex", "torch.testing._internal.common_device_type.skipIf", "torch.einsum", "torch.testing.all_types_and_complex_and", "torch.testing._internal.common_utils.random_well_conditioned_matrix", "torch.testing.complex_types", "torch.polygamma", "torch.testing._internal.common_device_type.precisionOverride", "numpy.sign", "torch.testing.all_types", "torch.Size", "numpy.modf", "numpy.exp", "torch.testing._internal.common_utils.random_fullrank_matrix_distinct_singular_value", "torch.linalg.svd", "torch.finfo", "torch.randperm", "torch.zeros", "torch.LongTensor", "torch.testing.all_types_and" ] ]
WJ-Lai/CenterNet-CentralNet
[ "d28a8c2438244782ccdd6805e555558b2c01ff46" ]
[ "src/lib/datasets/dataset/fir.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pycocotools.coco as coco\nfrom pycocotools.cocoeval import COCOeval\nimport numpy as np\nimport json\nimport os\n\nimport torch.utils.data as data\nimport src.config as cf\n\nsensor = 'fir'\n\nclass FIR(data.Dataset):\n num_classes = cf.own_dataset_num_classes\n default_resolution = [cf.train_size, cf.train_size]\n mean = np.array(cf.default_mean[sensor],\n dtype=np.float32).reshape(1, 1, 3)\n std = np.array(cf.default_std[sensor],\n dtype=np.float32).reshape(1, 1, 3)\n\n def __init__(self, opt, split):\n super(FIR, self).__init__()\n self.data_dir = os.path.join(opt.data_dir, sensor)\n self.img_dir = os.path.join(self.data_dir, 'images')\n if split == 'val':\n self.annot_path = os.path.join(\n self.data_dir, 'annotations', \n 'val.json')\n else:\n if opt.task == 'exdet':\n self.annot_path = os.path.join(\n self.data_dir, 'annotations', \n 'train.json')\n if split == 'test':\n self.annot_path = os.path.join(\n self.data_dir, 'annotations',\n opt.test_dataset+'.json')\n else:\n self.annot_path = os.path.join(\n self.data_dir, 'annotations', \n 'train.json')\n self.max_objs = 128\n if len(cf.categories)==5:\n self.class_name = [\n '__background__', 'bike', 'car', 'car_stop', 'color_cone', 'person']\n self._valid_ids = [0, 1, 2, 3, 4, 5]\n else:\n self.class_name = [\n '__background__', 'bike', 'car', 'color_cone', 'person']\n self._valid_ids = [0, 1, 2, 3, 4]\n\n self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}\n self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32) \\\n for v in range(1, self.num_classes + 1)]\n self._data_rng = np.random.RandomState(123)\n self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],\n dtype=np.float32)\n self._eig_vec = np.array([\n [-0.58752847, -0.69563484, 0.41340352],\n [-0.5832747, 0.00994535, -0.81221408],\n [-0.56089297, 0.71832671, 0.41158938]\n ], dtype=np.float32)\n # self.mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)\n # self.std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)\n\n self.split = split\n self.opt = opt\n\n print('==> initializing coco 2017 {} data.'.format(split))\n self.coco = coco.COCO(self.annot_path)\n self.images = self.coco.getImgIds()\n self.num_samples = len(self.images)\n\n print('Loaded {} {} samples'.format(split, self.num_samples))\n\n def _to_float(self, x):\n return float(\"{:.2f}\".format(x))\n\n def convert_eval_format(self, all_bboxes):\n # import pdb; pdb.set_trace()\n detections = []\n for image_id in all_bboxes:\n for cls_ind in all_bboxes[image_id]:\n category_id = self._valid_ids[cls_ind - 1]\n for bbox in all_bboxes[image_id][cls_ind]:\n bbox[2] -= bbox[0]\n bbox[3] -= bbox[1]\n score = bbox[4]\n bbox_out = list(map(self._to_float, bbox[0:4]))\n\n detection = {\n \"image_id\": int(image_id),\n \"category_id\": int(category_id),\n \"bbox\": bbox_out,\n \"score\": float(\"{:.2f}\".format(score))\n }\n if len(bbox) > 5:\n extreme_points = list(map(self._to_float, bbox[5:13]))\n detection[\"extreme_points\"] = extreme_points\n detections.append(detection)\n return detections\n\n def __len__(self):\n return self.num_samples\n\n def save_results(self, results, save_dir):\n json.dump(self.convert_eval_format(results), \n open('{}/results.json'.format(save_dir), 'w'))\n \n def run_eval(self, results, save_dir):\n # result_json = os.path.join(save_dir, \"results.json\")\n # detections = self.convert_eval_format(results)\n # 
json.dump(detections, open(result_json, \"w\"))\n self.save_results(results, save_dir)\n coco_dets = self.coco.loadRes('{}/results.json'.format(save_dir))\n coco_eval = COCOeval(self.coco, coco_dets, \"bbox\")\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n\n def cal_mAP(self, results, save_dir):\n self.save_results(results, save_dir)\n print('{}/results.json'.format(save_dir))\n coco_dets = self.coco.loadRes('{}/results.json'.format(save_dir))\n coco_eval = COCOeval(self.coco, coco_dets, \"bbox\")\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n return coco_eval.stats[1]" ]
[ [ "numpy.random.RandomState", "numpy.array" ] ]
Linohong/OpenNMT_dialog
[ "4a9e598afca780723d354d599815c320706af937" ]
[ "onmt/tests/test_random_sampling.py" ]
[ "import unittest\r\nfrom onmt.translate.random_sampling import RandomSampling\r\n\r\nimport torch\r\n\r\n\r\nclass TestRandomSampling(unittest.TestCase):\r\n BATCH_SZ = 3\r\n INP_SEQ_LEN = 53\r\n DEAD_SCORE = -1e20\r\n\r\n BLOCKED_SCORE = -10e20\r\n\r\n def test_advance_with_repeats_gets_blocked(self):\r\n n_words = 100\r\n repeat_idx = 47\r\n ngram_repeat = 3\r\n for batch_sz in [1, 3]:\r\n samp = RandomSampling(\r\n 0, 1, 2, batch_sz, torch.device(\"cpu\"), 0, ngram_repeat, set(),\r\n False, 30, 1., 5, torch.randint(0, 30, (batch_sz,)))\r\n for i in range(ngram_repeat + 4):\r\n # predict repeat_idx over and over again\r\n word_probs = torch.full(\r\n (batch_sz, n_words), -float('inf'))\r\n word_probs[:, repeat_idx] = 0\r\n attns = torch.randn(1, batch_sz, 53)\r\n samp.advance(word_probs, attns)\r\n if i <= ngram_repeat:\r\n expected_scores = torch.zeros((batch_sz, 1))\r\n self.assertTrue(samp.topk_scores.equal(expected_scores))\r\n else:\r\n self.assertTrue(\r\n samp.topk_scores.equal(\r\n torch.tensor(self.BLOCKED_SCORE)\r\n .repeat(batch_sz, 1)))\r\n\r\n def test_advance_with_some_repeats_gets_blocked(self):\r\n # batch 0 and 7 will repeat, the rest will advance\r\n n_words = 100\r\n repeat_idx = 47\r\n other_repeat_idx = 12\r\n ngram_repeat = 3\r\n for batch_sz in [1, 3, 13]:\r\n samp = RandomSampling(\r\n 0, 1, 2, batch_sz, torch.device(\"cpu\"), 0, ngram_repeat, set(),\r\n False, 30, 1., 5, torch.randint(0, 30, (batch_sz,)))\r\n for i in range(ngram_repeat + 4):\r\n word_probs = torch.full(\r\n (batch_sz, n_words), -float('inf'))\r\n # predict the same thing in batch 0 and 7 every i\r\n word_probs[0, repeat_idx] = 0\r\n if batch_sz > 7:\r\n word_probs[7, other_repeat_idx] = 0\r\n # push around what the other batches predict\r\n word_probs[1:7, repeat_idx + i] = 0\r\n if batch_sz > 7:\r\n word_probs[8:, repeat_idx + i] = 0\r\n attns = torch.randn(1, batch_sz, 53)\r\n samp.advance(word_probs, attns)\r\n if i <= ngram_repeat:\r\n self.assertFalse(\r\n samp.topk_scores.eq(\r\n self.BLOCKED_SCORE).any())\r\n else:\r\n # now batch 0 and 7 die\r\n self.assertTrue(samp.topk_scores[0].eq(self.BLOCKED_SCORE))\r\n if batch_sz > 7:\r\n self.assertTrue(samp.topk_scores[7].eq(\r\n self.BLOCKED_SCORE))\r\n self.assertFalse(\r\n samp.topk_scores[1:7].eq(\r\n self.BLOCKED_SCORE).any())\r\n if batch_sz > 7:\r\n self.assertFalse(\r\n samp.topk_scores[8:].eq(\r\n self.BLOCKED_SCORE).any())\r\n\r\n def test_repeating_excluded_index_does_not_die(self):\r\n # batch 0 will repeat excluded idx, batch 1 will repeat\r\n n_words = 100\r\n repeat_idx = 47 # will be repeated and should be blocked\r\n repeat_idx_ignored = 7 # will be repeated and should not be blocked\r\n ngram_repeat = 3\r\n for batch_sz in [1, 3, 17]:\r\n samp = RandomSampling(\r\n 0, 1, 2, batch_sz, torch.device(\"cpu\"), 0, ngram_repeat,\r\n {repeat_idx_ignored}, False, 30, 1., 5,\r\n torch.randint(0, 30, (batch_sz,)))\r\n for i in range(ngram_repeat + 4):\r\n word_probs = torch.full(\r\n (batch_sz, n_words), -float('inf'))\r\n word_probs[0, repeat_idx_ignored] = 0\r\n if batch_sz > 1:\r\n word_probs[1, repeat_idx] = 0\r\n word_probs[2:, repeat_idx + i] = 0\r\n attns = torch.randn(1, batch_sz, 53)\r\n samp.advance(word_probs, attns)\r\n if i <= ngram_repeat:\r\n self.assertFalse(samp.topk_scores.eq(\r\n self.BLOCKED_SCORE).any())\r\n else:\r\n # now batch 1 dies\r\n self.assertFalse(samp.topk_scores[0].eq(\r\n self.BLOCKED_SCORE).any())\r\n if batch_sz > 1:\r\n self.assertTrue(samp.topk_scores[1].eq(\r\n self.BLOCKED_SCORE).all())\r\n 
self.assertFalse(samp.topk_scores[2:].eq(\r\n self.BLOCKED_SCORE).any())\r\n\r\n def test_doesnt_predict_eos_if_shorter_than_min_len(self):\r\n # batch 0 will always predict EOS. The other batches will predict\r\n # non-eos scores.\r\n for batch_sz in [1, 3]:\r\n n_words = 100\r\n _non_eos_idxs = [47]\r\n valid_score_dist = torch.log_softmax(torch.tensor(\r\n [6., 5.]), dim=0)\r\n min_length = 5\r\n eos_idx = 2\r\n lengths = torch.randint(0, 30, (batch_sz,))\r\n samp = RandomSampling(\r\n 0, 1, 2, batch_sz, torch.device(\"cpu\"), min_length,\r\n False, set(), False, 30, 1., 1, lengths)\r\n all_attns = []\r\n for i in range(min_length + 4):\r\n word_probs = torch.full(\r\n (batch_sz, n_words), -float('inf'))\r\n # \"best\" prediction is eos - that should be blocked\r\n word_probs[0, eos_idx] = valid_score_dist[0]\r\n # include at least one prediction OTHER than EOS\r\n # that is greater than -1e20\r\n word_probs[0, _non_eos_idxs[0]] = valid_score_dist[1]\r\n word_probs[1:, _non_eos_idxs[0] + i] = 0\r\n\r\n attns = torch.randn(1, batch_sz, 53)\r\n all_attns.append(attns)\r\n samp.advance(word_probs, attns)\r\n if i < min_length:\r\n self.assertTrue(\r\n samp.topk_scores[0].allclose(valid_score_dist[1]))\r\n self.assertTrue(\r\n samp.topk_scores[1:].eq(0).all())\r\n elif i == min_length:\r\n # now batch 0 has ended and no others have\r\n self.assertTrue(samp.is_finished[0, :].eq(1).all())\r\n self.assertTrue(samp.is_finished[1:, 1:].eq(0).all())\r\n else: # i > min_length\r\n break\r\n\r\n def test_returns_correct_scores_deterministic(self):\r\n for batch_sz in [1, 13]:\r\n for temp in [1., 3.]:\r\n n_words = 100\r\n _non_eos_idxs = [47, 51, 13, 88, 99]\r\n valid_score_dist_1 = torch.log_softmax(torch.tensor(\r\n [6., 5., 4., 3., 2., 1.]), dim=0)\r\n valid_score_dist_2 = torch.log_softmax(torch.tensor(\r\n [6., 1.]), dim=0)\r\n eos_idx = 2\r\n lengths = torch.randint(0, 30, (batch_sz,))\r\n samp = RandomSampling(\r\n 0, 1, 2, batch_sz, torch.device(\"cpu\"), 0,\r\n False, set(), False, 30, temp, 1, lengths)\r\n\r\n # initial step\r\n i = 0\r\n word_probs = torch.full(\r\n (batch_sz, n_words), -float('inf'))\r\n # batch 0 dies on step 0\r\n word_probs[0, eos_idx] = valid_score_dist_1[0]\r\n # include at least one prediction OTHER than EOS\r\n # that is greater than -1e20\r\n word_probs[0, _non_eos_idxs] = valid_score_dist_1[1:]\r\n word_probs[1:, _non_eos_idxs[0] + i] = 0\r\n\r\n attns = torch.randn(1, batch_sz, 53)\r\n samp.advance(word_probs, attns)\r\n self.assertTrue(samp.is_finished[0].eq(1).all())\r\n samp.update_finished()\r\n self.assertEqual(\r\n samp.scores[0], [valid_score_dist_1[0] / temp])\r\n if batch_sz == 1:\r\n self.assertTrue(samp.done)\r\n continue\r\n else:\r\n self.assertFalse(samp.done)\r\n\r\n # step 2\r\n i = 1\r\n word_probs = torch.full(\r\n (batch_sz - 1, n_words), -float('inf'))\r\n # (old) batch 8 dies on step 1\r\n word_probs[7, eos_idx] = valid_score_dist_2[0]\r\n word_probs[0:7, _non_eos_idxs[:2]] = valid_score_dist_2\r\n word_probs[8:, _non_eos_idxs[:2]] = valid_score_dist_2\r\n\r\n attns = torch.randn(1, batch_sz, 53)\r\n samp.advance(word_probs, attns)\r\n\r\n self.assertTrue(samp.is_finished[7].eq(1).all())\r\n samp.update_finished()\r\n self.assertEqual(\r\n samp.scores[8], [valid_score_dist_2[0] / temp])\r\n\r\n # step 3\r\n i = 2\r\n word_probs = torch.full(\r\n (batch_sz - 2, n_words), -float('inf'))\r\n # everything dies\r\n word_probs[:, eos_idx] = 0\r\n\r\n attns = torch.randn(1, batch_sz, 53)\r\n samp.advance(word_probs, attns)\r\n\r\n 
self.assertTrue(samp.is_finished.eq(1).all())\r\n samp.update_finished()\r\n for b in range(batch_sz):\r\n if b != 0 and b != 8:\r\n self.assertEqual(samp.scores[b], [0])\r\n self.assertTrue(samp.done)\r\n\r\n def test_returns_correct_scores_non_deterministic(self):\r\n for batch_sz in [1, 13]:\r\n for temp in [1., 3.]:\r\n n_words = 100\r\n _non_eos_idxs = [47, 51, 13, 88, 99]\r\n valid_score_dist_1 = torch.log_softmax(torch.tensor(\r\n [6., 5., 4., 3., 2., 1.]), dim=0)\r\n valid_score_dist_2 = torch.log_softmax(torch.tensor(\r\n [6., 1.]), dim=0)\r\n eos_idx = 2\r\n lengths = torch.randint(0, 30, (batch_sz,))\r\n samp = RandomSampling(\r\n 0, 1, 2, batch_sz, torch.device(\"cpu\"), 0,\r\n False, set(), False, 30, temp, 2, lengths)\r\n\r\n # initial step\r\n i = 0\r\n for _ in range(100):\r\n word_probs = torch.full(\r\n (batch_sz, n_words), -float('inf'))\r\n # batch 0 dies on step 0\r\n word_probs[0, eos_idx] = valid_score_dist_1[0]\r\n # include at least one prediction OTHER than EOS\r\n # that is greater than -1e20\r\n word_probs[0, _non_eos_idxs] = valid_score_dist_1[1:]\r\n word_probs[1:, _non_eos_idxs[0] + i] = 0\r\n\r\n attns = torch.randn(1, batch_sz, 53)\r\n samp.advance(word_probs, attns)\r\n if samp.is_finished[0].eq(1).all():\r\n break\r\n else:\r\n self.fail(\"Batch 0 never ended (very unlikely but maybe \"\r\n \"due to stochasticity). If so, please increase \"\r\n \"the range of the for-loop.\")\r\n samp.update_finished()\r\n self.assertEqual(\r\n samp.scores[0], [valid_score_dist_1[0] / temp])\r\n if batch_sz == 1:\r\n self.assertTrue(samp.done)\r\n continue\r\n else:\r\n self.assertFalse(samp.done)\r\n\r\n # step 2\r\n i = 1\r\n for _ in range(100):\r\n word_probs = torch.full(\r\n (batch_sz - 1, n_words), -float('inf'))\r\n # (old) batch 8 dies on step 1\r\n word_probs[7, eos_idx] = valid_score_dist_2[0]\r\n word_probs[0:7, _non_eos_idxs[:2]] = valid_score_dist_2\r\n word_probs[8:, _non_eos_idxs[:2]] = valid_score_dist_2\r\n\r\n attns = torch.randn(1, batch_sz, 53)\r\n samp.advance(word_probs, attns)\r\n if samp.is_finished[7].eq(1).all():\r\n break\r\n else:\r\n self.fail(\"Batch 8 never ended (very unlikely but maybe \"\r\n \"due to stochasticity). If so, please increase \"\r\n \"the range of the for-loop.\")\r\n\r\n samp.update_finished()\r\n self.assertEqual(\r\n samp.scores[8], [valid_score_dist_2[0] / temp])\r\n\r\n # step 3\r\n i = 2\r\n for _ in range(250):\r\n word_probs = torch.full(\r\n (samp.alive_seq.shape[0], n_words), -float('inf'))\r\n # everything dies\r\n word_probs[:, eos_idx] = 0\r\n\r\n attns = torch.randn(1, batch_sz, 53)\r\n samp.advance(word_probs, attns)\r\n if samp.is_finished.any():\r\n samp.update_finished()\r\n if samp.is_finished.eq(1).all():\r\n break\r\n else:\r\n self.fail(\"All batches never ended (very unlikely but \"\r\n \"maybe due to stochasticity). If so, please \"\r\n \"increase the range of the for-loop.\")\r\n\r\n for b in range(batch_sz):\r\n if b != 0 and b != 8:\r\n self.assertEqual(samp.scores[b], [0])\r\n self.assertTrue(samp.done)\r\n" ]
[ [ "torch.randint", "torch.randn", "torch.tensor", "torch.zeros", "torch.device" ] ]
ltriess/pointnet2_keras
[ "29be56161c8c772442b85b8fda300d10ff7fe7b3" ]
[ "pointnet2/modules/feature_propagation.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = \"Larissa Triess\"\n__email__ = \"[email protected]\"\n\n\nfrom typing import List\n\nimport tensorflow as tf\nfrom my_tf_ops.knn_op import k_nearest_neighbor_op as get_knn\n\nfrom ..layers.sample_and_group import group\n\n\nclass FeaturePropagationModule(tf.keras.models.Model):\n \"\"\"PointNet Feature Propagation Module\n\n Arguments:\n mlp_units : List[int]\n Output units for each point-wise mlp.\n feature_norm : str\n The feature normalization to use. Can be `batch` for batch normalization\n or `layer` for layer normalization. If None, no normalization is applied.\n num_neighbors : int\n The number of neighbors to consider for interpolation. Default is 3.\n\n Raises:\n ValueError if feature normalization is not valid.\n\n \"\"\"\n\n def __init__(\n self, mlp_units: List[int], feature_norm: str = None, num_neighbors: int = 3\n ):\n super().__init__()\n\n self.K = num_neighbors\n\n if feature_norm not in {None, \"batch\", \"layer\"}:\n raise ValueError(f\"Received unknown feature norm `{feature_norm}`!\")\n\n self.mlp = tf.keras.models.Sequential(name=\"feature_propagation_mlp\")\n for unit in mlp_units:\n self.mlp.add(\n tf.keras.layers.Conv1D(unit, kernel_size=1, strides=1, padding=\"valid\")\n )\n if feature_norm == \"batch\":\n self.mlp.add(tf.keras.layers.BatchNormalization())\n elif feature_norm == \"layer\":\n self.mlp.add(tf.keras.layers.LayerNormalization())\n else:\n pass\n self.mlp.add(tf.keras.layers.LeakyReLU())\n\n def get_neighbor_indices_and_distances(\n self, points_hr, points_lr, epsilon: float = 1e-10\n ) -> (tf.Tensor, tf.Tensor):\n \"\"\"Computes the indices and distances to the K nearest neighbors.\n\n We could use the knn op directly to get the squared distances with\n ```python\n indices, distances = tf.map_fn(\n lambda x: list(get_knn(x[0], x[1], self.K)),\n [points_lr, points_hr],\n dtype=[tf.int32, tf.float32],\n )\n ```\n But then the gradient propagation might in some cases not work properly.\n Therefore, we only get the indices from the op and subsequently re-compute\n the distances.\n\n Arguments:\n points_hr : tf.Tensor(shape=(B, M[i-1], 3), dtype=tf.float32)\n points_lr : tf.Tensor(shape=(B, M[i], 3), dtype=tf.float32)\n epsilon : float\n\n Returns:\n indices : tf.Tensor(shape=(B, M[i-1], K), dtype=tf.int32)\n distances : tf.Tensor(shape=(B, M[i-1], K), dtype=tf.float32)\n \"\"\"\n\n # indices: (B, M[i-1], K)\n indices = tf.map_fn(\n lambda x: get_knn(x[0], x[1], self.K)[0], # only return the indices\n [points_lr, points_hr], # points, queries\n dtype=tf.int32,\n )\n\n # points_lr (B, M[i], 3) (+) indices (B, M[i-1], K) --> (B, M[i-1], K, 3)\n grouped_points = group(points_lr, indices)\n\n # Compute the distances: sqrt[(x - x')^2 + (y - y')^2 + (z + z')^2]\n diff = points_hr[:, :, tf.newaxis, :] - grouped_points # (B, M[i-1], K, 3)\n distances = tf.norm(diff, axis=-1) # (B, M[i-1], K)\n\n distances = tf.maximum(distances, epsilon) # avoid diving by zero afterwards\n\n return indices, distances\n\n def call(\n self,\n inputs: List[tf.Tensor],\n training: tf.Tensor = None,\n mask: tf.Tensor = None,\n ):\n \"\"\"Call of PointNet Feature Propagation\n\n Arguments:\n inputs : List[tf.Tensor] of length 4\n Must contain the following tensors:\n [0] - tf.Tensor(shape=(B, M[i-1], 3), dtype=tf.float32)\n xyz-points at level i-1\n [1] - tf.Tensor(shape=(B, M[i], 3), dtype=tf.float32)\n xyz-points at level i\n [2] - tf.Tensor(shape=(B, M[i-1], C[i-1]), dtype=tf.float32)\n features at level i-1 (can be 
None)\n                [3] - tf.Tensor(shape=(B, M[i], C[i]), dtype=tf.float32)\n                    features at level i\n                M[x] is the number of points or neighborhoods at abstraction level x\n                with M[x] < M[x-1] (level i is sparser than level i-1).\n            training: tf.Tensor(shape=(), dtype=tf.bool)\n            mask: tf.Tensor\n\n        Returns:\n            tf.Tensor(shape=(B, M[i-1], mlp[-1]), dtype=tf.float32)\n        \"\"\"\n\n        if not isinstance(inputs, list):\n            raise ValueError(\"Inputs must be a list of tensors!\")\n        if not len(inputs) == 4:\n            raise ValueError(\n                \"Feature propagation module must be called with a list of four tensors. \"\n                \"See documentation!\"\n            )\n\n        points_hr = inputs[0]  # (B, M[i-1], 3)\n        points_lr = inputs[1]  # (B, M[i], 3)\n        features_hr = inputs[2]  # (B, M[i-1], C[i-1]) or None\n        features_lr = inputs[3]  # (B, M[i], C[i])\n\n        indices, distances = self.get_neighbor_indices_and_distances(\n            points_hr=points_hr, points_lr=points_lr\n        )  # 2x(B, M[i-1], K) with K=3\n\n        # Compute the weighting factor for each neighbor.\n        distances_inv = tf.divide(1.0, distances)  # (B, M[i-1], K)\n        weight = distances_inv / tf.reduce_sum(distances_inv, axis=-1, keepdims=True)\n\n        check = tf.compat.v1.assert_equal(\n            tf.shape(points_lr)[1],\n            tf.shape(features_lr)[1],\n            message=\"Number of points and number of features do not match!\",\n        )\n        with tf.control_dependencies([check]):\n            # Gather three features from points_lr to match one group in points_hr.\n            # (B, M[i], C) and (B, M[i-1], K) --> (B, M[i-1], K, C[i])\n            grouped_features = group(features_lr, indices)\n        # Interpolate the feature from the K neighbors.\n        # Weighted sum over K reduces dimension to (B, M[i-1], C[i]).\n        interpolated_features = tf.reduce_sum(\n            grouped_features * weight[..., tf.newaxis], axis=2\n        )\n\n        if features_hr is not None:\n            # Concatenate original and interpolated features to (B, M[i-1], C[i]+C[i-1]).\n            interpolated_features = tf.concat(\n                [interpolated_features, features_hr], axis=-1\n            )\n\n        # Compute new features from interpolations.\n        processed_features = self.mlp(\n            interpolated_features, training=training, mask=mask\n        )  # (B, M[i-1], mlp[-1])\n        return processed_features\n" ]
[ [ "tensorflow.keras.layers.LayerNormalization", "tensorflow.norm", "tensorflow.keras.models.Sequential", "tensorflow.divide", "tensorflow.shape", "tensorflow.keras.layers.Conv1D", "tensorflow.keras.layers.LeakyReLU", "tensorflow.keras.layers.BatchNormalization", "tensorflow.concat", "tensorflow.reduce_sum", "tensorflow.maximum", "tensorflow.control_dependencies" ] ]
ajyl/MIME
[ "7c34f3ae6dc8f8b9e6fb89b5bfa016fbaa445018" ]
[ "model/complex_res_gate.py" ]
[ "import torch\nimport torch.nn as nn\n\n\nclass ComplexResGate(nn.Module):\n def __init__(self, embedding_size):\n super(ComplexResGate, self).__init__()\n self.fc1 = nn.Linear(2*embedding_size, 2*embedding_size)\n self.fc2 = nn.Linear(2*embedding_size, embedding_size)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, m, m_tild):\n m_concat = torch.cat((m, m_tild), dim=2)\n x = self.fc1(m_concat)\n z = self.sigmoid(x)\n y = self.fc2(z * m_concat)\n# return y, None, None\n return y\n" ]
[ [ "torch.nn.Sigmoid", "torch.nn.Linear", "torch.cat" ] ]
rohitsanjay/radial_rl
[ "5daa30ec57319db8d0dd6bee10cf0f41832ef0f3" ]
[ "DQN/ibp.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\ndef initial_bounds(x0, epsilon):\n '''\n x0 = input, b x c x h x w\n '''\n upper = x0+epsilon\n lower = x0-epsilon\n return upper, lower\n\ndef weighted_bound(layer, prev_upper, prev_lower):\n prev_mu = (prev_upper + prev_lower)/2\n prev_r = (prev_upper - prev_lower)/2\n mu = layer(prev_mu)\n if type(layer)==nn.Linear:\n r = F.linear(prev_r, torch.abs(layer.weight))\n elif type(layer)==nn.Conv2d:\n r = F.conv2d(prev_r, torch.abs(layer.weight), stride=layer.stride, padding=layer.padding)\n \n upper = mu + r\n lower = mu - r\n return upper, lower\n\ndef activation_bound(layer, prev_upper, prev_lower):\n upper = layer(prev_upper)\n lower = layer(prev_lower)\n return upper, lower\n\ndef network_bounds(model, x0, epsilon):\n '''\n get inteval bound progation upper and lower bounds for the actiavtion of a model\n \n model: a nn.Sequential module\n x0: input, b x input_shape\n epsilon: float, the linf distance bound is calculated over\n '''\n upper, lower = initial_bounds(x0, epsilon)\n for layer in model.modules():\n if type(layer) in (nn.Sequential,):\n pass\n elif type(layer) in (nn.ReLU, nn.Sigmoid, nn.Tanh, nn.MaxPool2d, nn.Flatten):\n upper, lower = activation_bound(layer, upper, lower)\n elif type(layer) in (nn.Linear, nn.Conv2d):\n upper, lower = weighted_bound(layer, upper, lower)\n else:\n print('Unsupported layer:', type(layer))\n return upper, lower\n" ]
[ [ "torch.abs" ] ]
dn070017/GenEpi
[ "e6ee35e0b024408b80b75c25dd0b63c77a6e0339" ]
[ "genepi/tools/randomized_l1.py" ]
[ "\"\"\"\nNote:\nThis script is imported from scikit-learn 0.20.X,\nbecause scikit-learn 0.21.0 is no longer supported these functions\n\"\"\"\n\n\"\"\"\nRandomized Lasso/Logistic: feature selection based on Lasso and\nsparse Logistic Regression\n\"\"\"\n\n# Author: Gael Varoquaux, Alexandre Gramfort\n#\n# License: BSD 3 clause\n\nimport itertools\nfrom abc import ABCMeta, abstractmethod\n\nimport numpy as np\nfrom scipy.sparse import issparse\nfrom scipy import sparse\nfrom scipy.interpolate import interp1d\n\nfrom sklearn.linear_model.base import _preprocess_data\nfrom sklearn.base import BaseEstimator\nfrom sklearn.utils import Memory, Parallel, delayed\nfrom sklearn.feature_selection.base import SelectorMixin\nfrom sklearn.utils import (as_float_array, check_random_state, check_X_y, safe_mask, deprecated)\nfrom sklearn.utils.validation import check_is_fitted\nfrom sklearn.linear_model.least_angle import lars_path, LassoLarsIC\nfrom sklearn.linear_model.logistic import LogisticRegression\nfrom sklearn.exceptions import ConvergenceWarning\n\nfrom . import six\n\nimport warnings\nwarnings.filterwarnings('ignore')\n# ignore all future warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\n###############################################################################\n# Randomized linear model: feature selection\n\ndef _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,\n n_jobs=None, verbose=False, pre_dispatch='3*n_jobs',\n random_state=None, sample_fraction=.75, **params):\n random_state = check_random_state(random_state)\n # We are generating 1 - weights, and not weights\n n_samples, n_features = X.shape\n\n if not (0 < scaling < 1):\n raise ValueError(\n \"'scaling' should be between 0 and 1. Got %r instead.\" % scaling)\n\n scaling = 1. - scaling\n scores_ = 0.0\n for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,\n pre_dispatch=pre_dispatch)(\n delayed(estimator_func)(\n X, y, weights=scaling * random_state.randint(\n 0, 2, size=(n_features,)),\n mask=(random_state.rand(n_samples) < sample_fraction),\n verbose=max(0, verbose - 1),\n **params)\n for _ in range(n_resampling)):\n scores_ += active_set\n\n scores_ /= n_resampling\n return scores_\n\n\n@deprecated(\"The class BaseRandomizedLinearModel is deprecated in 0.19\"\n \" and will be removed in 0.21.\")\nclass BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,\n SelectorMixin)):\n \"\"\"Base class to implement randomized linear models for feature selection\n This implements the strategy by Meinshausen and Buhlman:\n stability selection with randomized sampling, and random re-weighting of\n the penalty.\n \"\"\"\n\n @abstractmethod\n def __init__(self):\n pass\n\n _preprocess_data = staticmethod(_preprocess_data)\n\n def fit(self, X, y):\n \"\"\"Fit the model using X, y as training data.\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n Training data.\n y : array-like, shape = [n_samples]\n Target values. 
Will be cast to X's dtype if necessary\n Returns\n -------\n self : object\n Returns an instance of self.\n \"\"\"\n X, y = check_X_y(X, y, ['csr', 'csc'], y_numeric=True,\n ensure_min_samples=2, estimator=self)\n X = as_float_array(X, copy=False)\n n_samples, n_features = X.shape\n\n X, y, X_offset, y_offset, X_scale = \\\n self._preprocess_data(X, y, self.fit_intercept, self.normalize)\n\n estimator_func, params = self._make_estimator_and_params(X, y)\n memory = self.memory\n if memory is None:\n memory = Memory(cachedir=None, verbose=0)\n elif isinstance(memory, six.string_types):\n memory = Memory(cachedir=memory, verbose=0)\n elif not isinstance(memory, Memory):\n raise ValueError(\"'memory' should either be a string or\"\n \" a sklearn.utils.Memory\"\n \" instance, got 'memory={!r}' instead.\".format(\n type(memory)))\n\n scores_ = memory.cache(\n _resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']\n )(\n estimator_func, X, y,\n scaling=self.scaling, n_resampling=self.n_resampling,\n n_jobs=self.n_jobs, verbose=self.verbose,\n pre_dispatch=self.pre_dispatch, random_state=self.random_state,\n sample_fraction=self.sample_fraction, **params)\n\n if scores_.ndim == 1:\n scores_ = scores_[:, np.newaxis]\n self.all_scores_ = scores_\n self.scores_ = np.max(self.all_scores_, axis=1)\n return self\n\n def _make_estimator_and_params(self, X, y):\n \"\"\"Return the parameters passed to the estimator\"\"\"\n raise NotImplementedError\n\n def _get_support_mask(self):\n \"\"\"Get the boolean mask indicating which features are selected.\n Returns\n -------\n support : boolean array of shape [# input features]\n An element is True iff its corresponding feature is selected\n for retention.\n \"\"\"\n check_is_fitted(self, 'scores_')\n return self.scores_ > self.selection_threshold\n\n\n###############################################################################\n# Randomized lasso: regression settings\n\ndef _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,\n precompute=False, eps=np.finfo(np.float).eps,\n max_iter=500):\n X = X[safe_mask(X, mask)]\n y = y[mask]\n\n # Center X and y to avoid fit the intercept\n X -= X.mean(axis=0)\n y -= y.mean()\n\n alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float64))\n\n X = (1 - weights) * X\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', ConvergenceWarning)\n alphas_, _, coef_ = lars_path(X, y,\n Gram=precompute, copy_X=False,\n copy_Gram=False, alpha_min=np.min(alpha),\n method='lasso', verbose=verbose,\n max_iter=max_iter, eps=eps)\n\n if len(alpha) > 1:\n if len(alphas_) > 1: # np.min(alpha) < alpha_min\n interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],\n bounds_error=False, fill_value=0.)\n scores = (interpolator(alpha) != 0.0)\n else:\n scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)\n else:\n scores = coef_[:, -1] != 0.0\n return scores\n\n\n@deprecated(\"The class RandomizedLasso is deprecated in 0.19\"\n \" and will be removed in 0.21.\")\nclass RandomizedLasso(BaseRandomizedLinearModel):\n \"\"\"Randomized Lasso.\n Randomized Lasso works by subsampling the training data and\n computing a Lasso estimate where the penalty of a random subset of\n coefficients has been scaled. By performing this double\n randomization several times, the method assigns high scores to\n features that are repeatedly selected across randomizations. This\n is known as stability selection. 
In short, features selected more\n often are considered good features.\n Parameters\n ----------\n alpha : float, 'aic', or 'bic', optional\n The regularization parameter alpha parameter in the Lasso.\n Warning: this is not the alpha parameter in the stability selection\n article which is scaling.\n scaling : float, optional\n The s parameter used to randomly scale the penalty of different\n features.\n Should be between 0 and 1.\n sample_fraction : float, optional\n The fraction of samples to be used in each randomized design.\n Should be between 0 and 1. If 1, all samples are used.\n n_resampling : int, optional\n Number of randomized models.\n selection_threshold : float, optional\n The score above which features should be selected.\n fit_intercept : boolean, optional\n whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (e.g. data is expected to be already centered).\n verbose : boolean or integer, optional\n Sets the verbosity amount\n normalize : boolean, optional, default True\n If True, the regressors X will be normalized before regression.\n This parameter is ignored when `fit_intercept` is set to False.\n When the regressors are normalized, note that this makes the\n hyperparameters learned more robust and almost independent of\n the number of samples. The same property is not valid for\n standardized data. However, if you wish to standardize, please\n use `preprocessing.StandardScaler` before calling `fit` on an\n estimator with `normalize=False`.\n precompute : True | False | 'auto' | array-like\n Whether to use a precomputed Gram matrix to speed up calculations.\n If set to 'auto' let us decide.\n The Gram matrix can also be passed as argument, but it will be used\n only for the selection of parameter alpha, if alpha is 'aic' or 'bic'.\n max_iter : integer, optional\n Maximum number of iterations to perform in the Lars algorithm.\n eps : float, optional\n The machine-precision regularization in the computation of the\n Cholesky diagonal factors. Increase this for very ill-conditioned\n systems. Unlike the 'tol' parameter in some iterative\n optimization-based algorithms, this parameter does not control\n the tolerance of the optimization.\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n n_jobs : int or None, optional (default=None)\n Number of CPUs to use during the resampling.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n pre_dispatch : int, or string, optional\n Controls the number of jobs that get dispatched during parallel\n execution. Reducing this number can be useful to avoid an\n explosion of memory consumption when more jobs get dispatched\n than CPUs can process. This parameter can be:\n - None, in which case all the jobs are immediately\n created and spawned. Use this for lightweight and\n fast-running jobs, to avoid delays due to on-demand\n spawning of the jobs\n - An int, giving the exact number of total jobs that are\n spawned\n - A string, giving an expression as a function of n_jobs,\n as in '2*n_jobs'\n memory : None, str or object with the joblib.Memory interface, optional \\\n (default=None)\n Used for internal caching. 
By default, no caching is done.\n        If a string is given, it is the path to the caching directory.\n    Attributes\n    ----------\n    scores_ : array, shape = [n_features]\n        Feature scores between 0 and 1.\n    all_scores_ : array, shape = [n_features, n_reg_parameter]\n        Feature scores between 0 and 1 for all values of the regularization \\\n        parameter. The reference article suggests ``scores_`` is the max of \\\n        ``all_scores_``.\n    Examples\n    --------\n    >>> from sklearn.linear_model import RandomizedLasso\n    >>> randomized_lasso = RandomizedLasso() # doctest: +SKIP\n    References\n    ----------\n    Stability selection\n    Nicolai Meinshausen, Peter Buhlmann\n    Journal of the Royal Statistical Society: Series B\n    Volume 72, Issue 4, pages 417-473, September 2010\n    DOI: 10.1111/j.1467-9868.2010.00740.x\n    See also\n    --------\n    RandomizedLogisticRegression, Lasso, ElasticNet\n    \"\"\"\n    def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,\n                 n_resampling=200, selection_threshold=.25,\n                 fit_intercept=True, verbose=False,\n                 normalize=True, precompute='auto',\n                 max_iter=500,\n                 eps=np.finfo(np.float).eps, random_state=None,\n                 n_jobs=None, pre_dispatch='3*n_jobs',\n                 memory=None):\n        self.alpha = alpha\n        self.scaling = scaling\n        self.sample_fraction = sample_fraction\n        self.n_resampling = n_resampling\n        self.fit_intercept = fit_intercept\n        self.max_iter = max_iter\n        self.verbose = verbose\n        self.normalize = normalize\n        self.precompute = precompute\n        self.eps = eps\n        self.random_state = random_state\n        self.n_jobs = n_jobs\n        self.selection_threshold = selection_threshold\n        self.pre_dispatch = pre_dispatch\n        self.memory = memory\n\n    def _make_estimator_and_params(self, X, y):\n        alpha = self.alpha\n        if isinstance(alpha, six.string_types) and alpha in ('aic', 'bic'):\n            model = LassoLarsIC(precompute=self.precompute,\n                                criterion=self.alpha,\n                                max_iter=self.max_iter,\n                                eps=self.eps)\n            model.fit(X, y)\n            self.alpha_ = alpha = model.alpha_\n\n        precompute = self.precompute\n        # A precomputed Gram array is useless, since _randomized_lasso\n        # changes X at each iteration\n        if hasattr(precompute, '__array__'):\n            precompute = 'auto'\n        assert precompute in (True, False, None, 'auto')\n        return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,\n                                       eps=self.eps,\n                                       precompute=precompute)\n\n\n###############################################################################\n# Randomized logistic: classification settings\n\ndef _randomized_logistic(X, y, weights, mask, C=1., verbose=False,\n                         fit_intercept=True, tol=1e-3):\n    X = X[safe_mask(X, mask)]\n    y = y[mask]\n    if issparse(X):\n        size = len(weights)\n        weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))\n        X = X * weight_dia\n    else:\n        X *= (1 - weights)\n\n    C = np.atleast_1d(np.asarray(C, dtype=np.float64))\n    if C.ndim > 1:\n        raise ValueError(\"C should be 1-dimensional array-like, \"\n                         \"but got a {}-dimensional array-like instead: {}.\"\n                         .format(C.ndim, C))\n\n    scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)\n\n    for this_C, this_scores in zip(C, scores.T):\n        # XXX : would be great to do it with a warm_start ...\n        clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,\n                                 fit_intercept=fit_intercept,\n                                 solver='liblinear', multi_class='ovr')\n        clf.fit(X, y)\n        this_scores[:] = np.any(\n            np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)\n    return scores\n\n\n@deprecated(\"The class RandomizedLogisticRegression is deprecated in 0.19\"\n            \" and will be removed in 0.21.\")\nclass RandomizedLogisticRegression(BaseRandomizedLinearModel):\n    
\"\"\"Randomized Logistic Regression\n Randomized Logistic Regression works by subsampling the training\n data and fitting a L1-penalized LogisticRegression model where the\n penalty of a random subset of coefficients has been scaled. By\n performing this double randomization several times, the method\n assigns high scores to features that are repeatedly selected across\n randomizations. This is known as stability selection. In short,\n features selected more often are considered good features.\n Parameters\n ----------\n C : float or array-like of shape [n_reg_parameter], optional, default=1\n The regularization parameter C in the LogisticRegression.\n When C is an array, fit will take each regularization parameter in C\n one by one for LogisticRegression and store results for each one\n in ``all_scores_``, where columns and rows represent corresponding\n reg_parameters and features.\n scaling : float, optional, default=0.5\n The s parameter used to randomly scale the penalty of different\n features.\n Should be between 0 and 1.\n sample_fraction : float, optional, default=0.75\n The fraction of samples to be used in each randomized design.\n Should be between 0 and 1. If 1, all samples are used.\n n_resampling : int, optional, default=200\n Number of randomized models.\n selection_threshold : float, optional, default=0.25\n The score above which features should be selected.\n tol : float, optional, default=1e-3\n tolerance for stopping criteria of LogisticRegression\n fit_intercept : boolean, optional, default=True\n whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (e.g. data is expected to be already centered).\n verbose : boolean or integer, optional\n Sets the verbosity amount\n normalize : boolean, optional, default True\n If True, the regressors X will be normalized before regression.\n This parameter is ignored when `fit_intercept` is set to False.\n When the regressors are normalized, note that this makes the\n hyperparameters learnt more robust and almost independent of the number\n of samples. The same property is not valid for standardized data.\n However, if you wish to standardize, please use\n `preprocessing.StandardScaler` before calling `fit` on an estimator\n with `normalize=False`.\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n If None, the random number generator is the RandomState instance used\n by `np.random`.\n n_jobs : int or None, optional (default=None)\n Number of CPUs to use during the resampling.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n pre_dispatch : int, or string, optional\n Controls the number of jobs that get dispatched during parallel\n execution. Reducing this number can be useful to avoid an\n explosion of memory consumption when more jobs get dispatched\n than CPUs can process. This parameter can be:\n - None, in which case all the jobs are immediately\n created and spawned. 
Use this for lightweight and\n fast-running jobs, to avoid delays due to on-demand\n spawning of the jobs\n - An int, giving the exact number of total jobs that are\n spawned\n - A string, giving an expression as a function of n_jobs,\n as in '2*n_jobs'\n memory : None, str or object with the joblib.Memory interface, optional \\\n (default=None)\n Used for internal caching. By default, no caching is done.\n If a string is given, it is the path to the caching directory.\n Attributes\n ----------\n scores_ : array, shape = [n_features]\n Feature scores between 0 and 1.\n all_scores_ : array, shape = [n_features, n_reg_parameter]\n Feature scores between 0 and 1 for all values of the regularization \\\n parameter. The reference article suggests ``scores_`` is the max \\\n of ``all_scores_``.\n Examples\n --------\n >>> from sklearn.linear_model import RandomizedLogisticRegression\n >>> randomized_logistic = RandomizedLogisticRegression() # doctest: +SKIP\n References\n ----------\n Stability selection\n Nicolai Meinshausen, Peter Buhlmann\n Journal of the Royal Statistical Society: Series B\n Volume 72, Issue 4, pages 417-473, September 2010\n DOI: 10.1111/j.1467-9868.2010.00740.x\n See also\n --------\n RandomizedLasso, LogisticRegression\n \"\"\"\n def __init__(self, C=1, scaling=.5, sample_fraction=.75,\n n_resampling=200,\n selection_threshold=.25, tol=1e-3,\n fit_intercept=True, verbose=False,\n normalize=True,\n random_state=None,\n n_jobs=None, pre_dispatch='3*n_jobs',\n memory=None):\n self.C = C\n self.scaling = scaling\n self.sample_fraction = sample_fraction\n self.n_resampling = n_resampling\n self.fit_intercept = fit_intercept\n self.verbose = verbose\n self.normalize = normalize\n self.tol = tol\n self.random_state = random_state\n self.n_jobs = n_jobs\n self.selection_threshold = selection_threshold\n self.pre_dispatch = pre_dispatch\n self.memory = memory\n\n def _make_estimator_and_params(self, X, y):\n params = dict(C=self.C, tol=self.tol,\n fit_intercept=self.fit_intercept)\n return _randomized_logistic, params\n\n def _preprocess_data(self, X, y, fit_intercept, normalize=False):\n \"\"\"Center the data in X but not in y\"\"\"\n X, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,\n normalize=normalize)\n return X, y, X_offset, y, X_scale\n\n\n###############################################################################\n# Stability paths\ndef _lasso_stability_path(X, y, mask, weights, eps):\n \"Inner loop of lasso_stability_path\"\n X = X * weights[np.newaxis, :]\n X = X[safe_mask(X, mask), :]\n y = y[mask]\n\n alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]\n alpha_min = eps * alpha_max # set for early stopping in path\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', ConvergenceWarning)\n alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,\n alpha_min=alpha_min)\n # Scale alpha by alpha_max\n alphas /= alphas[0]\n # Sort alphas in ascending order\n alphas = alphas[::-1]\n coefs = coefs[:, ::-1]\n # Get rid of the alphas that are too small\n mask = alphas >= eps\n # We also want to keep the first one: it should be close to the OLS\n # solution\n mask[0] = True\n alphas = alphas[mask]\n coefs = coefs[:, mask]\n return alphas, coefs\n\n\n@deprecated(\"The function lasso_stability_path is deprecated in 0.19\"\n \" and will be removed in 0.21.\")\ndef lasso_stability_path(X, y, scaling=0.5, random_state=None,\n n_resampling=200, n_grid=100,\n sample_fraction=0.75,\n eps=4 * np.finfo(np.float64).eps, n_jobs=None,\n 
verbose=False):\n \"\"\"Stability path based on randomized Lasso estimates\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n training data.\n y : array-like, shape = [n_samples]\n target values.\n scaling : float, optional, default=0.5\n The alpha parameter in the stability selection article used to\n randomly scale the features. Should be between 0 and 1.\n random_state : int, RandomState instance or None, optional, default=None\n The generator used to randomize the design. If int, random_state is\n the seed used by the random number generator; If RandomState instance,\n random_state is the random number generator; If None, the random number\n generator is the RandomState instance used by `np.random`.\n n_resampling : int, optional, default=200\n Number of randomized models.\n n_grid : int, optional, default=100\n Number of grid points. The path is linearly reinterpolated\n on a grid between 0 and 1 before computing the scores.\n sample_fraction : float, optional, default=0.75\n The fraction of samples to be used in each randomized design.\n Should be between 0 and 1. If 1, all samples are used.\n eps : float, optional\n Smallest value of alpha / alpha_max considered\n n_jobs : int or None, optional (default=None)\n Number of CPUs to use during the resampling.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary <n_jobs>`\n for more details.\n verbose : boolean or integer, optional\n Sets the verbosity amount\n Returns\n -------\n alphas_grid : array, shape ~ [n_grid]\n The grid points between 0 and 1: alpha/alpha_max\n scores_path : array, shape = [n_features, n_grid]\n The scores for each feature along the path.\n \"\"\"\n X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'])\n rng = check_random_state(random_state)\n\n if not (0 < scaling < 1):\n raise ValueError(\"Parameter 'scaling' should be between 0 and 1.\"\n \" Got %r instead.\" % scaling)\n\n n_samples, n_features = X.shape\n\n paths = Parallel(n_jobs=n_jobs, verbose=verbose)(\n delayed(_lasso_stability_path)(\n X, y, mask=rng.rand(n_samples) < sample_fraction,\n weights=1. - scaling * rng.randint(0, 2, size=(n_features,)),\n eps=eps)\n for k in range(n_resampling))\n\n all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))\n # Take approximately n_grid values\n stride = int(max(1, int(len(all_alphas) / float(n_grid))))\n all_alphas = all_alphas[::stride]\n if not all_alphas[-1] == 1:\n all_alphas.append(1.)\n all_alphas = np.array(all_alphas)\n scores_path = np.zeros((n_features, len(all_alphas)))\n\n for alphas, coefs in paths:\n if alphas[0] != 0:\n alphas = np.r_[0, alphas]\n coefs = np.c_[np.ones((n_features, 1)), coefs]\n if alphas[-1] != all_alphas[-1]:\n alphas = np.r_[alphas, all_alphas[-1]]\n coefs = np.c_[coefs, np.zeros((n_features, 1))]\n scores_path += (interp1d(alphas, coefs,\n kind='nearest', bounds_error=False,\n fill_value=0, axis=-1)(all_alphas) != 0)\n\n scores_path /= n_resampling\n return all_alphas, scores_path" ]
[ [ "sklearn.utils.validation.check_is_fitted", "numpy.ones", "sklearn.utils.as_float_array", "scipy.interpolate.interp1d", "numpy.asarray", "sklearn.utils.deprecated", "scipy.sparse.dia_matrix", "sklearn.utils.safe_mask", "sklearn.linear_model.logistic.LogisticRegression", "numpy.abs", "sklearn.linear_model.least_angle.lars_path", "sklearn.utils.check_random_state", "numpy.zeros", "sklearn.utils.Parallel", "numpy.max", "numpy.min", "sklearn.utils.Memory", "numpy.finfo", "sklearn.linear_model.least_angle.LassoLarsIC", "scipy.sparse.issparse", "sklearn.utils.delayed", "numpy.array", "sklearn.utils.check_X_y", "numpy.dot" ] ]
tdye24/LightningFL
[ "48bb4a452082411e051cdb3a2e98ede6bbc91bbf" ]
[ "models/fedsp/mnist/MNIST.py" ]
[ "import torch\r\nimport torch.nn as nn\r\n\r\n\r\nclass MNIST(nn.Module):\r\n def __init__(self):\r\n super(MNIST, self).__init__()\r\n self.shared_encoder = torch.nn.Sequential(\r\n nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, padding=2),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(2, 2),\r\n nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, padding=2),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(2, 2),\r\n nn.Flatten()\r\n )\r\n\r\n self.private_encoder = torch.nn.Sequential(\r\n nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, padding=2),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(2, 2),\r\n nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, padding=2),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(2, 2),\r\n nn.Flatten()\r\n )\r\n\r\n self.clf = torch.nn.Sequential(\r\n torch.nn.Linear(64*7*7*2, 512), # 乘2因为global_feat和local_feat拼在一起\r\n torch.nn.ReLU(inplace=True),\r\n torch.nn.Linear(512, 10)\r\n )\r\n\r\n def forward(self, x):\r\n gFeature = self.shared_encoder(x)\r\n lFeature = self.private_encoder(x)\r\n\r\n feature = torch.cat((gFeature, lFeature), dim=-1)\r\n output = self.clf(feature)\r\n return output\r\n\r\n\r\nif __name__ == '__main__':\r\n model = MNIST()\r\n _x = torch.rand((50, 1, 28, 28))\r\n _output = model(_x)\r\n print(f'{_x.shape}->{_output.shape}')\r\n print(\"Parameters in total {}\".format(sum(x.numel() for x in model.parameters())))\r\n\r\n print(\"Comm.\")\r\n total = 0\r\n for key, param in model.named_parameters():\r\n if key.startswith('shared'):\r\n total += param.numel()\r\n print(\"Comm. Parameters {}\".format(total))" ]
[ [ "torch.nn.MaxPool2d", "torch.nn.Linear", "torch.nn.Flatten", "torch.rand", "torch.nn.Conv2d", "torch.nn.ReLU", "torch.cat" ] ]
fchapoton/cars
[ "c145e12c8b984d5c496c29cff474628044f6216e" ]
[ "cars/steps/rasterization.py" ]
[ "#!/usr/bin/env python\n# coding: utf8\n#\n# Copyright (c) 2020 Centre National d'Etudes Spatiales (CNES).\n#\n# This file is part of CARS\n# (see https://github.com/CNES/cars).\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nThis module is reponsible for the rasterization step:\n- it contains all functions related to 3D representation on a 2D raster grid\nTODO: refactor in several files and remove too-many-lines\n\"\"\"\n# pylint: disable=too-many-lines\n\n# Standard imports\nimport logging\nimport math\nimport time\nimport warnings\nfrom typing import List, Tuple, Union\n\n# Third party imports\nimport numpy as np\nimport pandas\nimport xarray as xr\nfrom numba import boolean, float64, int64, njit\nfrom numba.core.errors import NumbaPerformanceWarning\nfrom osgeo import osr\nfrom scipy.spatial import cKDTree # pylint: disable=no-name-in-module\n\n# CARS imports\nfrom cars.core import constants as cst\nfrom cars.core import projection\nfrom cars.steps import points_cloud\n\nwarnings.filterwarnings(\"ignore\", category=NumbaPerformanceWarning)\n\n\ndef compute_xy_starts_and_sizes(\n resolution: float, cloud: pandas.DataFrame\n) -> Tuple[float, float, int, int]:\n \"\"\"\n Compute xstart, ystart, xsize and ysize\n of the rasterization grid from a set of points\n\n :param resolution: Resolution of rasterized cells,\n expressed in cloud CRS units\n :param cloud: set of points as returned\n by the create_combined_cloud function\n :return: a tuple (xstart, ystart, xsize, ysize)\n \"\"\"\n worker_logger = logging.getLogger(\"distributed.worker\")\n\n # Derive xstart\n xmin = np.nanmin(cloud[cst.X].values)\n xmax = np.nanmax(cloud[cst.X].values)\n worker_logger.debug(\"Points x coordinate range: [{},{}]\".format(xmin, xmax))\n\n # Clamp to a regular grid\n x_start = np.floor(xmin / resolution) * resolution\n x_size = int(1 + np.floor((xmax - x_start) / resolution))\n\n # Derive ystart\n ymin = np.nanmin(cloud[cst.Y].values)\n ymax = np.nanmax(cloud[cst.Y].values)\n worker_logger.debug(\"Points y coordinate range: [{},{}]\".format(ymin, ymax))\n\n # Clamp to a regular grid\n y_start = np.ceil(ymax / resolution) * resolution\n y_size = int(1 + np.floor((y_start - ymin) / resolution))\n\n return x_start, y_start, x_size, y_size\n\n\ndef simple_rasterization_dataset(\n cloud_list: List[xr.Dataset],\n resolution: float,\n epsg: int,\n color_list: List[xr.Dataset] = None,\n xstart: float = None,\n ystart: float = None,\n xsize: int = None,\n ysize: int = None,\n sigma: float = None,\n radius: int = 1,\n margin: int = 0,\n dsm_no_data: int = np.nan,\n color_no_data: int = np.nan,\n msk_no_data: int = 65535,\n grid_points_division_factor: int = None,\n small_cpn_filter_params: Union[\n None, points_cloud.SmallComponentsFilterParams\n ] = None,\n statistical_filter_params: Union[\n None, points_cloud.StatisticalFilterParams\n ] = None,\n dump_filter_cloud: bool = False,\n) -> Union[xr.Dataset, Tuple[xr.Dataset, pandas.DataFrame]]:\n \"\"\"\n Wrapper of simple_rasterization\n that has 
xarray.Dataset as inputs and outputs.\n\n    :param cloud_list: list of cloud points to rasterize\n    :param resolution: Resolution of rasterized cells,\n        expressed in cloud CRS units or None\n    :param epsg: epsg code for the CRS of the final raster\n    :param color_list: Additional list of images\n        with bands to rasterize (same size as cloud_list), or None\n    :param xstart: xstart of the rasterization grid\n        (if None, will be estimated by the function)\n    :param ystart: ystart of the rasterization grid\n        (if None, will be estimated by the function)\n    :param xsize: xsize of the rasterization grid\n        (if None, will be estimated by the function)\n    :param ysize: ysize of the rasterization grid\n        (if None, will be estimated by the function)\n    :param sigma: sigma for gaussian interpolation.\n        (If None, set to resolution)\n    :param radius: Radius for hole filling.\n    :param margin: Margin used to invalidate cells too close to epipolar border.\n        Can only be used if input lists are of size 1.\n    :param dsm_no_data: no data value to use in the final raster\n    :param color_no_data: no data value to use in the final colored raster\n    :param msk_no_data: no data value to use in the final mask image\n    :param grid_points_division_factor: number of blocs to use to divide\n        the grid points (memory optimization, reduce the highest memory peak).\n        If it is not set, the factor is automatically set to construct\n        700000 points blocs.\n    :param small_cpn_filter_params: small components filtering parameters\n    :param statistical_filter_params: statistical filtering parameters\n    :param dump_filter_cloud: activate to dump filtered cloud\n        alongside rasterized cloud and color\n    :return: Rasterized cloud and Color\n        (in a tuple with the filtered cloud if dump_filter_cloud is activated)\n    \"\"\"\n\n    if small_cpn_filter_params is None:\n        on_ground_margin = 0\n    else:\n        on_ground_margin = small_cpn_filter_params.on_ground_margin\n\n    # combined clouds\n    roi = (\n        resolution is not None\n        and xstart is not None\n        and ystart is not None\n        and xsize is not None\n        and ysize is not None\n    )\n    cloud, cloud_epsg = points_cloud.create_combined_cloud(\n        cloud_list,\n        epsg,\n        resolution=resolution,\n        xstart=xstart,\n        ystart=ystart,\n        xsize=xsize,\n        ysize=ysize,\n        color_list=color_list,\n        on_ground_margin=on_ground_margin,\n        epipolar_border_margin=margin,\n        radius=radius,\n        with_coords=True,\n    )\n\n    # filter combined cloud\n    if small_cpn_filter_params is not None:\n        worker_logger = logging.getLogger(\"distributed.worker\")\n\n        spatial_ref = osr.SpatialReference()\n        spatial_ref.ImportFromEPSG(cloud_epsg)\n        if spatial_ref.IsGeographic():\n            worker_logger.warning(\n                \"The points cloud to filter is not in a cartographic system. \"\n                \"The filter's default parameters might not be adapted \"\n                \"to this referential. 
Convert the points \"\n \"cloud to ECEF to ensure a proper points_cloud.\"\n )\n tic = time.process_time()\n cloud, filtered_elt_pos_infos = points_cloud.small_components_filtering(\n cloud,\n small_cpn_filter_params.connection_val,\n small_cpn_filter_params.nb_pts_threshold,\n small_cpn_filter_params.clusters_distance_threshold,\n filtered_elt_pos=small_cpn_filter_params.filtered_elt_msk,\n )\n toc = time.process_time()\n worker_logger.debug(\n \"Small components cloud filtering done in {} seconds\".format(\n toc - tic\n )\n )\n\n if small_cpn_filter_params.filtered_elt_msk:\n points_cloud.add_cloud_filtering_msk(\n cloud_list,\n filtered_elt_pos_infos,\n \"filtered_elt_mask\",\n small_cpn_filter_params.msk_value,\n )\n\n if statistical_filter_params is not None:\n worker_logger = logging.getLogger(\"distributed.worker\")\n\n spatial_ref = osr.SpatialReference()\n spatial_ref.ImportFromEPSG(cloud_epsg)\n if spatial_ref.IsGeographic():\n worker_logger.warning(\n \"The points cloud to filter is not in a cartographic system. \"\n \"The filter's default parameters might not be adapted \"\n \"to this referential. Convert the points \"\n \"cloud to ECEF to ensure a proper filtering.\"\n )\n tic = time.process_time()\n (\n cloud,\n filtered_elt_pos_infos,\n ) = points_cloud.statistical_outliers_filtering(\n cloud,\n statistical_filter_params.k,\n statistical_filter_params.std_dev_factor,\n filtered_elt_pos=statistical_filter_params.filtered_elt_msk,\n )\n toc = time.process_time()\n worker_logger.debug(\n \"Statistical cloud filtering done in {} seconds\".format(toc - tic)\n )\n\n if statistical_filter_params.filtered_elt_msk:\n points_cloud.add_cloud_filtering_msk(\n cloud_list,\n filtered_elt_pos_infos,\n \"filtered_elt_mask\",\n statistical_filter_params.msk_value,\n )\n # If the points cloud is not in the right epsg referential, it is converted\n if cloud_epsg != epsg:\n projection.points_cloud_conversion_dataframe(cloud, cloud_epsg, epsg)\n\n # compute roi from the combined clouds if it is not set\n if not roi:\n xstart, ystart, xsize, ysize = compute_xy_starts_and_sizes(\n resolution, cloud\n )\n\n # rasterize clouds\n raster = rasterize(\n cloud,\n resolution,\n epsg,\n x_start=xstart,\n y_start=ystart,\n x_size=xsize,\n y_size=ysize,\n sigma=sigma,\n radius=radius,\n hgt_no_data=dsm_no_data,\n color_no_data=color_no_data,\n msk_no_data=msk_no_data,\n grid_points_division_factor=grid_points_division_factor,\n )\n\n if dump_filter_cloud:\n return raster, cloud\n\n return raster\n\n\ndef compute_values_1d(\n x_start: float, y_start: float, x_size: int, y_size: int, resolution: float\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Compute the x and y values as 1d arrays\n\n :param x_start: x start of the rasterization grid\n :param y_start: y start of the rasterization grid\n :param x_size: x size of the rasterization grid\n :param y_size: y size of the rasterization grid\n :param resolution: Resolution of rasterized cells,\n in cloud CRS units or None.\n :return: a tuple composed of the x and y 1d arrays\n \"\"\"\n x_values_1d = np.linspace(\n x_start + 0.5 * resolution,\n x_start + resolution * (x_size + 0.5),\n x_size,\n endpoint=False,\n )\n y_values_1d = np.linspace(\n y_start - 0.5 * resolution,\n y_start - resolution * (y_size + 0.5),\n y_size,\n endpoint=False,\n )\n\n return x_values_1d, y_values_1d\n\n\ndef compute_grid_points(\n x_start: float, y_start: float, x_size: int, y_size: int, resolution: float\n) -> np.ndarray:\n \"\"\"\n Compute the grid points\n\n :param x_start: x 
start of the rasterization grid\n    :param y_start: y start of the rasterization grid\n    :param x_size: x size of the rasterization grid\n    :param y_size: y size of the rasterization grid\n    :param resolution: Resolution of rasterized cells,\n        expressed in cloud CRS units or None.\n    :return: Grid point as a numpy array\n    \"\"\"\n\n    x_values_1d, y_values_1d = compute_values_1d(\n        x_start, y_start, x_size, y_size, resolution\n    )\n    x_values_2d, y_values_2d = np.meshgrid(x_values_1d, y_values_1d)\n    grid_points = np.stack((x_values_2d, y_values_2d), axis=2).reshape(-1, 2)\n\n    return grid_points\n\n\ndef flatten_index_list(nd_list):\n    \"\"\"\n    Converts neighbors indices jagged array into a linear 1d array and\n    the number of neighbors for each grid point.\n\n    :param nd_list: indices of each neighbor.\n    :type nd_list: list of list of int.\n    :return: the flattened neighbors ids list\n        and the list of neighbors count for each grid point.\n    :rtype: a tuple of 2 1d int64 numpy.ndarray.\n    \"\"\"\n    lengths = np.array([len(sublist) for sublist in nd_list])  # number of neighbors\n    list_1d = np.concatenate(nd_list).astype(int)\n\n    return list_1d, lengths\n\n\ndef search_neighbors(\n    grid_points: np.ndarray,\n    cloud_tree: cKDTree,\n    radius: int,\n    resolution: float,\n    worker_logger: logging.Logger,\n) -> List[List[int]]:\n    \"\"\"\n    Search for neighbors of the grid points in the cloud kdTree\n\n    :param grid_points: Grid points\n    :param cloud_tree: Points cloud kdTree\n    :param radius: Radius for hole filling.\n    :param resolution: Resolution of rasterized cells,\n        expressed in cloud CRS units or None.\n    :param worker_logger: logger\n    :return: The list of neighbors\n    \"\"\"\n    # build a kD-tree with rasterization grid cells center coordinates\n    tic = time.process_time()\n    grid_tree = cKDTree(grid_points)\n    toc = time.process_time()\n    worker_logger.debug(\n        \"Neighbors search: \"\n        \"Grid point kD-tree built in {} seconds\".format(toc - tic)\n    )\n\n    # perform neighborhood query for all grid points\n    tic = time.process_time()\n    neighbors_list = grid_tree.query_ball_tree(\n        cloud_tree, (radius + 0.5) * resolution\n    )\n    toc = time.process_time()\n    worker_logger.debug(\n        \"Neighbors search: Neighborhood query done in {} seconds\".format(\n            toc - tic\n        )\n    )\n\n    return neighbors_list\n\n\ndef get_flatten_neighbors(\n    grid_points: np.ndarray,\n    cloud: pandas.DataFrame,\n    radius: int,\n    resolution: float,\n    worker_logger: logging.Logger,\n    grid_points_division_factor: int = None,\n) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n    \"\"\"\n    Get the grid point neighbors of the cloud as flatten array.\n\n    This is done by slicing the grid points by blocs in\n    order to reduce the memory peak induced by the list\n    of neighbors retrieved from the kdTree query done in the\n    search_neighbors function.\n\n    :param grid_points: Grid points\n    :param cloud: Combined cloud\n        as returned by the create_combined_cloud function\n    :param radius: Radius for hole filling.\n    :param resolution: Resolution of rasterized cells,\n        expressed in cloud CRS units or None.\n    :param worker_logger: logger\n    :param grid_points_division_factor: number of blocs to use to divide\n        the grid points (memory optimization, reduce the highest memory peak).\n        If it is not set,\n        the factor is automatically set to construct 700000 points blocs.\n    :return: the flattened neighbors ids list, the list start index for each\n        grid point and the list of neighbors count for each grid point.\n    \"\"\"\n    # Build a kD-tree with cloud points coordinates.\n    tic = time.process_time()\n    
cloud_tree = cKDTree(cloud.loc[:, [cst.X, cst.Y]].values)\n    toc = time.process_time()\n    worker_logger.debug(\n        \"Neighbors search: Point cloud kD-tree built in {} seconds\".format(\n            toc - tic\n        )\n    )\n\n    # compute blocs indexes (memory optimization)\n    nb_grid_points = grid_points.shape[0]\n\n    if grid_points_division_factor is None:\n        default_bloc_size = 700000\n        grid_points_division_factor = math.ceil(\n            nb_grid_points / default_bloc_size\n        )\n        worker_logger.debug(\n            \"The grid points will be divided in {} blocs\".format(\n                grid_points_division_factor\n            )\n        )\n\n    if nb_grid_points < grid_points_division_factor:\n        grid_points_division_factor = 1\n    index_division = np.linspace(\n        0, nb_grid_points, grid_points_division_factor + 1\n    )\n\n    # compute neighbors per blocs\n    neighbors_id = None\n    n_count = None\n    for i in range(grid_points_division_factor):\n        sub_grid = grid_points[\n            int(index_division[i]) : int(index_division[i + 1]), :\n        ]\n        neighbors_list = search_neighbors(\n            sub_grid, cloud_tree, radius, resolution, worker_logger\n        )\n\n        # reorganize neighborhood query results as 1d arrays to be\n        # compatible with numba.\n        neighbors_id_cur, n_count_cur = flatten_index_list(neighbors_list)\n\n        if neighbors_id is None:\n            neighbors_id = neighbors_id_cur\n        else:\n            neighbors_id = np.concatenate(\n                [neighbors_id, neighbors_id_cur], axis=0\n            )\n\n        if n_count is None:\n            n_count = n_count_cur\n        else:\n            n_count = np.concatenate([n_count, n_count_cur], axis=0)\n\n    # compute starts indexes of each grid points\n    start_ids = np.cumsum(np.concatenate(([0], n_count[:-1])))\n\n    return neighbors_id, start_ids, n_count\n\n\ndef compute_vector_raster_and_stats(\n    cloud: pandas.DataFrame,\n    data_valid: np.ndarray,\n    x_start: float,\n    y_start: float,\n    x_size: int,\n    y_size: int,\n    resolution: float,\n    sigma: float,\n    radius: int,\n    msk_no_data: int,\n    worker_logger: logging.Logger,\n    grid_points_division_factor: int,\n) -> Tuple[\n    np.ndarray,\n    np.ndarray,\n    np.ndarray,\n    np.ndarray,\n    np.ndarray,\n    Union[None, np.ndarray],\n]:\n    \"\"\"\n    Compute vectorized raster and its statistics.\n\n    :param cloud: Combined cloud\n        as returned by the create_combined_cloud function\n    :param data_valid: mask of points\n        which are not on the border of their original epipolar image.\n        To compute a cell, it has to contain at least one valid point,\n        in which case it is considered that no contributing\n        points from other neighbor tiles are missing.\n    :param x_start: x start of the rasterization grid\n    :param y_start: y start of the rasterization grid\n    :param x_size: x size of the rasterization grid\n    :param y_size: y size of the rasterization grid\n    :param resolution: Resolution of rasterized cells,\n        expressed in cloud CRS units or None.\n    :param sigma: Sigma for gaussian interpolation. 
If None, set to resolution\n    :param radius: Radius for hole filling.\n    :param msk_no_data: No data value to use for the rasterized mask\n    :param worker_logger: Logger\n    :param grid_points_division_factor: Number of blocs to use to divide\n        the grid points (memory optimization, reduce the highest memory peak).\n        If it is not set, the factor is automatically set\n        to construct 700000 points blocs.\n    :return: a tuple with rasterization results and statistics.\n    \"\"\"\n    # Build a grid of cell centers coordinates\n    tic = time.process_time()\n    grid_points = compute_grid_points(\n        x_start, y_start, x_size, y_size, resolution\n    )\n    toc = time.process_time()\n    worker_logger.debug(\n        \"Cell centers array built in {} seconds\".format(toc - tic)\n    )\n\n    # Search for neighbors\n    tic = time.process_time()\n    neighbors_id, start_ids, n_count = get_flatten_neighbors(\n        grid_points,\n        cloud,\n        radius,\n        resolution,\n        worker_logger,\n        grid_points_division_factor,\n    )\n    toc = time.process_time()\n    worker_logger.debug(\n        \"Total neighbors search done in {} seconds\".format(toc - tic)\n    )\n\n    # perform rasterization with gaussian interpolation\n    tic = time.process_time()\n    clr_bands = [\n        band\n        for band in cloud\n        if str.find(band, cst.POINTS_CLOUD_CLR_KEY_ROOT) >= 0\n    ]\n    cloud_band = [cst.X, cst.Y, cst.Z]\n    cloud_band.extend(clr_bands)\n\n    out, mean, stdev, n_pts, n_in_cell = gaussian_interp(\n        cloud.loc[:, cloud_band].values,\n        data_valid.astype(bool),\n        neighbors_id,\n        start_ids,\n        n_count,\n        grid_points,\n        resolution,\n        sigma,\n    )\n    toc = time.process_time()\n    worker_logger.debug(\n        \"Vectorized rasterization done in {} seconds\".format(toc - tic)\n    )\n\n    if cst.POINTS_CLOUD_MSK in cloud.columns:\n        msk = mask_interp(\n            cloud.loc[:, [cst.X, cst.Y, cst.POINTS_CLOUD_MSK]].values,\n            data_valid.astype(bool),\n            neighbors_id,\n            start_ids,\n            n_count,\n            grid_points,\n            sigma,\n            no_data_val=msk_no_data,\n            undefined_val=msk_no_data,\n        )\n    else:\n        msk = None\n\n    return out, mean, stdev, n_pts, n_in_cell, msk\n\n\n@njit(\n    (float64[:, :], boolean[:], int64, int64[:], int64[:], int64[:]),\n    nogil=True,\n    cache=True,\n)\ndef get_neighbors_from_points_array(\n    points: np.ndarray,\n    data_valid: np.ndarray,\n    i_grid: int,\n    neighbors_id: np.ndarray,\n    neighbors_start: np.ndarray,\n    neighbors_count: np.ndarray,\n) -> Union[np.ndarray, None]:\n    \"\"\"\n    Use the outputs of the get_flatten_neighbors function\n    to get the neighbors of the i_grid point in the points numpy array.\n\n    :param points: points numpy array (one line = one point)\n    :param data_valid: valid data mask corresponding to the points\n    :param i_grid: index of the grid point in the \"get_flatten_neighbors\" outputs\n    :param neighbors_id: the flattened neighbors ids list\n    :param neighbors_start: the flattened neighbors start indexes\n    :param neighbors_count: the flattened neighbors counts\n    :return: a numpy array containing only the i_grid point neighbors\n        or None if the point has no neighbors (or no valid neighbors)\n    \"\"\"\n    n_neighbors = neighbors_count[i_grid]\n\n    if n_neighbors == 0:\n        return None\n\n    n_start = neighbors_start[i_grid]\n    neighbors = points[neighbors_id[n_start : n_start + n_neighbors]]\n    n_valid = np.sum(data_valid[neighbors_id[n_start : n_start + n_neighbors]])\n\n    # discard if grid point has no valid neighbor in point cloud\n    if n_valid == 0:\n        return None\n\n    return neighbors\n\n\n@njit(\n    (\n        float64[:, :],\n        boolean[:],\n        int64[:],\n        int64[:],\n        int64[:],\n        float64[:, :],\n        float64,\n        int64,\n        int64,\n    ),\n    nogil=True,\n    cache=True,\n)\ndef 
mask_interp(\n    mask_points: np.ndarray,\n    data_valid: np.ndarray,\n    neighbors_id: np.ndarray,\n    neighbors_start: np.ndarray,\n    neighbors_count: np.ndarray,\n    grid_points: np.ndarray,\n    sigma: float,\n    no_data_val: int = 65535,\n    undefined_val: int = 65535,\n) -> np.ndarray:\n    \"\"\"\n    Interpolates mask data at grid point locations.\n\n    Each point contained in a terrain cell has a weight\n    depending on its distance to the cell center.\n    For each class, the weights are accumulated.\n    The class with the highest accumulated score is then used\n    as the terrain cell's final value.\n\n    :param mask_points: mask data, one point per row\n        (first column is the x position, second is the y position,\n        last column is the mask value).\n    :param data_valid: flattened validity mask.\n    :param neighbors_id: flattened neighboring cloud point indices.\n    :param neighbors_start: flattened grid point neighbors start indices.\n    :param neighbors_count: flattened grid point neighbor count.\n    :param grid_points: grid point location, one per row.\n    :param sigma: sigma parameter for weights computation.\n    :param no_data_val: no data value.\n    :param undefined_val: value in case of score equality.\n    :return: The interpolated mask\n    \"\"\"\n    # mask rasterization result\n    result = np.full((neighbors_count.size, 1), no_data_val, dtype=np.uint16)\n    for i_grid in range(neighbors_count.size):\n        p_sample = grid_points[i_grid]\n\n        neighbors = get_neighbors_from_points_array(\n            mask_points,\n            data_valid,\n            i_grid,\n            neighbors_id,\n            neighbors_start,\n            neighbors_count,\n        )\n        if neighbors is None:\n            continue\n\n        # grid point to neighbors distance\n        neighbors_vec = neighbors[:, :2] - p_sample\n        distances = np.sqrt(np.sum(neighbors_vec * neighbors_vec, axis=1))\n\n        # score computation\n        weights = np.exp(-(distances ** 2) / (2 * sigma ** 2))\n\n        val = []\n        val_cum_weight = []\n        for neighbor_idx in range(len(neighbors)):\n            msk_val = neighbors[neighbor_idx, 2:]\n\n            if msk_val != 0:  # only masked points are taken into account\n                if msk_val in val:\n                    msk_val_index = val.index(msk_val)\n                    val_cum_weight[msk_val_index] += weights[neighbor_idx]\n                else:\n                    val.append(msk_val)\n                    val_cum_weight.append(weights[neighbor_idx])\n\n        # search for the highest score\n        if len(val) != 0:\n            arr_val_cum_weight = np.asarray(val_cum_weight)\n            ind_max_weight = np.argmax(arr_val_cum_weight)\n\n            max_weight_values = [\n                val[i]\n                for i in range(len(val))\n                if val_cum_weight[i] == val_cum_weight[ind_max_weight]\n            ]\n            if len(max_weight_values) == 1:\n                result[i_grid] = val[ind_max_weight]\n            else:\n                result[i_grid] = undefined_val\n        else:  # no masked points in the terrain cell\n            result[i_grid] = 0\n\n    return result\n\n\n@njit(\n    (\n        float64[:, :],\n        boolean[:],\n        int64[:],\n        int64[:],\n        int64[:],\n        float64[:, :],\n        float64,\n        float64,\n    ),\n    nogil=True,\n    cache=True,\n)\ndef gaussian_interp(\n    cloud_points,\n    data_valid,\n    neighbors_id,\n    neighbors_start,\n    neighbors_count,\n    grid_points,\n    resolution,\n    sigma,\n):\n    \"\"\"\n    Interpolates point cloud data at grid point locations and produces\n    quality statistics.\n\n    :param cloud_points: point cloud data, one point per row.\n    :type cloud_points: float64 numpy.ndarray.\n    :param data_valid: flattened validity mask.\n    :type data_valid: bool numpy.ndarray.\n    :param neighbors_id: flattened neighboring cloud point indices.\n    :type neighbors_id: int64 numpy.ndarray.\n    :param neighbors_start: flattened grid point neighbors start indices.\n    :type neighbors_start: int64 numpy.ndarray.\n    :param neighbors_count: flattened grid point 
neighbor count.\n    :type neighbors_count: int64 numpy.ndarray.\n    :param grid_points: grid point location, one per row.\n    :type grid_points: float64 numpy.ndarray.\n    :param resolution: rasterization resolution.\n    :type resolution: float.\n    :param sigma: sigma parameter of gaussian interpolation.\n    :type sigma: float\n    :return: a tuple with rasterization results and statistics.\n    \"\"\"\n\n    # rasterization result for both height and color(s)\n    result = np.full(\n        (neighbors_count.size, cloud_points.shape[1] - 2),\n        np.nan,\n        dtype=np.float32,\n    )\n\n    # statistics layers\n    layer_mean = np.full(\n        (neighbors_count.size, cloud_points.shape[1] - 2),\n        np.nan,\n        dtype=np.float32,\n    )\n    layer_stdev = np.full(\n        (neighbors_count.size, cloud_points.shape[1] - 2),\n        np.nan,\n        dtype=np.float32,\n    )\n    n_pts = np.zeros(neighbors_count.size, np.uint16)\n    n_pts_in_cell = np.zeros(neighbors_count.size, np.uint16)\n\n    for i_grid in range(neighbors_count.size):\n\n        p_sample = grid_points[i_grid]\n\n        neighbors = get_neighbors_from_points_array(\n            cloud_points,\n            data_valid,\n            i_grid,\n            neighbors_id,\n            neighbors_start,\n            neighbors_count,\n        )\n        if neighbors is None:\n            continue\n\n        # grid point to neighbors distance\n        neighbors_vec = neighbors[:, :2] - p_sample\n        distances = np.sqrt(np.sum(neighbors_vec * neighbors_vec, axis=1))\n\n        # interpolation weights computation\n        min_dist = np.amin(distances)\n        weights = np.exp(-((distances - min_dist) ** 2) / (2 * sigma ** 2))\n        total_weight = np.sum(weights)\n\n        n_pts[i_grid] = neighbors_vec.shape[0]\n\n        # interpolate point cloud data\n        result[i_grid] = np.dot(weights, neighbors[:, 2:]) / total_weight\n\n        # compute statistic for each layer\n        for n_layer in range(2, cloud_points.shape[1]):\n            layer_stdev[i_grid][n_layer - 2] = np.std(neighbors[:, n_layer])\n            layer_mean[i_grid][n_layer - 2] = np.mean(neighbors[:, n_layer])\n\n        n_pts_in_cell[i_grid] = np.sum(\n            (np.abs(neighbors_vec[:, 0]) < 0.5 * resolution)\n            & (np.abs(neighbors_vec[:, 1]) < 0.5 * resolution)\n        )\n\n    return result, layer_mean, layer_stdev, n_pts, n_pts_in_cell\n\n\ndef create_raster_dataset(\n    raster: np.ndarray,\n    x_start: float,\n    y_start: float,\n    x_size: int,\n    y_size: int,\n    resolution: float,\n    hgt_no_data: int,\n    color_no_data: int,\n    epsg: int,\n    mean: np.ndarray,\n    stdev: np.ndarray,\n    n_pts: np.ndarray,\n    n_in_cell: np.ndarray,\n    msk: np.ndarray = None,\n) -> xr.Dataset:\n    \"\"\"\n    Create final raster xarray dataset\n\n    :param raster: height and colors\n    :param x_start: x start of the rasterization grid\n    :param y_start: y start of the rasterization grid\n    :param x_size: x size of the rasterization grid\n    :param y_size: y size of the rasterization grid\n    :param resolution: Resolution of rasterized cells,\n        expressed in cloud CRS units or None.\n    :param hgt_no_data: no data value to use for height\n    :param color_no_data: no data value to use for color\n    :param epsg: epsg code for the CRS of the final raster\n    :param mean: mean of height and colors\n    :param stdev: standard deviation of height and colors\n    :param n_pts: number of points that are strictly in a cell\n    :param n_in_cell: number of points which contribute to a cell\n    :param msk: raster msk\n    :return: the raster xarray dataset\n    \"\"\"\n    raster_dims = (cst.Y, cst.X)\n    n_layers = raster.shape[-1]\n    x_values_1d, y_values_1d = compute_values_1d(\n        x_start, y_start, x_size, y_size, resolution\n    )\n    raster_coords = {cst.X: x_values_1d, cst.Y: y_values_1d}\n    hgt = np.nan_to_num(raster[..., 0], nan=hgt_no_data)\n    raster_out = xr.Dataset(\n        
{cst.RASTER_HGT: ([cst.Y, cst.X], hgt)}, coords=raster_coords\n    )\n\n    if raster.shape[-1] > 1:  # rasterizer produced color output\n        band = range(1, raster.shape[-1])\n        # CAUTION: band/channel is set as the first dimension.\n        clr = np.nan_to_num(np.rollaxis(raster[:, :, 1:], 2), nan=color_no_data)\n        color_out = xr.Dataset(\n            {cst.RASTER_COLOR_IMG: ([cst.BAND, cst.Y, cst.X], clr)},\n            coords={**raster_coords, cst.BAND: band},\n        )\n        # update raster output with color data\n        raster_out = xr.merge((raster_out, color_out))\n\n    raster_out.attrs[cst.EPSG] = epsg\n    raster_out.attrs[cst.RESOLUTION] = resolution\n\n    # statistics layers for height output\n    raster_out[cst.RASTER_HGT_MEAN] = xr.DataArray(\n        mean[..., 0], coords=raster_coords, dims=raster_dims\n    )\n    raster_out[cst.RASTER_HGT_STD_DEV] = xr.DataArray(\n        stdev[..., 0], coords=raster_coords, dims=raster_dims\n    )\n\n    # add each band statistics\n    for i_layer in range(1, n_layers):\n        raster_out[\"{}{}\".format(cst.RASTER_BAND_MEAN, i_layer)] = xr.DataArray(\n            mean[..., i_layer], coords=raster_coords, dims=raster_dims\n        )\n        raster_out[\n            \"{}{}\".format(cst.RASTER_BAND_STD_DEV, i_layer)\n        ] = xr.DataArray(\n            stdev[..., i_layer], coords=raster_coords, dims=raster_dims\n        )\n\n    raster_out[cst.RASTER_NB_PTS] = xr.DataArray(n_pts, dims=raster_dims)\n    raster_out[cst.RASTER_NB_PTS_IN_CELL] = xr.DataArray(\n        n_in_cell, dims=raster_dims\n    )\n\n    if msk is not None:\n        raster_out[cst.RASTER_MSK] = xr.DataArray(msk, dims=raster_dims)\n\n    return raster_out\n\n\ndef rasterize(\n    cloud: pandas.DataFrame,\n    resolution: float,\n    epsg: int,\n    x_start: float,\n    y_start: float,\n    x_size: int,\n    y_size: int,\n    sigma: float = None,\n    radius: int = 1,\n    hgt_no_data: int = -32768,\n    color_no_data: int = 0,\n    msk_no_data: int = 65535,\n    grid_points_division_factor: int = None,\n) -> Union[xr.Dataset, None]:\n    \"\"\"\n    Rasterize a point cloud with its color bands to a Dataset\n    that also contains quality statistics.\n\n    :param cloud: Combined cloud\n        as returned by the create_combined_cloud function\n    :param resolution: Resolution of rasterized cells,\n        expressed in cloud CRS units or None.\n    :param epsg: epsg code for the CRS of the final raster\n    :param x_start: x start of the rasterization grid\n    :param y_start: y start of the rasterization grid\n    :param x_size: x size of the rasterization grid\n    :param y_size: y size of the rasterization grid\n    :param sigma: sigma for gaussian interpolation. 
If None, set to resolution\n :param radius: Radius for hole filling.\n :param hgt_no_data: no data value to use for height\n :param color_no_data: no data value to use for color\n :param msk_no_data: no data value to use in the final mask image\n :param grid_points_division_factor: number of blocs to use to divide\n the grid points (memory optimization, reduce the highest memory peak).\n If it is not set, the factor is automatically set to\n construct 700000 points blocs.\n :return: Rasterized cloud color and statistics.\n \"\"\"\n worker_logger = logging.getLogger(\"distributed.worker\")\n\n if sigma is None:\n sigma = resolution\n\n # generate validity mask from margins and all masks of cloud data.\n data_valid = cloud[cst.POINTS_CLOUD_VALID_DATA].values\n\n # If no valid points are found in cloud, return default values\n if np.size(data_valid) == 0:\n worker_logger.debug(\"No points to rasterize, returning None\")\n return None\n\n worker_logger.debug(\n \"Rasterization grid: start=[{},{}], size=[{},{}], resolution={}\".format(\n x_start, y_start, x_size, y_size, resolution\n )\n )\n\n out, mean, stdev, n_pts, n_in_cell, msk = compute_vector_raster_and_stats(\n cloud,\n data_valid,\n x_start,\n y_start,\n x_size,\n y_size,\n resolution,\n sigma,\n radius,\n msk_no_data,\n worker_logger,\n grid_points_division_factor,\n )\n\n # reshape data as a 2d grid.\n tic = time.process_time()\n shape_out = (y_size, x_size)\n out = out.reshape(shape_out + (-1,))\n mean = mean.reshape(shape_out + (-1,))\n stdev = stdev.reshape(shape_out + (-1,))\n n_pts = n_pts.reshape(shape_out)\n n_in_cell = n_in_cell.reshape(shape_out)\n\n if msk is not None:\n msk = msk.reshape(shape_out)\n\n toc = time.process_time()\n worker_logger.debug(\"Output reshaping done in {} seconds\".format(toc - tic))\n\n # build output dataset\n tic = time.process_time()\n raster_out = create_raster_dataset(\n out,\n x_start,\n y_start,\n x_size,\n y_size,\n resolution,\n hgt_no_data,\n color_no_data,\n epsg,\n mean,\n stdev,\n n_pts,\n n_in_cell,\n msk,\n )\n\n toc = time.process_time()\n worker_logger.debug(\n \"Final raster formatting into a xarray.Dataset \"\n \"done in {} seconds\".format(toc - tic)\n )\n\n return raster_out\n" ]
[ [ "numpy.sum", "numpy.asarray", "numpy.size", "numpy.stack", "numpy.nan_to_num", "numpy.meshgrid", "numpy.rollaxis", "numpy.concatenate", "numpy.abs", "scipy.spatial.cKDTree", "numpy.linspace", "numpy.mean", "numpy.ceil", "numpy.zeros", "numpy.argmax", "numpy.std", "numpy.nanmax", "numpy.floor", "numpy.exp", "numpy.nanmin", "numpy.amin", "numpy.dot", "numpy.full" ] ]
theofpa/failing-loudly
[ "da5498babf39bdd9ba534265d67bed77b290e5b4" ]
[ "pipeline.py" ]
[ "# -------------------------------------------------\n# IMPORTS\n# -------------------------------------------------\n\nimport numpy as np\nfrom tensorflow import set_random_seed\nseed = 1\nnp.random.seed(seed)\nset_random_seed(seed)\n\nimport keras\nimport tempfile\nimport keras.models\n\nfrom keras import backend as K \nfrom shift_detector import *\nfrom shift_locator import *\nfrom shift_applicator import *\nfrom data_utils import *\nfrom shared_utils import *\nimport os\nimport sys\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\n\n# -------------------------------------------------\n# PLOTTING HELPERS\n# -------------------------------------------------\n\n\nrc('font',**{'family':'serif','serif':['Times']})\nrc('text', usetex=True)\nrc('axes', labelsize=22)\nrc('xtick', labelsize=22)\nrc('ytick', labelsize=22)\nrc('legend', fontsize=13)\n\nmpl.rcParams['text.latex.preamble'] = [r'\\usepackage{amsmath}']\n\n\ndef clamp(val, minimum=0, maximum=255):\n if val < minimum:\n return minimum\n if val > maximum:\n return maximum\n return val\n\n\ndef colorscale(hexstr, scalefactor):\n hexstr = hexstr.strip('#')\n\n if scalefactor < 0 or len(hexstr) != 6:\n return hexstr\n\n r, g, b = int(hexstr[:2], 16), int(hexstr[2:4], 16), int(hexstr[4:], 16)\n\n r = clamp(r * scalefactor)\n g = clamp(g * scalefactor)\n b = clamp(b * scalefactor)\n\n return \"#%02x%02x%02x\" % (int(r), int(g), int(b))\n\n\ndef errorfill(x, y, yerr, color=None, alpha_fill=0.2, ax=None, fmt='-o', label=None):\n ax = ax if ax is not None else plt.gca()\n if color is None:\n color = next(ax._get_lines.prop_cycler)['color']\n if np.isscalar(yerr) or len(yerr) == len(y):\n ymin = y - yerr\n ymax = y + yerr\n elif len(yerr) == 2:\n ymin, ymax = yerr\n ax.semilogx(x, y, fmt, color=color, label=label)\n ax.fill_between(x, np.clip(ymax, 0, 1), np.clip(ymin, 0, 1), color=color, alpha=alpha_fill)\n\n\ndef make_keras_picklable():\n def __getstate__(self):\n model_str = \"\"\n with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:\n keras.models.save_model(self, fd.name, overwrite=True)\n model_str = fd.read()\n d = { 'model_str': model_str }\n return d\n\n def __setstate__(self, state):\n with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:\n fd.write(state['model_str'])\n fd.flush()\n model = keras.models.load_model(fd.name)\n self.__dict__ = model.__dict__\n\n\n cls = keras.models.Model\n cls.__getstate__ = __getstate__\n cls.__setstate__ = __setstate__\n\n\nlinestyles = ['-', '-.', '--', ':']\nbrightness = [1.25, 1.0, 0.75, 0.5]\nformat = ['-o', '-h', '-p', '-s', '-D', '-<', '->', '-X']\nmarkers = ['o', 'h', 'p', 's', 'D', '<', '>', 'X']\ncolors_old = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',\n '#9467bd', '#8c564b', '#e377c2', '#7f7f7f',\n '#bcbd22', '#17becf']\ncolors = ['#2196f3', '#f44336', '#9c27b0', '#64dd17', '#009688', '#ff9800', '#795548', '#607d8b']\n\n# -------------------------------------------------\n# CONFIG\n# -------------------------------------------------\n\nmake_keras_picklable()\n\ndatset = sys.argv[1]\ntest_type = sys.argv[3]\n\n# Define results path and create directory.\npath = './paper_results/'\npath += test_type + '/'\npath += datset + '_'\npath += sys.argv[2] + '/'\nif not os.path.exists(path):\n os.makedirs(path)\n\n# Define DR methods.\ndr_techniques = [DimensionalityReduction.NoRed.value, DimensionalityReduction.PCA.value, DimensionalityReduction.SRP.value, DimensionalityReduction.UAE.value, DimensionalityReduction.TAE.value, 
DimensionalityReduction.BBSDs.value, DimensionalityReduction.BBSDh.value]\nif test_type == 'multiv':\n dr_techniques = [DimensionalityReduction.NoRed.value, DimensionalityReduction.PCA.value, DimensionalityReduction.SRP.value, DimensionalityReduction.UAE.value, DimensionalityReduction.TAE.value, DimensionalityReduction.BBSDs.value]\nif test_type == 'univ':\n dr_techniques_plot = dr_techniques.copy()\n dr_techniques_plot.append(DimensionalityReduction.Classif.value)\nelse:\n dr_techniques_plot = dr_techniques.copy()\n\n# Define test types and general test sample sizes.\ntest_types = [td.value for td in TestDimensionality]\nif test_type == 'multiv':\n od_tests = []\n md_tests = [MultidimensionalTest.MMD.value]\n samples = [10, 20, 50, 100, 200, 500, 1000]\nelse:\n od_tests = [OnedimensionalTest.KS.value]\n md_tests = []\n samples = [10, 20, 50, 100, 200, 500, 1000, 10000]\ndifference_samples = 20\n\n# Number of random runs to average results over.\nrandom_runs = 5\n\n# Significance level.\nsign_level = 0.05\n\n# Whether to calculate accuracy for malignancy quantification.\ncalc_acc = True\n\n# Define shift types.\nif sys.argv[2] == 'small_gn_shift':\n shifts = ['small_gn_shift_0.1',\n 'small_gn_shift_0.5',\n 'small_gn_shift_1.0']\nelif sys.argv[2] == 'medium_gn_shift':\n shifts = ['medium_gn_shift_0.1',\n 'medium_gn_shift_0.5',\n 'medium_gn_shift_1.0']\nelif sys.argv[2] == 'large_gn_shift':\n shifts = ['large_gn_shift_0.1',\n 'large_gn_shift_0.5',\n 'large_gn_shift_1.0']\nelif sys.argv[2] == 'adversarial_shift':\n shifts = ['adversarial_shift_0.1',\n 'adversarial_shift_0.5',\n 'adversarial_shift_1.0']\nelif sys.argv[2] == 'ko_shift':\n shifts = ['ko_shift_0.1',\n 'ko_shift_0.5',\n 'ko_shift_1.0']\n if test_type == 'univ':\n samples = [10, 20, 50, 100, 200, 500, 1000, 9000]\nelif sys.argv[2] == 'orig':\n shifts = ['rand', 'orig']\n brightness = [1.25, 0.75]\nelif sys.argv[2] == 'small_image_shift':\n shifts = ['small_img_shift_0.1',\n 'small_img_shift_0.5',\n 'small_img_shift_1.0']\nelif sys.argv[2] == 'medium_image_shift':\n shifts = ['medium_img_shift_0.1',\n 'medium_img_shift_0.5',\n 'medium_img_shift_1.0']\nelif sys.argv[2] == 'large_image_shift':\n shifts = ['large_img_shift_0.1',\n 'large_img_shift_0.5',\n 'large_img_shift_1.0']\nelif sys.argv[2] == 'medium_img_shift+ko_shift':\n shifts = ['medium_img_shift_0.5+ko_shift_0.1',\n 'medium_img_shift_0.5+ko_shift_0.5',\n 'medium_img_shift_0.5+ko_shift_1.0']\n if test_type == 'univ':\n samples = [10, 20, 50, 100, 200, 500, 1000, 9000]\nelif sys.argv[2] == 'only_zero_shift+medium_img_shift':\n shifts = ['only_zero_shift+medium_img_shift_0.1',\n 'only_zero_shift+medium_img_shift_0.5',\n 'only_zero_shift+medium_img_shift_1.0']\n samples = [10, 20, 50, 100, 200, 500, 1000]\nelse:\n shifts = []\n \nif datset == 'coil100' and test_type == 'univ':\n samples = [10, 20, 50, 100, 200, 500, 1000, 2400]\n\nif datset == 'mnist_usps':\n samples = [10, 20, 50, 100, 200, 500, 1000]\n\n# -------------------------------------------------\n# PIPELINE START\n# -------------------------------------------------\n\n# Stores p-values for all experiments of a shift class.\nsamples_shifts_rands_dr_tech = np.ones((len(samples), len(shifts), random_runs, len(dr_techniques_plot))) * (-1)\n\nred_dim = -1\nred_models = [None] * len(DimensionalityReduction)\n\n# Iterate over all shifts in a shift class.\nfor shift_idx, shift in enumerate(shifts):\n\n shift_path = path + shift + '/'\n if not os.path.exists(shift_path):\n os.makedirs(shift_path)\n\n # Stores p-values for a 
single shift.\n rand_run_p_vals = np.ones((len(samples), len(dr_techniques_plot), random_runs)) * (-1)\n\n # Stores accuracy values for malignancy detection.\n val_accs = np.ones((random_runs, len(samples))) * (-1)\n te_accs = np.ones((random_runs, len(samples))) * (-1)\n dcl_accs = np.ones((len(samples), random_runs)) * (-1)\n\n # Average over a few random runs to quantify robustness.\n for rand_run in range(random_runs):\n\n print(\"Random run %s\" % rand_run)\n\n rand_run_path = shift_path + str(rand_run) + '/'\n if not os.path.exists(rand_run_path):\n os.makedirs(rand_run_path)\n\n np.random.seed(rand_run)\n set_random_seed(rand_run)\n\n # Load data.\n (X_tr_orig, y_tr_orig), (X_val_orig, y_val_orig), (X_te_orig, y_te_orig), orig_dims, nb_classes = \\\n import_dataset(datset, shuffle=True)\n X_tr_orig = normalize_datapoints(X_tr_orig, 255.)\n X_te_orig = normalize_datapoints(X_te_orig, 255.)\n X_val_orig = normalize_datapoints(X_val_orig, 255.)\n\n # Apply shift.\n if shift == 'orig':\n print('Original')\n (X_tr_orig, y_tr_orig), (X_val_orig, y_val_orig), (X_te_orig, y_te_orig), orig_dims, nb_classes = import_dataset(datset)\n X_tr_orig = normalize_datapoints(X_tr_orig, 255.)\n X_te_orig = normalize_datapoints(X_te_orig, 255.)\n X_val_orig = normalize_datapoints(X_val_orig, 255.)\n X_te_1 = X_te_orig.copy()\n y_te_1 = y_te_orig.copy()\n else:\n (X_te_1, y_te_1) = apply_shift(X_te_orig, y_te_orig, shift, orig_dims, datset)\n\n X_te_2 , y_te_2 = random_shuffle(X_te_1, y_te_1)\n\n # Check detection performance for different numbers of samples from test.\n for si, sample in enumerate(samples):\n\n print(\"Sample %s\" % sample)\n\n sample_path = rand_run_path + str(sample) + '/'\n if not os.path.exists(sample_path):\n os.makedirs(sample_path)\n\n X_te_3 = X_te_2[:sample,:]\n y_te_3 = y_te_2[:sample]\n\n if test_type == 'multiv':\n X_val_3 = X_val_orig[:1000,:]\n y_val_3 = y_val_orig[:1000]\n else:\n X_val_3 = X_val_orig[:sample,:]\n y_val_3 = y_val_orig[:sample]\n\n X_tr_3 = np.copy(X_tr_orig)\n y_tr_3 = np.copy(y_tr_orig)\n\n # Detect shift.\n shift_detector = ShiftDetector(dr_techniques, test_types, od_tests, md_tests, sign_level, red_models,\n sample, datset)\n (od_decs, ind_od_decs, ind_od_p_vals), \\\n (md_decs, ind_md_decs, ind_md_p_vals), \\\n red_dim, red_models, val_acc, te_acc = shift_detector.detect_data_shift(X_tr_3, y_tr_3, X_val_3, y_val_3,\n X_te_3, y_te_3, orig_dims,\n nb_classes)\n \n val_accs[rand_run, si] = val_acc\n te_accs[rand_run, si] = te_acc\n\n if test_type == 'multiv':\n print(\"Shift decision: \", ind_md_decs.flatten())\n print(\"Shift p-vals: \", ind_md_p_vals.flatten())\n\n rand_run_p_vals[si,:,rand_run] = ind_md_p_vals.flatten()\n else:\n print(\"Shift decision: \", ind_od_decs.flatten())\n print(\"Shift p-vals: \", ind_od_p_vals.flatten())\n \n if DimensionalityReduction.Classif.value not in dr_techniques_plot:\n rand_run_p_vals[si,:,rand_run] = ind_od_p_vals.flatten()\n continue\n\n # Characterize shift via domain classifier.\n shift_locator = ShiftLocator(orig_dims, dc=DifferenceClassifier.FFNNDCL, sign_level=sign_level)\n model, score, (X_tr_dcl, y_tr_dcl, y_tr_old, X_te_dcl, y_te_dcl, y_te_old) = shift_locator.build_model(X_tr_3, y_tr_3, X_te_3, y_te_3)\n test_indices, test_perc, dec, p_val = shift_locator.most_likely_shifted_samples(model, X_te_dcl, y_te_dcl)\n\n # K.clear_session()\n\n rand_run_p_vals[si,:,rand_run] = np.append(ind_od_p_vals.flatten(), p_val)\n\n if datset == 'mnist':\n samp_shape = (28,28)\n cmap = 'gray'\n elif datset == 'cifar10' or 
datset == 'cifar10_1' or datset == 'coil100' or datset == 'svhn':\n samp_shape = (32,32,3)\n cmap = None\n elif datset == 'mnist_usps':\n samp_shape = (16,16)\n cmap = 'gray'\n\n if dec:\n most_conf_test_indices = test_indices[test_perc > 0.8]\n\n top_same_samples_path = sample_path + 'top_same'\n if not os.path.exists(top_same_samples_path):\n os.makedirs(top_same_samples_path)\n\n rev_top_test_ind = test_indices[::-1][:difference_samples]\n least_conf_samples = X_te_dcl[rev_top_test_ind]\n for j in range(len(rev_top_test_ind)):\n samp = least_conf_samples[j, :]\n fig = plt.imshow(samp.reshape(samp_shape), cmap=cmap)\n plt.axis('off')\n fig.axes.get_xaxis().set_visible(False)\n fig.axes.get_yaxis().set_visible(False)\n plt.savefig(\"%s/%s.pdf\" % (top_same_samples_path, j), bbox_inches='tight', pad_inches=0)\n plt.clf()\n\n j = j + 1\n\n top_different_samples_path = sample_path + 'top_diff'\n if not os.path.exists(top_different_samples_path):\n os.makedirs(top_different_samples_path)\n\n if calc_acc:\n print('-------------------')\n print(\"Len of most conf: %s\" % len(most_conf_test_indices))\n print(y_te_old[most_conf_test_indices])\n if len(most_conf_test_indices) > 0:\n y_te_dcl_pred = shift_detector.classify_data(X_tr_3, y_tr_3, X_val_3, y_val_3,\n X_te_dcl[most_conf_test_indices],\n y_te_dcl[most_conf_test_indices],\n orig_dims, nb_classes)\n print(y_te_dcl_pred)\n dcl_class_acc = np.sum(np.equal(y_te_dcl_pred, y_te_old[most_conf_test_indices])\n .astype(int))/len(y_te_dcl_pred)\n dcl_accs[si,rand_run] = dcl_class_acc\n print(\"dcl_class_acc: \", dcl_class_acc)\n print('-------------------')\n\n most_conf_samples = X_te_dcl[most_conf_test_indices]\n for j in range(len(most_conf_samples)):\n if j < difference_samples:\n samp = most_conf_samples[j,:]\n fig = plt.imshow(samp.reshape(samp_shape), cmap=cmap)\n plt.axis('off')\n fig.axes.get_xaxis().set_visible(False)\n fig.axes.get_yaxis().set_visible(False)\n plt.savefig(\"%s/test_%s.pdf\" % (top_different_samples_path, j), bbox_inches='tight',\n pad_inches=0)\n plt.clf()\n \n j = j + 1\n else:\n break\n\n\n # most_conf_samples = X_te_dcl[most_conf_test_indices]\n # original_indices = []\n # j = 0\n # for i in range(len(most_conf_samples)):\n # samp = most_conf_samples[i,:]\n # ind = np.where(np.all(X_te_3==samp,axis=1))\n # if len(ind[0]) > 0:\n # original_indices.append(np.asscalar(ind[0]))\n #\n # if j < difference_samples:\n # fig = plt.imshow(samp.reshape(samp_shape), cmap=cmap)\n # plt.axis('off')\n # fig.axes.get_xaxis().set_visible(False)\n # fig.axes.get_yaxis().set_visible(False)\n # plt.savefig(\"%s/%s.pdf\" % (top_different_samples_path,j), bbox_inches='tight',\n # pad_inches = 0)\n # plt.clf()\n #\n # j = j + 1\n\n for dr_idx, dr in enumerate(dr_techniques_plot):\n plt.semilogx(np.array(samples), rand_run_p_vals[:,dr_idx,rand_run], format[dr], color=colors[dr], label=\"%s\" % DimensionalityReduction(dr).name)\n plt.axhline(y=sign_level, color='k')\n plt.xlabel('Number of samples from test')\n plt.ylabel('$p$-value')\n plt.savefig(\"%s/dr_sample_comp_noleg.pdf\" % rand_run_path, bbox_inches='tight')\n plt.legend()\n plt.savefig(\"%s/dr_sample_comp.pdf\" % rand_run_path, bbox_inches='tight')\n plt.clf()\n\n np.savetxt(\"%s/dr_method_p_vals.csv\" % rand_run_path, rand_run_p_vals[:,:,rand_run], delimiter=\",\")\n\n np.random.seed(seed)\n set_random_seed(seed)\n\n mean_p_vals = np.mean(rand_run_p_vals, axis=2)\n std_p_vals = np.std(rand_run_p_vals, axis=2)\n\n mean_val_accs = np.mean(val_accs, axis=0)\n std_val_accs = 
np.std(val_accs, axis=0)\n\n mean_te_accs = np.mean(te_accs, axis=0)\n std_te_accs = np.std(te_accs, axis=0)\n\n if calc_acc and test_type == 'univ':\n mean_dcl_accs = []\n std_dcl_accs = []\n for si, sample in enumerate(samples):\n avg_val = 0\n elem_count = 0\n elem_list = []\n for rand_run in range(random_runs):\n current_val = dcl_accs[si, rand_run]\n if current_val == -1:\n continue\n elem_list.append(current_val)\n avg_val = avg_val + current_val\n elem_count = elem_count + 1\n std_dcl_accs.append(np.std(np.array(elem_list)))\n if elem_count > 1:\n avg_val = avg_val / elem_count\n else:\n avg_val = -1\n mean_dcl_accs.append(avg_val)\n\n mean_dcl_accs = np.array(mean_dcl_accs)\n std_dcl_accs = np.array(std_dcl_accs)\n smpl_array = np.array(samples)\n min_one_indices = np.where(mean_dcl_accs == -1)\n\n print(\"mean_dcl_accs: \", mean_dcl_accs)\n print(\"std_dcl_accs: \", std_dcl_accs)\n print(\"smpl_array: \", smpl_array)\n\n print(\"-----------------\")\n\n smpl_array = np.delete(smpl_array, min_one_indices)\n mean_dcl_accs = np.delete(mean_dcl_accs, min_one_indices)\n std_dcl_accs = np.delete(std_dcl_accs, min_one_indices)\n\n print(\"mean_dcl_accs: \", mean_dcl_accs)\n print(\"std_dcl_accs: \", std_dcl_accs)\n print(\"smpl_array: \", smpl_array)\n\n accs = np.ones((4, len(samples))) * (-1)\n accs[0] = mean_val_accs\n accs[1] = std_val_accs\n accs[2] = mean_te_accs\n accs[3] = std_te_accs\n\n dcl_accs = np.ones((3, len(smpl_array))) * (-1)\n dcl_accs[0] = smpl_array\n dcl_accs[1] = mean_dcl_accs\n dcl_accs[2] = std_dcl_accs\n\n np.savetxt(\"%s/accs.csv\" % shift_path, accs, delimiter=\",\")\n np.savetxt(\"%s/dcl_accs.csv\" % shift_path, dcl_accs, delimiter=\",\")\n\n errorfill(np.array(samples), mean_val_accs, std_val_accs, fmt='-o', color=colors[0], label=r\"$p$\")\n errorfill(np.array(samples), mean_te_accs, std_te_accs, fmt='-s', color=colors[3], label=r\"$q$\")\n if len(smpl_array) > 0:\n errorfill(smpl_array, mean_dcl_accs, std_dcl_accs, fmt='--X', color=colors[7], label=r\"Classif\")\n plt.xlabel('Number of samples from test')\n plt.ylabel('Accuracy')\n plt.savefig(\"%s/accs.pdf\" % shift_path, bbox_inches='tight')\n plt.legend()\n plt.savefig(\"%s/accs_leg.pdf\" % shift_path, bbox_inches='tight')\n plt.clf()\n \n\n for dr_idx, dr in enumerate(dr_techniques_plot):\n errorfill(np.array(samples), mean_p_vals[:,dr_idx], std_p_vals[:,dr_idx], fmt=format[dr], color=colors[dr], label=\"%s\" % DimensionalityReduction(dr).name)\n plt.axhline(y=sign_level, color='k')\n plt.xlabel('Number of samples from test')\n plt.ylabel('$p$-value')\n plt.savefig(\"%s/dr_sample_comp_noleg.pdf\" % shift_path, bbox_inches='tight')\n plt.legend()\n plt.savefig(\"%s/dr_sample_comp.pdf\" % shift_path, bbox_inches='tight')\n plt.clf()\n\n for dr_idx, dr in enumerate(dr_techniques_plot):\n errorfill(np.array(samples), mean_p_vals[:,dr_idx], std_p_vals[:,dr_idx], fmt=format[dr], color=colors[dr])\n plt.xlabel('Number of samples from test')\n plt.ylabel('$p$-value')\n plt.axhline(y=sign_level, color='k', label='sign_level')\n plt.savefig(\"%s/%s_conf.pdf\" % (shift_path, DimensionalityReduction(dr).name), bbox_inches='tight')\n plt.clf()\n\n np.savetxt(\"%s/mean_p_vals.csv\" % shift_path, mean_p_vals, delimiter=\",\")\n np.savetxt(\"%s/std_p_vals.csv\" % shift_path, std_p_vals, delimiter=\",\")\n\n for dr_idx, dr in enumerate(dr_techniques_plot):\n samples_shifts_rands_dr_tech[:,shift_idx,:,dr_idx] = rand_run_p_vals[:,dr_idx,:]\n\n np.save(\"%s/samples_shifts_rands_dr_tech.npy\" % (path), 
samples_shifts_rands_dr_tech)\n\nfor dr_idx, dr in enumerate(dr_techniques_plot):\n dr_method_results = samples_shifts_rands_dr_tech[:,:,:,dr_idx]\n\n mean_p_vals = np.mean(dr_method_results, axis=2)\n std_p_vals = np.std(dr_method_results, axis=2)\n\n for idx, shift in enumerate(shifts):\n errorfill(np.array(samples), mean_p_vals[:, idx], std_p_vals[:, idx], fmt=linestyles[idx]+markers[dr], color=colorscale(colors[dr],brightness[idx]), label=\"%s\" % shift.replace('_', '\\\\_'))\n plt.xlabel('Number of samples from test')\n plt.ylabel('$p$-value')\n plt.axhline(y=sign_level, color='k')\n plt.savefig(\"%s/%s_conf_noleg.pdf\" % (path, DimensionalityReduction(dr).name), bbox_inches='tight')\n plt.legend()\n plt.savefig(\"%s/%s_conf.pdf\" % (path, DimensionalityReduction(dr).name), bbox_inches='tight')\n plt.clf()\n\nnp.save(\"%s/samples_shifts_rands_dr_tech.npy\" % (path), samples_shifts_rands_dr_tech)\n" ]
[ [ "numpy.save", "numpy.savetxt", "numpy.random.seed", "numpy.copy", "matplotlib.pyplot.ylabel", "numpy.isscalar", "matplotlib.pyplot.savefig", "matplotlib.pyplot.gca", "matplotlib.rc", "numpy.delete", "numpy.where", "numpy.mean", "matplotlib.pyplot.axhline", "matplotlib.pyplot.axis", "numpy.equal", "matplotlib.pyplot.clf", "tensorflow.set_random_seed", "numpy.std", "matplotlib.pyplot.legend", "numpy.clip", "numpy.array", "matplotlib.pyplot.xlabel" ] ]
lc0/autokeras
[ "413508a5f6aaa38ee7aba719aadb057c0b029591" ]
[ "examples/celeb_age.py" ]
[ "\"\"\"\nRegression tasks estimate a numeric variable, such as the price of a house or voter\nturnout.\n\nThis example is adapted from a\n[notebook](https://gist.github.com/mapmeld/98d1e9839f2d1f9c4ee197953661ed07) which\nestimates a person's age from their image, trained on the\n[IMDB-WIKI](https://data.vision.ee.ethz.ch/cvl/rrothe/imdb-wiki/) photographs of famous\npeople.\n\nFirst, prepare your image data in a numpy.ndarray or tensorflow.Dataset format. Each\nimage must have the same shape, meaning each has the same width, height, and color\nchannels as other images in the set.\n\"\"\"\n\n\"\"\"\n### Connect your Google Drive for Data\n\"\"\"\n\n\nimport os\nfrom datetime import datetime\nfrom datetime import timedelta\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom google.colab import drive\nfrom PIL import Image\nfrom scipy.io import loadmat\nfrom tensorflow.python.keras.utils.data_utils import Sequence\nimport autokeras as ak\ndrive.mount(\"/content/drive\")\n\n\"\"\"\n### Install AutoKeras and TensorFlow\n\nDownload the master branch to your Google Drive for this tutorial. In general, you can\nuse *pip install autokeras* .\n\"\"\"\n\n\"\"\"shell\n!pip install -v \"/content/drive/My Drive/AutoKeras-dev/autokeras-master.zip\"\n!pip uninstall keras-tuner\n!pip install\ngit+git://github.com/keras-team/keras-tuner.git@d2d69cba21a0b482a85ce2a38893e2322e139c01\n\"\"\"\n\n\"\"\"shell\n!pip install tensorflow==2.2.0\n\"\"\"\n\n\"\"\"\n###**Import IMDB Celeb images and metadata**\n\"\"\"\n\n\"\"\"shell\n!mkdir ./drive/My\\ Drive/mlin/celebs\n\"\"\"\n\n\"\"\"shell\n! wget -O ./drive/My\\ Drive/mlin/celebs/imdb_0.tar\nhttps://data.vision.ee.ethz.ch/cvl/rrothe/imdb-wiki/static/imdb_0.tar\n\"\"\"\n\n\"\"\"shell\n! cd ./drive/My\\ Drive/mlin/celebs && tar -xf imdb_0.tar\n! rm ./drive/My\\ Drive/mlin/celebs/imdb_0.tar\n\"\"\"\n\n\"\"\"\nUncomment and run the below cell if you need to re-run the cells again and above don't\nneed to install everything from the beginning.\n\"\"\"\n\n# ! cd ./drive/My\\ Drive/mlin/celebs.\n\n\"\"\"shell\n! ls ./drive/My\\ Drive/mlin/celebs/imdb/\n\"\"\"\n\n\"\"\"shell\n! wget https://data.vision.ee.ethz.ch/cvl/rrothe/imdb-wiki/static/imdb_meta.tar\n! tar -xf imdb_meta.tar\n! 
rm imdb_meta.tar\n\"\"\"\n\n\"\"\"\n###**Converting from MATLAB date to actual Date-of-Birth**\n\"\"\"\n\n\ndef datenum_to_datetime(datenum):\n \"\"\"\n Convert MATLAB datenum into Python datetime.\n \"\"\"\n days = datenum % 1\n hours = days % 1 * 24\n minutes = hours % 1 * 60\n seconds = minutes % 1 * 60\n try:\n return (\n datetime.fromordinal(int(datenum))\n + timedelta(days=int(days))\n + timedelta(hours=int(hours))\n + timedelta(minutes=int(minutes))\n + timedelta(seconds=round(seconds))\n - timedelta(days=366)\n )\n except:\n return datenum_to_datetime(700000)\n\n\nprint(datenum_to_datetime(734963))\n\n\"\"\"\n### **Opening MATLAB file to Pandas DataFrame**\n\"\"\"\n\n\nx = loadmat(\"imdb/imdb.mat\")\n\n\nmdata = x[\"imdb\"] # variable in mat file\nmdtype = mdata.dtype # dtypes of structures are \"unsized objects\"\nndata = {n: mdata[n][0, 0] for n in mdtype.names}\ncolumns = [n for n, v in ndata.items()]\n\nrows = []\nfor col in range(0, 10):\n values = list(ndata.items())[col]\n for num, val in enumerate(values[1][0], start=0):\n if col == 0:\n rows.append([])\n if num > 0:\n if columns[col] == \"dob\":\n rows[num].append(datenum_to_datetime(int(val)))\n elif columns[col] == \"photo_taken\":\n rows[num].append(datetime(year=int(val), month=6, day=30))\n else:\n rows[num].append(val)\n\ndt = map(lambda row: np.array(row), np.array(rows[1:]))\n\ndf = pd.DataFrame(data=dt, index=range(0, len(rows) - 1), columns=columns)\nprint(df.head())\n\nprint(columns)\nprint(df[\"full_path\"])\n\n\"\"\"\n### **Calculating age at time photo was taken**\n\"\"\"\n\ndf[\"age\"] = (df[\"photo_taken\"] - df[\"dob\"]).astype(\"int\") / 31558102e9\nprint(df[\"age\"])\n\n\"\"\"\n### **Creating dataset**\n\n\n* We sample 200 of the images which were included in this first download.\n* Images are resized to 128x128 to standardize shape and conserve memory\n* RGB images are converted to grayscale to standardize shape\n* Ages are converted to ints\n\n\n\n\"\"\"\n\n\ndef df2numpy(train_set):\n images = []\n for img_path in train_set[\"full_path\"]:\n img = (\n Image.open(\"./drive/My Drive/mlin/celebs/imdb/\" + img_path[0])\n .resize((128, 128))\n .convert(\"L\")\n )\n images.append(np.asarray(img, dtype=\"int32\"))\n\n image_inputs = np.array(images)\n\n ages = train_set[\"age\"].astype(\"int\").to_numpy()\n return image_inputs, ages\n\n\ntrain_set = df[df[\"full_path\"] < \"02\"].sample(200)\ntrain_imgs, train_ages = df2numpy(train_set)\n\ntest_set = df[df[\"full_path\"] < \"02\"].sample(100)\ntest_imgs, test_ages = df2numpy(test_set)\n\n\"\"\"\n### **Training using AutoKeras**\n\"\"\"\n\n\n# Initialize the image regressor\nreg = ak.ImageRegressor(max_trials=15) # AutoKeras tries 15 different models.\n\n# Find the best model for the given training data\nreg.fit(train_imgs, train_ages)\n\n# Predict with the chosen model:\n# predict_y = reg.predict(test_imgs) # Uncomment if required\n\n# Evaluate the chosen model with testing data\nprint(reg.evaluate(test_imgs, test_ages))\n\n\"\"\"\n### **Validation Data**\n\nBy default, AutoKeras uses the last 20% of training data as validation data. 
As shown in\nthe example below, you can use validation_split to specify the percentage.\n\"\"\"\n\nreg.fit(\n train_imgs,\n train_ages,\n # Split the training data and use the last 15% as validation data.\n validation_split=0.15,\n epochs=3,\n)\n\n\"\"\"\nYou can also use your own validation set instead of splitting it from the training data\nwith validation_data.\n\"\"\"\n\n# With the 200 sampled images, hold out the last 40 as the validation set.\nsplit = 160\nx_val = train_imgs[split:]\ny_val = train_ages[split:]\nx_train = train_imgs[:split]\ny_train = train_ages[:split]\nreg.fit(\n x_train,\n y_train,\n # Use your own validation set.\n validation_data=(x_val, y_val),\n epochs=3,\n)\n\n\"\"\"\n### **Customized Search Space**\n\nFor advanced users, you may customize your search space by using AutoModel instead of\nImageRegressor. You can configure the ImageBlock for some high-level configurations,\ne.g., block_type for the type of neural network to search, normalize for whether to do\ndata normalization, augment for whether to do data augmentation. You can also choose not\nto specify these arguments, which would leave the different choices to be tuned\nautomatically. See the following example for detail.\n\"\"\"\n\n\ninput_node = ak.ImageInput()\noutput_node = ak.ImageBlock(\n # Only search ResNet architectures.\n block_type=\"resnet\",\n # Normalize the dataset.\n normalize=True,\n # Do not do data augmentation.\n augment=False,\n)(input_node)\noutput_node = ak.RegressionHead()(output_node)\nreg = ak.AutoModel(inputs=input_node, outputs=output_node, max_trials=10)\nreg.fit(x_train, y_train, epochs=3)\n\n\"\"\"\nThe usage of AutoModel is similar to the functional API of Keras. Basically, you are\nbuilding a graph, whose edges are blocks and whose nodes are intermediate outputs of\nblocks. You add an edge from input_node to output_node with output_node =\nak.some_block(input_node).\nYou can also use more fine-grained blocks to customize the search space even\nfurther. See the following example.\n\"\"\"\n\n\ninput_node = ak.ImageInput()\noutput_node = ak.Normalization()(input_node)\noutput_node = ak.ImageAugmentation(translation_factor=0.3)(output_node)\noutput_node = ak.ResNetBlock(version=\"v2\")(output_node)\noutput_node = ak.RegressionHead()(output_node)\nclf = ak.AutoModel(inputs=input_node, outputs=output_node, max_trials=10)\nclf.fit(x_train, y_train, epochs=3)\n\n\"\"\"\n### **Data Format**\n\"\"\"\n\n\"\"\"\nThe AutoKeras ImageClassifier is quite flexible for the data format.\n\nFor the image, it accepts data formats both with and without the channel dimension. The\nimages in the IMDB-WIKI dataset do not have a channel dimension. Each image is a matrix\nwith shape (128, 128). AutoKeras also accepts images with a channel dimension at last,\ne.g., (32, 32, 3), (28, 28, 1).\n\nFor the classification labels, AutoKeras accepts both plain labels, i.e. strings or\nintegers, and one-hot encoded labels, i.e. vectors of 0s and 1s.\n\nSo if you prepare your data in the following way, the ImageClassifier should still work.\n\"\"\"\n\n# Reshape the images to have the channel dimension.\ntrain_imgs = train_imgs.reshape(train_imgs.shape + (1,))\ntest_imgs = test_imgs.reshape(test_imgs.shape + (1,))\n\nprint(train_imgs.shape) # (200, 128, 128, 1)\nprint(test_imgs.shape) # (100, 128, 128, 1)\nprint(train_ages[:3])\n\n\"\"\"\nWe also support using tf.data.Dataset format for the training data. In this case, the\nimages would have to be 3-dimensional. 
The labels have to be one-hot encoded for\nmulti-class classification to be wrapped into a tensorflow Dataset.\n\"\"\"\n\n\ntrain_set = tf.data.Dataset.from_tensor_slices(((train_imgs,), (train_ages,)))\ntest_set = tf.data.Dataset.from_tensor_slices(((test_imgs,), (test_ages,)))\n\nreg = ak.ImageRegressor(max_trials=15)\n# Feed the tensorflow Dataset to the regressor.\nreg.fit(train_set)\n# Predict with the best model.\npredicted_y = reg.predict(test_set)\n# Evaluate the best model with testing data.\nprint(reg.evaluate(test_set))\n\n\"\"\"\n## References\n\n[Main Reference\nNotebook](https://gist.github.com/mapmeld/98d1e9839f2d1f9c4ee197953661ed07),\n[Dataset](https://data.vision.ee.ethz.ch/cvl/rrothe/imdb-wiki/),\n[ImageRegressor](/image_regressor),\n[ResNetBlock](/block/#resnetblock-class),\n[ImageInput](/node/#imageinput-class),\n[AutoModel](/auto_model/#automodel-class),\n[ImageBlock](/block/#imageblock-class),\n[Normalization](/preprocessor/#normalization-class),\n[ImageAugmentation](/preprocessor/#image-augmentation-class),\n[RegressionHead](/head/#regressionhead-class).\n\n\"\"\"\n" ]
[ [ "scipy.io.loadmat", "tensorflow.data.Dataset.from_tensor_slices", "numpy.array", "numpy.asarray" ] ]
Sillte/figpptx
[ "bf5539b09eeef4e6a17bb4483f62f29d286138b2" ]
[ "gallery/line2d.py" ]
[ "\"\"\"Check behaviors of ``Line2D``.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom figpptx.comparer import Comparer\n\n\nclass Line2DCheck:\n \"\"\"Line2DCheck.\n\n \"\"\"\n @classmethod\n def run(cls, ax):\n cls.various_line2d(ax)\n Comparer().compare(ax.figure)\n\n @classmethod\n def various_line2d(cls, ax):\n ax.plot([0, 1], [0, 1])\n ax.plot([0, 1, 2], [2, 3, 1])\n return ax\n\n\nif __name__ == \"__main__\":\n fig, ax = plt.subplots(dpi=72)\n Line2DCheck.run(ax)\n" ]
[ [ "matplotlib.pyplot.subplots" ] ]
manluow/d3p
[ "23a33195d6fc4c0db60b24f3f871094a1f2cf8ab" ]
[ "seq2avg_data_generation.py" ]
[ "# -*- coding: utf-8 -*-\n\n\nimport os\nimport json\nimport pickle\nfrom datetime import datetime, timedelta\n\nimport numpy as np\nimport pandas as pd\n\nfrom utils import day_of_month\n\ndef is_first_day(shop_timeline, seq, dt):\n '''Jude whether a day is the first day of the sequence\n '''\n timeline = shop_timeline[seq]\n for tl in timeline:\n if tl[0] <= dt < tl[1] and tl[0] == dt:\n return True\n return False\n\n\ndef is_open(shop_timeline, seq, dt):\n '''Jude whether a day is still open at the specified day\n '''\n timeline = shop_timeline[seq]\n for tl in timeline:\n if tl[0] <= dt < tl[1]:\n return True\n return False\n\n\ndef get_prev(shop_id, cache_orders, cache_masks, cache_ext, cur_mon, cur_day, start_mon, n_day):\n '''Get the weekly average number of orders in previous n_days\n\n Arg:\n shop_id: the id of the station\n cur_mon: the current month\n cur_day: the current day\n start_mon : the start month\n n_day: the number of days will be calculated\n '''\n end_idx = cur_day - 1\n for i in range(start_mon, cur_mon):\n end_idx += day_of_month(i)\n\n start_idx = end_idx - n_day\n r_orders = cache_orders[shop_id, start_idx: end_idx]\n r_masks = cache_masks[shop_id, start_idx: end_idx]\n r_ext = cache_ext[shop_id, start_idx: end_idx]\n\n return r_orders, r_masks, r_ext\n\n\ndef get_next(shop_id, cache_orders, cache_masks, cache_ext, cur_mon, cur_day, start_mon, n_day=14):\n '''Get the weekly average number of orders in next n_days\n\n Arg:\n shop_id: the id of the station\n cur_mon: the current month\n cur_day: the current day\n start_mon : the start month\n n_day: the number of days will be calculated\n '''\n start_idx = cur_day - 1\n\n for i in range(start_mon, cur_mon):\n start_idx += day_of_month(i)\n\n end_idx = start_idx + n_day\n\n r_orders = cache_orders[shop_id, start_idx: end_idx]\n r_masks = cache_masks[shop_id, start_idx: end_idx]\n r_ext = cache_ext[shop_id, start_idx: end_idx]\n\n avg_orders = np.zeros([7, ], dtype=np.float32)\n valid_days = np.zeros([7, ], dtype=np.float32)\n\n cur_dt = datetime(1991, cur_mon, cur_day)\n delta = timedelta(days=1)\n\n for i in range(n_day):\n if r_masks[i]:\n wd = cur_dt.weekday()\n avg_orders[wd] += r_orders[i]\n valid_days[wd] += 1\n cur_dt += delta\n\n avg_orders = avg_orders / valid_days\n avg_orders[np.where(valid_days == 0)] = 0\n\n r_masks = np.ones(shape=[7, ], dtype=np.float32)\n r_masks[np.where(valid_days == 0)] = 0\n\n return avg_orders, r_masks, r_ext\n\ndef main():\n shops = np.genfromtxt('data/shops_example.csv', delimiter=',')\n\n n_shops = shops.shape[0]\n\n id2seq = {idx: seq for idx, seq in enumerate(shops[:, 0].astype(np.int32))}\n seq2id = {seq: idx for idx, seq in enumerate(shops[:, 0].astype(np.int32))}\n\n # Processing online and offline events\n shop_timeline = pickle.load(open('cache/timeline.pkl', 'rb'))\n\n shop_info = pd.read_csv('data/shop_info_example.csv')\n\n restrict = {}\n n_parks = {}\n for idx, shop in shop_info.iterrows():\n shop_seq = shop['SHOP_SEQ']\n is_restrict = shop['IS_RESTRICT']\n n_park = shop['PARK_NUM']\n\n restrict[shop_seq] = is_restrict\n n_parks[shop_seq] = n_park\n\n dis = np.load('cache/dis.npy')\n\n for i in range(n_shops - 1):\n for j in range(i + 1, n_shops):\n dis[i, j] = 1 / dis[i, j]\n dis[j, i] = dis[i, j]\n\n poi = np.load('cache/poi.npy')\n\n # Prepare the training dataset\n cache_orders = []\n cache_masks = []\n cache_ext = []\n for month in [1]:\n mats = np.load('cache/mat%d.npy' % month)\n\n for day in range(1, day_of_month(month) + 1):\n tmp_order = 
np.full((n_shops,), -1.0, dtype=np.float32)\n tmp_mask = np.full((n_shops,), 0.0, dtype=np.float32)\n tmp_ext = np.zeros((n_shops, 7), dtype=np.float32)\n\n mat = mats[day]\n n_returns = np.sum(mat, 1)\n cur_dt = datetime(1991, month, day)\n\n for i in range(n_shops):\n seq = id2seq[i]\n\n if not is_open(shop_timeline, seq, cur_dt):\n continue\n\n n_return = n_returns[i]\n tmp_order[i] = n_return\n tmp_mask[i] = 1.0\n tmp_ext[i, cur_dt.weekday()] = 1.0\n\n cache_orders.append(tmp_order)\n cache_masks.append(tmp_mask)\n cache_ext.append(tmp_ext)\n\n cache_orders = np.transpose(np.array(cache_orders))\n cache_masks = np.transpose(np.array(cache_masks))\n cache_ext = np.transpose(np.array(cache_ext), [1, 0, 2])\n\n x = [] # The input\n o = [] # The number of orders in previous weeks\n\n\n ext_inp = [] # The day of the week\n\n mask1 = [] # The mask of the graph\n mask2 = [] # The mask of the prediction\n mask3 = [] # The mask of the new stations\n y = [] # The prediction\n\n for month in [1]:\n pickup_amounts = np.load('cache/pickup_amounts%d.npy' % month)\n return_amounts = np.load('cache/return_amounts%d.npy' % month)\n\n for day in range(15, day_of_month(month) + 1):\n print(month, day)\n pickup_amount = pickup_amounts[day]\n return_amount = return_amounts[day]\n\n cur_dt = datetime(1991, month, day)\n\n # Select the data from 1st, Jan, 1991 to 16th, Jan, 1991\n if datetime(1991, 1, 1) <= cur_dt < datetime(1991, 1, 16):\n\n t_x = np.zeros((n_shops, 649), dtype=np.float32)\n t_o = np.zeros((n_shops, 14, 1), dtype=np.float32)\n t_ext_inp = np.zeros((n_shops, 14, 7), dtype=np.float32)\n t_y = np.zeros((n_shops, 7, 1), dtype=np.float32)\n t_mask1 = np.zeros((n_shops, 1), dtype=np.float32)\n t_mask2 = np.zeros((n_shops, 7, 1), dtype=np.float32)\n t_mask3 = np.zeros((n_shops, 1), dtype=np.float32)\n\n for i in range(n_shops):\n seq = id2seq[i]\n\n n_park = n_parks[seq]\n n_pickup_amount = pickup_amount[i]\n n_return_amount = return_amount[i]\n\n t_x[i, :646] = shops[i, 227:]\n t_x[i, 646] = n_pickup_amount\n t_x[i, 647] = n_return_amount\n t_x[i, 648] = n_park\n\n if is_first_day(shop_timeline, seq, cur_dt):\n t_mask3[i] = 1.0\n\n\n a, _, c = get_prev(i,cache_orders, cache_masks, cache_ext, month, day, 1, 14)\n t_o[i] = np.expand_dims(a, 1)\n t_mask1[i] = 1.0\n t_ext_inp[i] = c\n\n\n a, b, c = get_next(i, cache_orders, cache_masks, cache_ext, month, day, 1)\n t_y[i] = np.expand_dims(a, 1)\n t_mask2[i] = np.expand_dims(b, 1)\n\n\n x.append(t_x)\n o.append(t_o)\n ext_inp.append(t_ext_inp)\n mask1.append(t_mask1)\n mask2.append(t_mask2)\n mask3.append(t_mask3)\n y.append(t_y)\n\n x = np.array(x)\n o = np.array(o)\n ext_inp = np.array(ext_inp)\n mask1 = np.array(mask1)\n mask2 = np.array(mask2)\n mask3 = np.array(mask3)\n y = np.array(y)\n\n if os.path.exists('dev/train') == False:\n os.makedirs('dev/train')\n\n\n np.save('dev/train/x', x)\n np.save('dev/train/o', o)\n np.save('dev/train/ext_inp', ext_inp)\n np.save('dev/train/mask1', mask1)\n np.save('dev/train/mask2', mask2)\n np.save('dev/train/mask3', mask3)\n np.save('dev/train/y', y)\n\n # Prepare the testing dataset\n cache_orders = []\n cache_masks = []\n cache_ext = []\n\n for month in [1]:\n mats = np.load('cache/mat%d.npy' % month)\n\n for day in range(1, day_of_month(month) + 1):\n tmp_order = np.full((n_shops,), -1.0, dtype=np.float32)\n tmp_mask = np.full((n_shops,), 0.0, dtype=np.float32)\n tmp_ext = np.zeros((n_shops, 7), dtype=np.float32)\n\n mat = mats[day]\n n_returns = np.sum(mat, 1)\n cur_dt = datetime(1991, month, day)\n\n for 
i in range(n_shops):\n seq = id2seq[i]\n\n if not is_open(shop_timeline, seq, cur_dt):\n continue\n\n n_return = n_returns[i]\n tmp_order[i] = n_return\n tmp_mask[i] = 1.0\n tmp_ext[i, cur_dt.weekday()] = 1.0\n\n cache_orders.append(tmp_order)\n cache_masks.append(tmp_mask)\n cache_ext.append(tmp_ext)\n\n cache_orders = np.transpose(np.array(cache_orders))\n cache_masks = np.transpose(np.array(cache_masks))\n cache_ext = np.transpose(np.array(cache_ext), [1, 0, 2])\n\n x = [] # The input\n o = [] # The number of orders in previous weeks\n ext_inp = [] # The day of the week\n mask1 = [] # The mask of the graph\n mask2 = [] # The mask of the prediction\n mask3 = [] # The mask of the new stations\n y = [] # The prediction\n\n for month in [1]:\n pickup_amounts = np.load('cache/pickup_amounts%d.npy' % month)\n return_amounts = np.load('cache/return_amounts%d.npy' % month)\n\n # 1 - 30\n for day in range(15, day_of_month(month) + 1):\n print(month, day)\n pickup_amount = pickup_amounts[day]\n return_amount = return_amounts[day]\n\n cur_dt = datetime(1991, month, day)\n\n # Select the data from 1st, Jan, 1991 to 16th, Jan, 1991\n if datetime(1991, 1, 1) <= cur_dt < datetime(1991, 1, 16):\n\n t_x = np.zeros((n_shops, 649), dtype=np.float32)\n t_o = np.zeros((n_shops, 14, 1), dtype=np.float32)\n t_y = np.zeros((n_shops, 7, 1), dtype=np.float32)\n t_ext_inp = np.zeros((n_shops, 14, 7), dtype=np.float32)\n t_mask1 = np.zeros((n_shops, 1), dtype=np.float32)\n t_mask2 = np.zeros((n_shops, 7, 1), dtype=np.float32)\n t_mask3 = np.zeros((n_shops, 1), dtype=np.float32)\n\n for i in range(n_shops):\n seq = id2seq[i]\n\n n_park = n_parks[seq]\n n_pickup_amount = pickup_amount[i]\n n_return_amount = return_amount[i]\n\n t_x[i, :646] = shops[i, 227:] # 1~4, 224~227\n t_x[i, 646] = n_pickup_amount\n t_x[i, 647] = n_return_amount\n t_x[i, 648] = n_park\n\n if is_first_day(shop_timeline, seq, cur_dt):\n t_mask3[i] = 1.0\n\n a, _, c = get_prev(i, cache_orders, cache_masks, cache_ext, month, day, 1, 14)\n t_o[i] = np.expand_dims(a, 1)\n t_mask1[i] = 1.0\n t_ext_inp[i] = c\n\n a, b, c = get_next(i, cache_orders, cache_masks, cache_ext, month, day, 1)\n t_y[i] = np.expand_dims(a, 1)\n t_mask2[i] = np.expand_dims(b, 1)\n\n x.append(t_x)\n o.append(t_o)\n ext_inp.append(t_ext_inp)\n mask1.append(t_mask1)\n mask2.append(t_mask2)\n mask3.append(t_mask3)\n y.append(t_y)\n\n x = np.array(x)\n o = np.array(o)\n mask1 = np.array(mask1)\n mask2 = np.array(mask2)\n mask3 = np.array(mask3)\n y = np.array(y)\n\n if os.path.exists('dev/test') == False:\n os.makedirs('dev/test')\n\n np.save('dev/test/x', x)\n np.save('dev/test/o', o)\n np.save('dev/test/ext_inp', ext_inp)\n np.save('dev/test/mask1', mask1)\n np.save('dev/test/mask2', mask2)\n np.save('dev/test/mask3', mask3)\n np.save('dev/test/y', y)\n\nif __name__ == \"__main__\":\n main()\n\n\n\n\n" ]
[ [ "numpy.ones", "numpy.load", "numpy.save", "numpy.sum", "numpy.zeros", "pandas.read_csv", "numpy.where", "numpy.expand_dims", "numpy.array", "numpy.genfromtxt", "numpy.full" ] ]
sushma-4/NEAT
[ "cb7394597acf1cd5824fcdc3b83308eaffbbe916" ]
[ "source/vcf_func.py" ]
[ "import io\nimport sys\nimport time\nimport gzip\nimport random\nimport pandas as pd\n\n\ndef parse_vcf(vcf_path: str, tumor_normal: bool = False, ploidy: int = 2,\n include_homs: bool = False, include_fail: bool = False, debug: bool = False,\n choose_random_ploid_if_no_gt_found: bool = True):\n\n tt = time.time()\n # Read in the raw vcf using pandas' csv reader.\n if vcf_path.endswith('.gz'):\n f = gzip.open(vcf_path)\n else:\n f = open(vcf_path, 'r')\n\n # quickest way I've found to read in the file:\n lines = [line for line in f if not line.startswith('##')]\n f.close()\n\n # Check to make sure header row is included\n if not lines[0].startswith('#CHROM'):\n print(f\"ERROR: Improper vcf header row for {vcf_path}. Check and re-run.\")\n sys.exit(1)\n else:\n lines[0] = lines[0].strip('#')\n # NOTE: if the vcf that is read in does not match the proper format, this read_csv command\n # will throw an error. This means you can't have data with no column header.\n variants = pd.read_csv(\n io.StringIO(''.join(lines)),\n dtype={'CHROM': str, 'POS': int, 'ID': str, 'REF': str, 'ALT': str,\n 'QUAL': str},\n sep='\\t'\n )\n\n # the following section is just some sanity checking.\n min_headers = ['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL']\n for i in range(len(min_headers)):\n if min_headers[i] != variants.columns[i]:\n print(f\"ERROR: VCF must contain the following headers, in order: {min_headers}\")\n sys.exit(1)\n if debug:\n optional_headers = ['FILTER', 'INFO', 'FORMAT']\n for j in range(len(optional_headers)):\n if optional_headers[j] != variants.columns[j]:\n print(f'Warning: missing optional header: {optional_headers[j]}.'\n f'Though not required, a full VCF with complete fields will be helpful.')\n\n # Check for homs and fails, and drop those rows unless otherwise specified\n if not include_homs:\n variants = variants.drop(variants[(variants['ALT'] == '.') |\n (variants['ALT'] == '') |\n (variants.apply(lambda row: all(j == row.ALT for j in row.REF), axis=1))\n ].index)\n if not include_fail:\n variants = variants.drop(variants[(variants['FILTER'] != 'PASS') &\n (variants['FILTER'] != '.')\n ].index)\n\n # If FORMAT is present in the vcf, there must be corresponding Sample columns.\n samp_cols = []\n if 'FORMAT' in variants.columns:\n # VCF spec says that all columns after FORMAT are sample columns.\n samp_cols = variants.columns[list(variants.columns).index('FORMAT') + 1:]\n if len(samp_cols):\n if len(samp_cols) == 1 and not tumor_normal:\n variants['sample_split'] = variants[samp_cols[0]].str.split(':')\n samp_cols = ['sample_split']\n elif len(samp_cols) >= 1 and not tumor_normal:\n print('More than one sample column present, only first sample column used.')\n variants['sample_split'] = variants[samp_cols[0]].str.split(':')\n samp_cols = ['sample_split']\n elif len(samp_cols) == 1 and tumor_normal:\n print(f'Tumor-Normal samples require both a tumor and normal column in the VCF. 
\n'\n f'Supplied samples = {list(samp_cols)}')\n sys.exit(1)\n elif len(samp_cols) > 1 and tumor_normal:\n normals = [label for label in samp_cols if 'normal' in label.lower()]\n tumors = [label for label in samp_cols if 'tumor' in label.lower()]\n if not (tumors and normals):\n print(\"ERROR: Input VCF for cancer must contain a column with a label containing 'tumor' \"\n \"and 'normal' (case-insensitive).\")\n sys.exit(1)\n if len(normals) > 1 or len(tumors) > 1:\n print(\"WARNING: If more than one tumor or normal column is present, \"\n \"only the first of each is used.\")\n samp_cols = [normals[0], tumors[0]]\n variants['normal_sample_split'] = variants[samp_cols[0]].str.split(':')\n variants['tumor_sample_split'] = variants[samp_cols[1]].str.split(':')\n samp_cols = ['normal_sample_split', 'tumor_sample_split']\n else:\n print('ERROR: Unconsidered case: you may have broken reality. Check your VCF for the proper number '\n 'of sample columns.')\n sys.exit(1)\n else:\n print('ERROR: If FORMAT column is present in VCF, there must be at least one sample column.')\n sys.exit(1)\n else:\n print(\"Warning: Input VCF files must have a FORMAT and SAMPLE column for variant insert to work.\")\n\n # Split fields with multiple datapoints, if present, into lists\n variants['alt_split'] = variants['ALT'].str.split(',')\n variants = variants.explode('alt_split')\n if 'INFO' in variants.columns:\n variants['info_split'] = variants['INFO'].str.split(';')\n if 'FORMAT' in variants.columns:\n variants['format_split'] = variants['FORMAT'].str.split(':')\n\n # The following block of code looks for allele frequencies in the VCF.\n # There may be a more clever way to look through these subfields, but I loop (just once) over all the rows.\n new_column = []\n printed_warning = False\n rows_to_delete = []\n n_skipped = 0\n # TODO find a more efficient way than looping over the rows\n # Nan's give errors, so let's fill them out quick.\n variants = variants.fillna('.')\n for index, row in variants.iterrows():\n af_numbers = []\n gt_numbers = []\n # Looking for AF in INFO field to get the allele frequency.\n if 'INFO' in variants.columns:\n found_af = False\n found_wp = False\n for info in row['info_split']:\n # breaks the loop once everything is found\n if found_af and found_wp:\n break\n\n # Looking for allele frequency (AF) in the info field\n if 'AF' in info and not found_af:\n # In case they try to do something like \"AF=0.5;AF=0.25\" instead of \"AF=0.5,0.25\"\n if not printed_warning and debug:\n print('Note: NEAT only uses the first AF in the info field of input VCF.')\n printed_warning = True\n found_af = True\n for i in info.split('=')[1].split(','):\n # If they didn't supply a number, but instead a '.' or a missing value or something, then\n # the try/except block should catch it.\n try:\n af_numbers.append(float(i))\n except ValueError:\n print(f\"Warning: format is off for AF on this row: {list(row)} \\n\"\n f\"Proceeding without AF for this record.\")\n af_numbers.append(None)\n # WP is NEAT's ploidy indicator. Check if it's there.\n elif 'WP' in info and not found_wp:\n found_wp = True\n for j in info.split('=')[1].split(','):\n # If they didn't supply a number, but instead a '.' 
or a missing value or something, then\n # the try/except block should catch it.\n gt_numbers.append(j)\n\n # If WP is present, we'll use that, otherwise look for GT in the FORMAT field.\n if 'FORMAT' in variants.columns and not gt_numbers:\n # Assuming there was data in the FORMAT column, this will find the first GT.\n if row['format_split'] != ['.']:\n for format_item in row['format_split']:\n # GT is the usual genotype indicator. will also need to search for the FORMAT field for this\n if 'GT' in format_item:\n if tumor_normal:\n # Look for the normal and tumor sample GTs by finding the index of the item in question\n to_add = [row['normal_sample_split']\n [row['format_split'].index(format_item)].replace('|', '/'),\n row['tumor_sample_split']\n [row['format_split'].index(format_item)].replace('|', '/')]\n gt_numbers.append(to_add)\n else:\n # Append the corresponding item from the sample.\n gt_numbers.append(row['sample_split'][row['format_split'].index(format_item)])\n # We've found it, so we can quit looking.\n break\n\n # If there is no GT or WP present, then we either choose a random ploid or skip\n else:\n if choose_random_ploid_if_no_gt_found:\n if not printed_warning:\n print('Warning: Found variants without a GT field, assuming heterozygous...')\n printed_warning = True\n tmp_list = []\n for i in range(len(row['alt_split'])):\n tmp = ['0'] * ploidy\n tmp[random.randint(0, ploidy-1)] = '1'\n tmp_list.append('/'.join(tmp))\n gt_numbers.extend(tmp_list)\n else:\n if not printed_warning:\n print('Warning: Found variants without a GT field, ignoring variants...')\n printed_warning = True\n rows_to_delete.append(index)\n # Trim unnecessary sequences from alleles\n while (len(row['REF']) > 1) and \\\n (all([n[-1] == row['REF'] for n in row['alt_split']])) and \\\n (all([len(n) > 1 for n in row['alt_split']])):\n variants.loc[index, 'REF'] = variants.loc[index, 'REF'][:-1]\n variants.loc[index, 'alt_split'] = [[n[:-1] for n in variants.loc[index, 'alt_split']]]\n # Check that the data are consistent\n if af_numbers:\n if len(af_numbers) < len(row['alt_split']):\n print(f\"ERROR: allele frequency (AF) field in INFO must match number of alternate alleles: \"\n f\"{list(row)}\")\n sys.exit(1)\n else:\n # Used None value if no AF was supplied\n af_numbers.extend([None] * max([len(row['alt_split']), 1]))\n if not gt_numbers:\n rows_to_delete.append(index)\n if debug:\n print(f'Skipping row because no genotype found:\\n{row}')\n else:\n # drop variants that aren't actually used\n for gt in gt_numbers:\n if gt == '0/0':\n rows_to_delete.append(index)\n if debug:\n print(f'Skipping row because of 0/0 genotype:\\n{row}')\n # Append column to form new AF and GT columns of the dataframe\n new_column.append([af_numbers, gt_numbers])\n # Add the new data to the table\n variants['AF'] = pd.DataFrame(new_column)[0]\n variants['GT'] = pd.DataFrame(new_column)[1]\n # drop rows with no genotype numbers\n variants = variants.drop(rows_to_delete)\n n_skipped += len(rows_to_delete)\n # drop rows with position <= 0 (count them before dropping)\n n_skipped += len(variants[variants[\"POS\"] <= 0].index)\n variants = variants.drop(variants[variants[\"POS\"] <= 0].index)\n # Delete rows where they try to insert more than one variant (count the duplicates before dropping)\n n_skipped_because_hash = len(variants.loc[variants.duplicated(subset=['CHROM', 'POS'])].index)\n variants = variants.loc[~variants.duplicated(subset=['CHROM', 'POS'])]\n\n variants = variants.sort_values(by=['CHROM', 'POS'])\n\n print(f'Found {len(variants.index)} valid variants 
in input vcf.')\n print(f' * {n_skipped} variants skipped: (qual filtered / ref genotypes / invalid syntax)')\n print(f' * {n_skipped_because_hash} variants skipped due to multiple variants found per position')\n print(f'vcf reading took: {int(time.time() - tt)} (sec)')\n print('--------------------------------')\n return list(samp_cols), variants\n" ]
[ [ "pandas.DataFrame" ] ]
nstarman/jas1101_project
[ "f54620b715eb2f7dbe7bd39d4a1e21e50bc06541" ]
[ "jas1101finalproject/utils.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"**DOCSTRING**.\n\ndescription\n\nRouting Listings\n----------------\n\n\"\"\"\n\n\n###############################################################################\n# IMPORTS\n\n# GENERAL\nimport numpy as np\nimport astropy.units as u\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams\nimport seaborn as sns\n\n\n# CUSTOM\n\n# PROJECT-SPECIFIC\n\n\n###############################################################################\n# PARAMETERS\n\nrcParams.update({\"figure.figsize\": [7, 5]})\nrcParams.update({\"xtick.major.pad\": \"5.0\"})\nrcParams.update({\"xtick.major.size\": \"4\"})\nrcParams.update({\"xtick.major.width\": \"1.\"})\nrcParams.update({\"xtick.minor.pad\": \"5.0\"})\nrcParams.update({\"xtick.minor.size\": \"4\"})\nrcParams.update({\"xtick.minor.width\": \"0.8\"})\nrcParams.update({\"ytick.major.pad\": \"5.0\"})\nrcParams.update({\"ytick.major.size\": \"4\"})\nrcParams.update({\"ytick.major.width\": \"1.\"})\nrcParams.update({\"ytick.minor.pad\": \"5.0\"})\nrcParams.update({\"ytick.minor.size\": \"4\"})\nrcParams.update({\"ytick.minor.width\": \"0.8\"})\nrcParams.update({\"axes.labelsize\": 14})\nrcParams.update({\"font.size\": 14})\n\n\n###############################################################################\n# CODE\n###############################################################################\n\n\[email protected]_input(angle=u.deg, distance=\"length\")\ndef convert_angle(angle, distance) -> u.kpc:\n \"\"\"convert_angle.\n\n Parameters\n ----------\n angle : Quantity\n deg\n distance : Quantity\n kpc\n\n Returns\n -------\n Quantity\n\n \"\"\"\n return distance * np.tan(angle)\n\n\n# /def\n\n# --------------------------------------------------------------------------\n\n\[email protected]_input(angle=u.mas / u.yr, distance=\"length\")\ndef convert_pm_angular(velocity, distance) -> u.km / u.s:\n \"\"\"convert_pm_angular.\n\n Parameters\n ----------\n velocity : Quantity\n mas/yr\n distance : Quantity\n kpc\n\n Returns\n -------\n velocity : Quantity\n\n \"\"\"\n v = velocity.to(u.mas / u.yr)\n return distance * np.tan(v * u.yr) / u.yr\n\n\n# /def\n\n# --------------------------------------------------------------------------\ndef clip_quantile_nd(z, z_quantile=None, ind_clip=[1,2], return_func=False):\n \"\"\" Clip function based on quantile for N-d array.\n \n Parameters\n ----------\n z : N-d array [N_samples, N_dimensions]\n z_quantile : quantile [lower, upper] (float: 0 ~ 1)\n ind_clip : which columns of z to clip\n return_func : whether to return a function or the clipped array\n \n Example\n ----------\n good_pm = clip_quantile_1d(np.vstack([r, pmx, pmy]).T)\n \n Return\n ----------\n A function or N-d array.\n \n \"\"\"\n \n if z_quantile is None:\n z_quantile = [0.001, 0.999]\n \n z_clip = np.quantile(z, z_quantile, axis=0)\n n_dim = z.shape[1]\n \n clip = lambda z_: np.logical_and.reduce([(z_[:,j] > z_clip[0,j]) & (z_[:,j] < z_clip[1:,j]) for j in ind_clip], axis=0)\n \n if return_func:\n return clip\n else:\n return clip(z)\n \ndef clip_quantile_1d(z, z_quantile=None, return_func=False):\n \"\"\" Clip function based on given quantile.\n \n Parameters\n ----------\n z : 1d array\n z_quantile : quantile [lower, upper] (float: 0 ~ 1)\n return_func : whether to return a function or the clipped array\n \n Example\n ----------\n good_pmx = clip_quantile_1d(pmx)\n \n Return\n ----------\n A function or N-d array.\n \n \"\"\"\n \n if z_quantile is None:\n z_quantile = [0.001, 0.999]\n \n z_clip = np.quantile(z, 
z_quantile)\n \n clip = lambda z_: (z_ > z_clip[0]) & (z_ < z_clip[1])\n \n if return_func:\n return clip\n else:\n return clip(z)\n\ndef profile_binning(\n r,\n z,\n bins,\n z_name=\"pm\",\n z_clip=None,\n z_quantile=None,\n return_bin=True,\n plot=True,\n):\n \"\"\"Bin the given quantity z in r.\n \n Parameters\n ----------\n r: 1d array, binned x values\n z: 1d array, binned y values\n bins: 1d array, bins\n \n Returns\n --------\n r_rbin : 1d array, mean r in bins\n z_rbin : 1d array, mean z in bins\n z_bins : dict, numbers for bins\n \n \"\"\"\n \n if z_clip is None:\n clip = clip_quantile_1d(z, z_quantile, return_func=True)\n else:\n clip = lambda z_: (z_ > z_clip[0]) & (z_ < z_clip[1])\n \n z_bins = {}\n\n if plot:\n fig, ax = plt.subplots(1, 1, figsize=(8, 6))\n\n # Clip by bins\n for k, b in enumerate(bins[:-1]):\n in_bin = (bins[k] <= r) & (r < bins[k + 1])\n clipped = clip(z[in_bin])\n z_in_bin = z[in_bin][clipped]\n r_in_bin = r[in_bin][clipped]\n\n z_bin = {z_name: z_in_bin, \"r\": r_in_bin}\n z_bins[k] = z_bin\n\n if plot:\n lab = \"{0:.2f}<r<{1:.2f}\".format(bins[k], bins[k + 1])\n sns.distplot(\n z_in_bin,\n hist=False,\n kde_kws={\"lw\": 2, \"alpha\": 0.9},\n label=lab,\n )\n\n r_rbin, z_rbin = get_mean_rbins(z_bins, z_name=z_name)\n \n z_bins = z_bins if return_bin else None\n \n return r_rbin, z_rbin, z_bins\n\n# --------------------------------------------------------------------------\n\n\ndef get_mean_rbins(z_bins, z_name=\"pm\"):\n \"\"\"Get mean of radial bins.\"\"\"\n res = np.array(\n [\n [np.mean(val[\"r\"]), np.mean(val[z_name])]\n for key, val in z_bins.items()\n ]\n )\n r_rbin, z_rbin = res[:, 0], res[:, 1]\n return r_rbin, z_rbin\n\n\n###############################################################################\n# Command Line\n###############################################################################\n\n\n###############################################################################\n# END\n" ]
[ [ "numpy.logical_and.reduce", "numpy.quantile", "matplotlib.pyplot.subplots", "numpy.tan", "matplotlib.rcParams.update", "numpy.mean" ] ]
vincentkslim/IPC
[ "eb702ead6f23a1dc0be39c9f5a0fd62c80abeb98" ]
[ "tools/process_IP_results.py" ]
[ "\"\"\"Process simulation results.\"\"\"\n\nimport sys\nimport os\nimport pathlib\nimport mmap\n\nimport numpy\nimport pandas\n\ntimesteps = (1e-2, 1e-3, 1e-4, 1e-5)\n\n\ndef save_results_csv(results):\n \"\"\"Save results to seperate CSV files.\"\"\"\n with open(\"results-IP.csv\", \"w\", newline=\"\") as f:\n scene_names = sorted(list(results.keys()))\n for scene_name in scene_names:\n f.write(f\"{scene_name}\\n\")\n results[scene_name].to_csv(f, header=False, index=False)\n f.write(\"\\n\")\n\n\ndef check_error_file(log_path):\n err_log = log_path.with_suffix(\"\").with_suffix(\".err.txt\")\n if not err_log.exists():\n err_log = log_path.with_suffix(\".err\")\n if not err_log.exists():\n return \"Incomplete\"\n with open(err_log) as err_file:\n s = mmap.mmap(err_file.fileno(), 0, access=mmap.ACCESS_READ)\n if s.find(b\"out-of-memory\") != -1 or s.find(b\"what(): vector::reserve\") != -1:\n return \"Out-of-Memory\"\n elif s.find(b\"TIME LIMIT\") != -1:\n return \"Timeout\"\n elif s.find(b\"GRBException\") != -1:\n return \"GRBException\"\n return \"Incomplete\"\n\n\ndef get_sim_status(log_path):\n with open(log_path) as log_file:\n s = mmap.mmap(log_file.fileno(), 0, access=mmap.ACCESS_READ)\n if s.find(b\"intersecting state\") != -1:\n return \"Intersecting\"\n if s.find(b\"blow-up\") != -1:\n return \"Blow-Up\"\n if s.find(b\"simulation finished\") != -1:\n return \"Pass\"\n return check_error_file(log_path)\n\n\ndef add_scene_to_results(results, scene_name, default_result):\n if scene_name in results:\n return\n results[scene_name] = default_result.copy()\n\n\ndef main():\n \"\"\"Process simulation results.\"\"\"\n vals = numpy.full((len(timesteps), 1), \"\")\n default_result = pandas.DataFrame(vals, index=timesteps)\n\n results = {}\n results_path = pathlib.Path(sys.argv[1])\n for log_dir in results_path.glob(\"**/logs\"):\n for log in log_dir.iterdir():\n if (log.with_suffix(\"\").suffix != \".out\"\n or os.stat(log).st_size == 0\n or str(log.name)[:2] == \"._\"):\n continue\n # meshCO_pointTriangleCO__IP_1e-2.out.txt\n scene_name, params = log.name.split(\"__\")\n params = params.split(\".\")[0].split(\"_\")\n if params[0] != \"IP\":\n continue\n constraint_solver, timestep = params\n timestep = float(timestep)\n if scene_name not in results:\n add_scene_to_results(results, scene_name, default_result)\n results[scene_name][timestep] = get_sim_status(log)\n save_results_csv(results)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.DataFrame" ] ]
geisten/bot
[ "76d4aef279cd168f6cbf7994055c1d289329e49c" ]
[ "backtest/simulator.py" ]
[ "\"\"\"Create and save random price data\"\"\"\nfrom random import random\nimport os\nfrom datetime import datetime, timedelta\nimport pandas as pd # type: ignore\n\n\ndef random_walker(data_length: int):\n \"\"\"Create a random walk data list\"\"\"\n # seed(1)\n random_walk = list()\n random_walk.append(-1 if random() < 0.5 else 1) # nosec\n for i in range(0, data_length):\n movement = -1 if random() < 0.5 else 1 # nosec\n value = random_walk[i] + movement\n random_walk.append(value)\n return random_walk\n\n\ndef load_data(filename: str, offset: float, variance: float):\n \"\"\"Load the created price curve from file\"\"\"\n if not os.path.exists(filename):\n # prob = [0.05, 0.95]\n data_length = 1000\n positions = random_walker(data_length)\n\n date_today = datetime.now()\n minutes = pd.date_range(\n date_today, date_today + timedelta(0, 60 * data_length), freq='min')\n data = pd.DataFrame({'Coin1': positions}, index=minutes)\n data[\"Coin1\"] = offset + data[\"Coin1\"] * variance\n data.to_pickle(filename)\n else:\n data = pd.read_pickle(filename)\n\n return data\n\n\ndef load_csv_data(filename: str, offset: float, variance: float):\n \"\"\"Load the created price curve from csv file\"\"\"\n if not os.path.exists(filename):\n # prob = [0.05, 0.95]\n data_length = 1000\n positions = random_walker(data_length)\n\n date_today = datetime.now()\n minutes = pd.date_range(\n date_today, date_today + timedelta(0, 60 * data_length), freq='min')\n data = pd.DataFrame({'Coin1': positions}, index=minutes)\n data[\"Coin1\"] = offset + data[\"Coin1\"] * variance\n data.to_csv(filename)\n else:\n data = pd.read_csv(filename)\n\n return data\n" ]
[ [ "pandas.read_csv", "pandas.read_pickle", "pandas.DataFrame" ] ]
aseaday/ray
[ "673ecd1241934c644bca7cf92cb5c55a993b5e51" ]
[ "rllib/agents/ars/ars.py" ]
[ "# Code in this file is copied and adapted from\n# https://github.com/openai/evolution-strategies-starter and from\n# https://github.com/modestyachts/ARS\n\nfrom collections import namedtuple\nimport logging\nimport numpy as np\nimport random\nimport time\n\nimport ray\nfrom ray.rllib.agents import Trainer, with_common_config\nfrom ray.rllib.agents.ars.ars_tf_policy import ARSTFPolicy\nfrom ray.rllib.agents.es import optimizers, utils\nfrom ray.rllib.agents.es.es_tf_policy import rollout\nfrom ray.rllib.env.env_context import EnvContext\nfrom ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID\nfrom ray.rllib.utils.annotations import override\nfrom ray.rllib.utils.deprecation import Deprecated\nfrom ray.rllib.utils.torch_utils import set_torch_seed\nfrom ray.rllib.utils.typing import TrainerConfigDict\nfrom ray.rllib.utils import FilterManager\n\nlogger = logging.getLogger(__name__)\n\nResult = namedtuple(\n \"Result\",\n [\n \"noise_indices\",\n \"noisy_returns\",\n \"sign_noisy_returns\",\n \"noisy_lengths\",\n \"eval_returns\",\n \"eval_lengths\",\n ],\n)\n\n# fmt: off\n# __sphinx_doc_begin__\nDEFAULT_CONFIG = with_common_config({\n \"action_noise_std\": 0.0,\n \"noise_stdev\": 0.02, # std deviation of parameter noise\n \"num_rollouts\": 32, # number of perturbs to try\n \"rollouts_used\": 32, # number of perturbs to keep in gradient estimate\n \"num_workers\": 2,\n \"sgd_stepsize\": 0.01, # sgd step-size\n \"observation_filter\": \"MeanStdFilter\",\n \"noise_size\": 250000000,\n \"eval_prob\": 0.03, # probability of evaluating the parameter rewards\n \"report_length\": 10, # how many of the last rewards we average over\n \"offset\": 0,\n # ARS will use Trainer's evaluation WorkerSet (if evaluation_interval > 0).\n # Therefore, we must be careful not to use more than 1 env per eval worker\n # (would break ARSPolicy's compute_single_action method) and to not do\n # obs-filtering.\n \"evaluation_config\": {\n \"num_envs_per_worker\": 1,\n \"observation_filter\": \"NoFilter\"\n },\n})\n# __sphinx_doc_end__\n# fmt: on\n\n\[email protected]\ndef create_shared_noise(count):\n \"\"\"Create a large array of noise to be shared by all workers.\"\"\"\n seed = 123\n noise = np.random.RandomState(seed).randn(count).astype(np.float32)\n return noise\n\n\nclass SharedNoiseTable:\n def __init__(self, noise):\n self.noise = noise\n assert self.noise.dtype == np.float32\n\n def get(self, i, dim):\n return self.noise[i : i + dim]\n\n def sample_index(self, dim):\n return np.random.randint(0, len(self.noise) - dim + 1)\n\n def get_delta(self, dim):\n idx = self.sample_index(dim)\n return idx, self.get(idx, dim)\n\n\[email protected]\nclass Worker:\n def __init__(self, config, env_creator, noise, worker_index, min_task_runtime=0.2):\n\n # Set Python random, numpy, env, and torch/tf seeds.\n seed = config.get(\"seed\")\n if seed is not None:\n # Python random module.\n random.seed(seed)\n # Numpy.\n np.random.seed(seed)\n # Torch.\n if config.get(\"framework\") == \"torch\":\n set_torch_seed(seed)\n\n self.min_task_runtime = min_task_runtime\n self.config = config\n self.config[\"single_threaded\"] = True\n self.noise = SharedNoiseTable(noise)\n\n env_context = EnvContext(config[\"env_config\"] or {}, worker_index)\n self.env = env_creator(env_context)\n # Seed the env, if gym.Env.\n if not hasattr(self.env, \"seed\"):\n logger.info(\"Env doesn't support env.seed(): {}\".format(self.env))\n # Gym.env.\n else:\n self.env.seed(seed)\n\n from ray.rllib import models\n\n self.preprocessor = 
models.ModelCatalog.get_preprocessor(self.env)\n\n policy_cls = get_policy_class(config)\n self.policy = policy_cls(\n self.env.observation_space, self.env.action_space, config\n )\n\n @property\n def filters(self):\n return {DEFAULT_POLICY_ID: self.policy.observation_filter}\n\n def sync_filters(self, new_filters):\n for k in self.filters:\n self.filters[k].sync(new_filters[k])\n\n def get_filters(self, flush_after=False):\n return_filters = {}\n for k, f in self.filters.items():\n return_filters[k] = f.as_serializable()\n if flush_after:\n f.clear_buffer()\n return return_filters\n\n def rollout(self, timestep_limit, add_noise=False):\n rollout_rewards, rollout_fragment_length = rollout(\n self.policy,\n self.env,\n timestep_limit=timestep_limit,\n add_noise=add_noise,\n offset=self.config[\"offset\"],\n )\n return rollout_rewards, rollout_fragment_length\n\n def do_rollouts(self, params, timestep_limit=None):\n # Set the network weights.\n self.policy.set_flat_weights(params)\n\n noise_indices, returns, sign_returns, lengths = [], [], [], []\n eval_returns, eval_lengths = [], []\n\n # Perform some rollouts with noise.\n while len(noise_indices) == 0:\n if np.random.uniform() < self.config[\"eval_prob\"]:\n # Do an evaluation run with no perturbation.\n self.policy.set_flat_weights(params)\n rewards, length = self.rollout(timestep_limit, add_noise=False)\n eval_returns.append(rewards.sum())\n eval_lengths.append(length)\n else:\n # Do a regular run with parameter perturbations.\n noise_index = self.noise.sample_index(self.policy.num_params)\n\n perturbation = self.config[\"noise_stdev\"] * self.noise.get(\n noise_index, self.policy.num_params\n )\n\n # These two sampling steps could be done in parallel on\n # different actors letting us update twice as frequently.\n self.policy.set_flat_weights(params + perturbation)\n rewards_pos, lengths_pos = self.rollout(timestep_limit)\n\n self.policy.set_flat_weights(params - perturbation)\n rewards_neg, lengths_neg = self.rollout(timestep_limit)\n\n noise_indices.append(noise_index)\n returns.append([rewards_pos.sum(), rewards_neg.sum()])\n sign_returns.append(\n [np.sign(rewards_pos).sum(), np.sign(rewards_neg).sum()]\n )\n lengths.append([lengths_pos, lengths_neg])\n\n return Result(\n noise_indices=noise_indices,\n noisy_returns=returns,\n sign_noisy_returns=sign_returns,\n noisy_lengths=lengths,\n eval_returns=eval_returns,\n eval_lengths=eval_lengths,\n )\n\n\ndef get_policy_class(config):\n if config[\"framework\"] == \"torch\":\n from ray.rllib.agents.ars.ars_torch_policy import ARSTorchPolicy\n\n policy_cls = ARSTorchPolicy\n else:\n policy_cls = ARSTFPolicy\n return policy_cls\n\n\nclass ARSTrainer(Trainer):\n \"\"\"Large-scale implementation of Augmented Random Search in Ray.\"\"\"\n\n @classmethod\n @override(Trainer)\n def get_default_config(cls) -> TrainerConfigDict:\n return DEFAULT_CONFIG\n\n @override(Trainer)\n def validate_config(self, config: TrainerConfigDict) -> None:\n # Call super's validation method.\n super().validate_config(config)\n\n if config[\"num_gpus\"] > 1:\n raise ValueError(\"`num_gpus` > 1 not yet supported for ARS!\")\n if config[\"num_workers\"] <= 0:\n raise ValueError(\"`num_workers` must be > 0 for ARS!\")\n if config[\"evaluation_config\"][\"num_envs_per_worker\"] != 1:\n raise ValueError(\n \"`evaluation_config.num_envs_per_worker` must always be 1 for \"\n \"ARS! 
To parallelize evaluation, increase \"\n \"`evaluation_num_workers` to > 1.\"\n )\n if config[\"evaluation_config\"][\"observation_filter\"] != \"NoFilter\":\n raise ValueError(\n \"`evaluation_config.observation_filter` must always be \"\n \"`NoFilter` for ARS!\"\n )\n\n @override(Trainer)\n def setup(self, config):\n # Setup our config: Merge the user-supplied config (which could\n # be a partial config dict with the class' default).\n self.config = self.merge_trainer_configs(\n self.get_default_config(), config, self._allow_unknown_configs\n )\n\n # Validate our config dict.\n self.validate_config(self.config)\n\n # Generate `self.env_creator` callable to create an env instance.\n self.env_creator = self._get_env_creator_from_env_id(self._env_id)\n # Generate the local env.\n env_context = EnvContext(self.config[\"env_config\"] or {}, worker_index=0)\n env = self.env_creator(env_context)\n\n self.callbacks = self.config[\"callbacks\"]()\n\n self._policy_class = get_policy_class(self.config)\n self.policy = self._policy_class(\n env.observation_space, env.action_space, self.config\n )\n self.optimizer = optimizers.SGD(self.policy, self.config[\"sgd_stepsize\"])\n\n self.rollouts_used = self.config[\"rollouts_used\"]\n self.num_rollouts = self.config[\"num_rollouts\"]\n self.report_length = self.config[\"report_length\"]\n\n # Create the shared noise table.\n logger.info(\"Creating shared noise table.\")\n noise_id = create_shared_noise.remote(self.config[\"noise_size\"])\n self.noise = SharedNoiseTable(ray.get(noise_id))\n\n # Create the actors.\n logger.info(\"Creating actors.\")\n self.workers = [\n Worker.remote(self.config, self.env_creator, noise_id, idx + 1)\n for idx in range(self.config[\"num_workers\"])\n ]\n\n self.episodes_so_far = 0\n self.reward_list = []\n self.tstart = time.time()\n\n @override(Trainer)\n def get_policy(self, policy=DEFAULT_POLICY_ID):\n if policy != DEFAULT_POLICY_ID:\n raise ValueError(\n \"ARS has no policy '{}'! 
Use {} \"\n \"instead.\".format(policy, DEFAULT_POLICY_ID)\n )\n return self.policy\n\n @override(Trainer)\n def step_attempt(self):\n config = self.config\n\n theta = self.policy.get_flat_weights()\n assert theta.dtype == np.float32\n assert len(theta.shape) == 1\n\n # Put the current policy weights in the object store.\n theta_id = ray.put(theta)\n # Use the actors to do rollouts, note that we pass in the ID of the\n # policy weights.\n results, num_episodes, num_timesteps = self._collect_results(\n theta_id, config[\"num_rollouts\"]\n )\n\n all_noise_indices = []\n all_training_returns = []\n all_training_lengths = []\n all_eval_returns = []\n all_eval_lengths = []\n\n # Loop over the results.\n for result in results:\n all_eval_returns += result.eval_returns\n all_eval_lengths += result.eval_lengths\n\n all_noise_indices += result.noise_indices\n all_training_returns += result.noisy_returns\n all_training_lengths += result.noisy_lengths\n\n assert len(all_eval_returns) == len(all_eval_lengths)\n assert (\n len(all_noise_indices)\n == len(all_training_returns)\n == len(all_training_lengths)\n )\n\n self.episodes_so_far += num_episodes\n\n # Assemble the results.\n eval_returns = np.array(all_eval_returns)\n eval_lengths = np.array(all_eval_lengths)\n noise_indices = np.array(all_noise_indices)\n noisy_returns = np.array(all_training_returns)\n noisy_lengths = np.array(all_training_lengths)\n\n # keep only the best returns\n # select top performing directions if rollouts_used < num_rollouts\n max_rewards = np.max(noisy_returns, axis=1)\n if self.rollouts_used > self.num_rollouts:\n self.rollouts_used = self.num_rollouts\n\n percentile = 100 * (1 - (self.rollouts_used / self.num_rollouts))\n idx = np.arange(max_rewards.size)[\n max_rewards >= np.percentile(max_rewards, percentile)\n ]\n noise_idx = noise_indices[idx]\n noisy_returns = noisy_returns[idx, :]\n\n # Compute and take a step.\n g, count = utils.batched_weighted_sum(\n noisy_returns[:, 0] - noisy_returns[:, 1],\n (self.noise.get(index, self.policy.num_params) for index in noise_idx),\n batch_size=min(500, noisy_returns[:, 0].size),\n )\n g /= noise_idx.size\n # scale the returns by their standard deviation\n if not np.isclose(np.std(noisy_returns), 0.0):\n g /= np.std(noisy_returns)\n assert g.shape == (self.policy.num_params,) and g.dtype == np.float32\n # Compute the new weights theta.\n theta, update_ratio = self.optimizer.update(-g)\n # Set the new weights in the local copy of the policy.\n self.policy.set_flat_weights(theta)\n # update the reward list\n if len(all_eval_returns) > 0:\n self.reward_list.append(eval_returns.mean())\n\n # Now sync the filters\n FilterManager.synchronize(\n {DEFAULT_POLICY_ID: self.policy.observation_filter}, self.workers\n )\n\n info = {\n \"weights_norm\": np.square(theta).sum(),\n \"weights_std\": np.std(theta),\n \"grad_norm\": np.square(g).sum(),\n \"update_ratio\": update_ratio,\n \"episodes_this_iter\": noisy_lengths.size,\n \"episodes_so_far\": self.episodes_so_far,\n }\n result = dict(\n episode_reward_mean=np.mean(self.reward_list[-self.report_length :]),\n episode_len_mean=eval_lengths.mean(),\n timesteps_this_iter=noisy_lengths.sum(),\n info=info,\n )\n\n return result\n\n @override(Trainer)\n def cleanup(self):\n # workaround for https://github.com/ray-project/ray/issues/1516\n for w in self.workers:\n w.__ray_terminate__.remote()\n\n @override(Trainer)\n def compute_single_action(self, observation, *args, **kwargs):\n action, _, _ = self.policy.compute_actions([observation], 
update=True)\n if kwargs.get(\"full_fetch\"):\n return action[0], [], {}\n return action[0]\n\n @Deprecated(new=\"compute_single_action\", error=True)\n def compute_action(self, observation, *args, **kwargs):\n return self.compute_single_action(observation, *args, **kwargs)\n\n @override(Trainer)\n def _sync_weights_to_workers(self, *, worker_set=None, workers=None):\n # Broadcast the new policy weights to all evaluation workers.\n assert worker_set is not None\n logger.info(\"Synchronizing weights to evaluation workers.\")\n weights = ray.put(self.policy.get_flat_weights())\n worker_set.foreach_policy(lambda p, pid: p.set_flat_weights(ray.get(weights)))\n\n def _collect_results(self, theta_id, min_episodes):\n num_episodes, num_timesteps = 0, 0\n results = []\n while num_episodes < min_episodes:\n logger.debug(\n \"Collected {} episodes {} timesteps so far this iter\".format(\n num_episodes, num_timesteps\n )\n )\n rollout_ids = [\n worker.do_rollouts.remote(theta_id) for worker in self.workers\n ]\n # Get the results of the rollouts.\n for result in ray.get(rollout_ids):\n results.append(result)\n # Update the number of episodes and the number of timesteps\n # keeping in mind that result.noisy_lengths is a list of lists,\n # where the inner lists have length 2.\n num_episodes += sum(len(pair) for pair in result.noisy_lengths)\n num_timesteps += sum(sum(pair) for pair in result.noisy_lengths)\n\n return results, num_episodes, num_timesteps\n\n def __getstate__(self):\n return {\n \"weights\": self.policy.get_flat_weights(),\n \"filter\": self.policy.observation_filter,\n \"episodes_so_far\": self.episodes_so_far,\n }\n\n def __setstate__(self, state):\n self.episodes_so_far = state[\"episodes_so_far\"]\n self.policy.set_flat_weights(state[\"weights\"])\n self.policy.observation_filter = state[\"filter\"]\n FilterManager.synchronize(\n {DEFAULT_POLICY_ID: self.policy.observation_filter}, self.workers\n )\n" ]
[ [ "numpy.random.uniform", "numpy.sign", "numpy.random.seed", "numpy.arange", "numpy.random.RandomState", "numpy.max", "numpy.array", "numpy.std", "numpy.square", "numpy.percentile", "numpy.mean" ] ]
Little-gg/tensorflow_learn
[ "3cb017e5745a20482d5e192a6840aac750e4c567" ]
[ "2_mnist_AGN/main.py" ]
[ "#/usr/bin/env python\n# encoding: utf-8\n\nimport numpy as np\nimport sklearn.preprocessing as prep\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n\ndef xavier_init(fan_in, fan_out, constant = 1):\n low = -constant * np.sqrt(6.0 / (fan_in + fan_out))\n high = constant * np.sqrt(6.0 / (fan_in + fan_out))\n return tf.random_uniform((fan_in, fan_out),\n minval = low, maxval = high,\n dtype = tf.float32)\n\n\nclass AdditiveGaussianNoiseAutoencoder(object):\n def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus,\n optimizer = tf.train.AdamOptimizer(), scale=0.1):\n self.n_input = n_input\n self.n_hidden = n_hidden\n self.transfer = transfer_function\n self.scale = tf.placeholder(tf.float32)\n self.training_scale = scale\n network_weights = self._initialize_weights()\n self.weights = network_weights\n\n self.x = tf.placeholder(tf.float32, [None, self.n_input])\n self.hidden = self.transfer(tf.add(tf.matmul(\n self.x + scale * tf.random_normal((n_input,)),\n self.weights['w1']), self.weights['b1']))\n self.reconstruction = tf.add(tf.matmul(self.hidden,\n self.weights['w2']),\n self.weights['b2'])\n\n self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(\n self.reconstruction, self.x), 2.0))\n self.optimizer = optimizer.minimize(self.cost)\n\n init = tf.global_variables_initializer()\n self.sess = tf.Session()\n self.sess.run(init)\n\n def _initialize_weights(self):\n all_weights = dict()\n all_weights['w1'] = tf.Variable(xavier_init(self.n_input,\n self.n_hidden))\n all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden],\n dtype = tf.float32))\n all_weights['w1'] = tf.Variable(tf.zeros([self.n_hidden,\n self.n_input],\n dtype = tf.float32))\n all_weights['b2'] = tf.Variable(tf.zeros([self.n_hidden],\n dtype = tf.float32))\n return all_weights\n\n def partial_fit(self, x):\n cost, opt = self.sess.run((self.cost, self.optimizer),\n feed_dict = {self.x: X, self.scale: self.training_scale})\n return cost\n\n def calc_total_cost(self, X):\n return self.sess.run(self.cost, feed_dict = {self.x: X,\n self.scale: self.training_scale})\n\n def transform(self, X):\n return self.sess.run(self.hidden, feed_dict = {self.x: X,\n self.scale: self.training_scale})\n\n def generate(self, hidden = None):\n if hidden is None:\n hidden = np.random.normal(size = self.weights[\"b1\"])\n return self.sess.run(self.reconstruction,\n feed_dict = {self.hidden: hidden})\n\n def reconstruct(self, X):\n return self.sess.run(self.reconstruction, feed_dict = {self.x: X,\n self.scale: self.training_scale})\n\n def getWeights(self):\n return self.sess.run(self.weights['w1'])\n\n def getBiases(self):\n return self.sess.run(self.weights['b1'])\n\n\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n\ndef standard_scale(X_train, X_test):\n preprocessor = prep.StandardScaler().fit(X_train)\n X_train = preprocessor.transform(X_train)\n X_test = preprocessor.transform(X_test)\n return X_train, X_test\n\ndef get_random_block_from_data(data, batch_size):\n start_index = np.random.randint(0, len(data) - batch_size)\n return data[start_index:(start_index + batch_size)]\n\nX_train, X_test = standard_scale(mnist.train.images, mnist.test.images)\n\nn_samples = int(mnist.train.num_examples)\ntraining_epochs = 20\nbatch_size = 128\ndisplay_step = 1\n\nautoencoder = AdditiveGaussianNoiseAutoencoder(n_input = 784,\n n_hidden = 200,\n transfer_function=tf.nn.softplus,\n optimizer = tf.train.AdamOptimizer(learning_rate = 0.001),\n scale = 0.01)\n\nfor epoch in 
range(training_epochs):\n avg_cost = 0.\n total_batch = int(n_samples / batch_size)\n for i in range(total_batch):\n batch_xs = get_random_block_from_data(X_train, batch_size)\n cost = autoencoder.partial_fit(batch_xs)\n avg_cost += cost / n_samples * batch_size\n\n if epoch % display_step == 0:\n print(\"Epoch:\", '%04d' % (epoch +1), \"cost=\",\n \"{:.9f}\".format(avg_cost))\n print(\"Total cost: \" + str(autoencoder.calc_total_cost(X_test)))\n" ]
[ [ "tensorflow.placeholder", "tensorflow.zeros", "tensorflow.global_variables_initializer", "tensorflow.subtract", "tensorflow.train.AdamOptimizer", "numpy.random.normal", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "tensorflow.random_uniform", "tensorflow.matmul", "tensorflow.Session", "sklearn.preprocessing.StandardScaler", "numpy.sqrt", "tensorflow.random_normal" ] ]
Mikehem/tfx
[ "b1acab7bf89ec1364c96b9b4e2cc41594407b86c" ]
[ "tfx/extensions/google_cloud_ai_platform/pusher/executor_test.py" ]
[ "# Lint as: python2, python3\n# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tfx.extensions.google_cloud_ai_platform.pusher.executor.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport os\nfrom typing import Any, Dict, Text\n# Standard Imports\nimport mock\nimport tensorflow as tf\n\nfrom tfx.components.pusher import executor as tfx_pusher_executor\nfrom tfx.dsl.io import fileio\nfrom tfx.extensions.google_cloud_ai_platform.pusher import executor\nfrom tfx.types import standard_artifacts\nfrom tfx.utils import json_utils\nfrom tfx.utils import telemetry_utils\n\n\nclass ExecutorTest(tf.test.TestCase):\n\n def setUp(self):\n super(ExecutorTest, self).setUp()\n self._source_data_dir = os.path.join(\n os.path.dirname(\n os.path.dirname(os.path.dirname(os.path.dirname(__file__)))),\n 'components', 'testdata')\n self._output_data_dir = os.path.join(\n os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),\n self._testMethodName)\n fileio.makedirs(self._output_data_dir)\n self._model_export = standard_artifacts.Model()\n self._model_export.uri = os.path.join(self._source_data_dir,\n 'trainer/current')\n self._model_blessing = standard_artifacts.ModelBlessing()\n self._input_dict = {\n tfx_pusher_executor.MODEL_KEY: [self._model_export],\n tfx_pusher_executor.MODEL_BLESSING_KEY: [self._model_blessing],\n }\n\n self._model_push = standard_artifacts.PushedModel()\n self._model_push.uri = os.path.join(self._output_data_dir, 'model_push')\n fileio.makedirs(self._model_push.uri)\n self._output_dict = {\n tfx_pusher_executor.PUSHED_MODEL_KEY: [self._model_push],\n }\n # Dict format of exec_properties. 
custom_config needs to be serialized\n # before being passed into Do function.\n self._exec_properties = {\n 'custom_config': {\n executor.SERVING_ARGS_KEY: {\n 'model_name': 'model_name',\n 'project_id': 'project_id'\n },\n },\n 'push_destination': None,\n }\n self._executor = executor.Executor()\n\n def _serialize_custom_config_under_test(self) -> Dict[Text, Any]:\n \"\"\"Converts self._exec_properties['custom_config'] to string.\"\"\"\n result = copy.deepcopy(self._exec_properties)\n result['custom_config'] = json_utils.dumps(result['custom_config'])\n return result\n\n def assertDirectoryEmpty(self, path):\n self.assertEqual(len(fileio.listdir(path)), 0)\n\n def assertDirectoryNotEmpty(self, path):\n self.assertGreater(len(fileio.listdir(path)), 0)\n\n def assertPushed(self):\n self.assertDirectoryNotEmpty(self._model_push.uri)\n self.assertEqual(1, self._model_push.get_int_custom_property('pushed'))\n\n def assertNotPushed(self):\n self.assertDirectoryEmpty(self._model_push.uri)\n self.assertEqual(0, self._model_push.get_int_custom_property('pushed'))\n\n @mock.patch(\n 'tfx.extensions.google_cloud_ai_platform.pusher.executor.discovery')\n @mock.patch.object(executor, 'runner', autospec=True)\n def testDoBlessed(self, mock_runner, _):\n self._model_blessing.uri = os.path.join(self._source_data_dir,\n 'model_validator/blessed')\n self._model_blessing.set_int_custom_property('blessed', 1)\n mock_runner.get_service_name_and_api_version.return_value = ('ml', 'v1')\n self._executor.Do(self._input_dict, self._output_dict,\n self._serialize_custom_config_under_test())\n executor_class_path = '%s.%s' % (self._executor.__class__.__module__,\n self._executor.__class__.__name__)\n with telemetry_utils.scoped_labels(\n {telemetry_utils.LABEL_TFX_EXECUTOR: executor_class_path}):\n job_labels = telemetry_utils.get_labels_dict()\n mock_runner.deploy_model_for_aip_prediction.assert_called_once_with(\n mock.ANY,\n self._model_push.uri,\n mock.ANY,\n mock.ANY,\n job_labels,\n )\n self.assertPushed()\n version = self._model_push.get_string_custom_property('pushed_version')\n self.assertEqual(\n self._model_push.get_string_custom_property('pushed_destination'),\n 'projects/project_id/models/model_name/versions/{}'.format(version))\n\n @mock.patch(\n 'tfx.extensions.google_cloud_ai_platform.pusher.executor.discovery')\n @mock.patch.object(executor, 'runner', autospec=True)\n def testDoNotBlessed(self, mock_runner, _):\n self._model_blessing.uri = os.path.join(self._source_data_dir,\n 'model_validator/not_blessed')\n self._model_blessing.set_int_custom_property('blessed', 0)\n mock_runner.get_service_name_and_api_version.return_value = ('ml', 'v1')\n self._executor.Do(self._input_dict, self._output_dict,\n self._serialize_custom_config_under_test())\n self.assertNotPushed()\n mock_runner.deploy_model_for_aip_prediction.assert_not_called()\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.test.main" ] ]
KSaiRahul21/matrixprofile
[ "d8250e30d90ed0453bb7c35bb34ab0c04ae7b334" ]
[ "tests/test_skimp.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nrange = getattr(__builtins__, 'xrange', range)\n# end of py2 compatability boilerplate\n\nimport os\n\nimport pytest\n\nimport numpy as np\n\nfrom matrixprofile.algorithms import skimp\nfrom matrixprofile.algorithms.skimp import binary_split\nfrom matrixprofile.algorithms.skimp import maximum_subsequence\n\n\ndef test_binary_split_1():\n desired = [0]\n actual = binary_split(1)\n\n np.testing.assert_equal(actual, desired)\n\n\ndef test_binary_split_many():\n desired = [0, 5, 2, 7, 1, 3, 6, 8, 4, 9]\n actual = binary_split(10)\n\n np.testing.assert_equal(actual, desired)\n\n\ndef test_maximum_subsequence_36():\n np.random.seed(9999)\n ts = np.random.uniform(size=2**10)\n w = 2**5\n subq = ts[0:w]\n ts[0:w] = subq\n ts[w+100:w+100+w] = subq\n\n upper = maximum_subsequence(ts, 0.98)\n\n assert(upper == 36)\n\n\ndef test_maximum_subsequence_68():\n np.random.seed(9999)\n ts = np.random.uniform(size=2**10)\n w = 2**6\n subq = ts[0:w]\n ts[0:w] = subq\n ts[w+100:w+100+w] = subq\n\n upper = maximum_subsequence(ts, 0.98)\n\n assert(upper == 68)\n\ndef test_maximum_subsequence_no_windows():\n np.random.seed(9999)\n ts = np.random.uniform(size=2**10)\n w = 2**6\n subq = ts[0:w]\n ts[0:w] = subq\n ts[w+100:w+100+w] = subq\n\n with pytest.warns(RuntimeWarning) as record:\n upper = maximum_subsequence(ts, 1.0)\n\n assert(np.isnan(upper))\n assert('No windows found with given threshold' in record[0].message.args[0])" ]
[ [ "numpy.testing.assert_equal", "numpy.random.uniform", "numpy.random.seed", "numpy.isnan" ] ]
UtrechtUniversity/nudging
[ "9eb1b77749f36059d0c03e60338308ed2e1ebe3d" ]
[ "nudging/dataset/matrix.py" ]
[ "\"\"\"DataSet class for simlated matrix data\"\"\"\nfrom pandas import DataFrame\n\nfrom nudging.dataset.base import BaseDataSet\n\n\nclass MatrixData(BaseDataSet):\n \"\"\"Class MatrixData\"\"\"\n @classmethod\n def from_data(cls, data, truth=None, names=None, **kwargs):\n \"\"\"Initialize dataset from numpy arrays.\n\n Arguments\n ---------\n X: np.ndarray\n Feature matrix in numpy array format (NxM).\n outcome: np.ndarray\n Outcome for each of the samples (N).\n nudge: np.ndarray\n Whether each subject was nudged or not (1 or 0) (N).\n names: list[str]\n List of column names (M)\n\n Returns\n -------\n MatrixData:\n Initialized dataset.\n \"\"\"\n X, nudge, outcome = data\n if truth is None:\n truth = {}\n truth.update(kwargs)\n standard_df = DataFrame(X)\n if names is not None:\n standard_df.set_axis(names, axis=1, inplace=True)\n else:\n standard_df.set_axis(\n [str(x) for x in list(standard_df)], axis=1, inplace=True)\n standard_df[\"outcome\"] = outcome\n standard_df[\"nudge\"] = nudge\n if \"nudge_type\" not in truth:\n truth[\"nudge_type\"] = -1\n if \"nudge_domain\" not in truth:\n truth[\"nudge_domain\"] = -1\n truth[\"n_features\"] = X.shape[1]\n return cls(standard_df=standard_df, truth=truth)\n" ]
[ [ "pandas.DataFrame" ] ]
Bhaskers-Blu-Org2/arcticseals
[ "adfdd911e7f74ffaf288d1fbc4d8863844328d45" ]
[ "src/archive/ir-hotspot-rfc/hotspot_classifier.py" ]
[ "\"\"\"Functions and Command line Script for classifying hotspots\"\"\"\n# Standard Inputs\nimport argparse\nimport os\nimport sys\nimport pickle\n# Pip Inputs\nimport pandas as pd\nimport numpy as np\nfrom PIL import Image\n\ndef square_crop(image, x_pos, y_pos, size=35):\n \"\"\"Returns a square crop of size centered on (x_pos, y_pos)\n Inputs:\n image: np.ndarray, image data in an array\n x_pos: int, x-coordinate of the hotspot\n y_pos: int, y-coordinate of the hotspot\n size: int, side length for the returned crop\n Outputs:\n cropped_image: np.ndarray, cropped image data of size (size x size)\n \"\"\"\n size = (np.floor(size/2) * 2 + 1).astype(np.int) #Forces size to be odd\n offset = ((size - 1) / 2).astype(np.int)\n\n x_low = x_pos - offset\n x_high = x_pos + offset + 1\n\n y_low = y_pos - offset\n y_high = y_pos + offset + 1\n\n if x_low < 0:\n x_high = x_high - x_low\n x_low = 0\n\n if x_high > image.shape[1]:\n x_low = x_low - (x_high - image.shape[1])\n x_high = image.shape[1]\n\n if y_low < 0:\n y_high = y_high - y_low\n y_low = 0\n\n if y_high > image.shape[0]:\n y_low = y_low - (y_high - image.shape[0])\n y_high = image.shape[0]\n\n cropped_image = image[y_low:y_high, x_low:x_high]\n\n return cropped_image\n\ndef parse_arguments(sys_args):\n \"\"\"Parses the input and output directories from the arguments\n Input:\n sys_args: list, the arguments to be parsed\n Output:\n input_directory: string, path to the input image directory\n output_directory: string, path to the output image directory\n \"\"\"\n # Set up parse\n parser = argparse.ArgumentParser(\n description='Command line interface for thermal image normalization')\n parser.add_argument('--datadir', type=str,\n required=True, help='relative path to directory containing images')\n parser.add_argument('--datafile', type=str,\n default=None, help='path to the csv file containing identified hotspots')\n parser.add_argument('--modelfile', type=str,\n default=None, help='path to the pickle dump of the trained classifier')\n parser.add_argument('--outfile', type=str,\n default='output.csv', help='path to write the output csv to')\n # Parse\n args = parser.parse_args(sys_args)\n\n # Store values\n data_file = args.datafile\n data_directory = args.datadir\n model_file = args.modelfile\n output_file = args.outfile\n\n print('Using input csv: {}'.format(data_file))\n print('Using data directory for inputs: {}'.format(data_directory))\n print('Using classification model: {}'.format(model_file))\n print('Will write classifications to: {}'.format(output_file))\n\n return data_file, data_directory, model_file, output_file\n\ndef load_data(data_frame, data_directory):\n \"\"\"Loads the flattened thumbnails for classification\n Inputs:\n data_frame: pandas df, the information about the data\n data_directory: string, the path to the directory for the images\n Outputs:\n data: np.ndarray, array with the image thumbnails where the\n row matches row in the data_frame and column is the flattened\n image data\n \"\"\"\n thumb_size = 35\n data = np.zeros((len(data_frame), thumb_size**2))\n\n for index in range(len(data_frame)):\n try:\n file_name = os.path.join(data_directory, data_frame['filt_thermal16'][index])\n file_name = file_name.replace('16BIT', '8BIT-N')\n\n image = np.array(Image.open(file_name))\n cropped = square_crop(image, data_frame['x_pos'][index], data_frame['y_pos'][index])\n data[index, :] = cropped.flatten()\n except FileNotFoundError:\n print('Could not find: {}'.format(file_name))\n\n return data\n\ndef 
classify_data(data_file, data_directory, model_file, output_file):\n \"\"\"Data loading, classifying and output logic. For compatibility with library inputs\n Inputs:\n data_file: string, path to the input csv file\n data_directory: string, path to the thermal images\n model_file: string, path to the classifier model\n output_file: string, path to the output csv file\n \"\"\"\n print('Loading the data files...')\n df = pd.read_csv(data_file)\n data = load_data(df, data_directory)\n\n print('Loading the classifier...')\n clf = pickle.load(open(model_file, 'rb'))\n print('Beginning the classification...')\n y_predict = clf.predict(data)\n y_predict_proba = np.max(clf.predict_proba(data), axis=1)\n\n print('Writing the output...')\n y_predict_label = []\n labels = ['Anomaly', 'Animal']\n for prediction in y_predict:\n y_predict_label.append(labels[prediction])\n\n df['hotspot_type'] = y_predict_label\n df['ir_confidence'] = y_predict_proba\n\n df.to_csv(output_file)\n\n print('Wrote classification to: {}'.format(output_file))\n\n# Main Program\ndef main(sys_argv):\n \"\"\"Classifies the hotspots in data_file and writes output_file from command line\n Example usage: python -W ignore hotspot_classifier.py --datadir ./ArcticSealsData01_Thermal_N/\n --datafile ../arcticseals/data/test.csv --modelfile pca_rfc_model_20180725_154906.p\n \"\"\"\n data_file, data_directory, model_file, output_file = parse_arguments(sys_argv[1:])\n classify_data(data_file, data_directory, model_file, output_file)\n\nif __name__ == '__main__':\n main(sys.argv)\n" ]
[ [ "pandas.read_csv", "numpy.floor" ] ]
boczekbartek/flexconv
[ "610b5be3a846bcc1436275daaad89482b6b8e7cc" ]
[ "ckconv/utils/grids.py" ]
[ "import torch\n\n\ndef rel_positions_grid(grid_sizes):\n \"\"\"Generates a flattened grid of (x,y,...) coordinates in a range of -1 to 1.\n sidelen: int\n dim: int\n \"\"\"\n tensors = []\n for size in grid_sizes:\n tensors.append(torch.linspace(-1, 1, steps=size))\n # tensors = tuple(dim * [torch.linspace(-1, 1, steps=grid_length)])\n relpos_grid = torch.stack(torch.meshgrid(*tensors), dim=-0)\n return relpos_grid\n" ]
[ [ "torch.linspace", "torch.meshgrid" ] ]
makoeppel/acl2019-GPPL-humour-metaphor
[ "f659144465085f80e699445c0bc0202d0bdb9817" ]
[ "python/models/gp_classifier_svi.py" ]
[ "'''\n\nUses stochastic variational inference (SVI) to scale to larger datasets with limited memory. At each iteration\nof the VB algorithm, only a fixed number of random data points are used to update the distribution.\n\n'''\n\nimport numpy as np\nimport logging\n\nimport scipy\n\nfrom gp_classifier_vb import GPClassifierVB, sigmoid, max_no_jobs\nfrom sklearn.cluster import MiniBatchKMeans\nfrom joblib import Parallel, delayed\nimport multiprocessing\nfrom scipy.special import psi\n\n\ndef _gradient_terms_for_subset(K_mm, invK_mm, kernel_derfactor, kernel_operator, common_term, ls_d, coords, s):\n\n if kernel_operator == '*':\n dKdls = K_mm * kernel_derfactor(coords, coords, ls_d, operator=kernel_operator) / s\n elif kernel_operator == '+':\n dKdls = kernel_derfactor(coords, coords, ls_d, operator=kernel_operator) / s\n\n return 0.5 * np.trace(common_term.dot(dKdls).dot(invK_mm * s) )\n\n\nclass GPClassifierSVI(GPClassifierVB):\n data_idx_i = [] # data indices to update in the current iteration, i\n changed_selection = True # indicates whether the random subset of data has changed since variables were initialised\n covpair = None\n covpair_out = None\n\n def __init__(self, ninput_features, z0=0.5, shape_s0=2, rate_s0=2, shape_ls=10, rate_ls=0.1, ls_initial=None,\n kernel_func='matern_3_2', kernel_combination='*', max_update_size=1000, ninducing=500, use_svi=True,\n delay=1.0, forgetting_rate=0.9, verbose=False, fixed_s=False):\n\n self.max_update_size = max_update_size # maximum number of data points to update in each SVI iteration\n\n # initialise the forgetting rate and delay for SVI\n self.forgetting_rate = forgetting_rate\n self.delay = delay # delay must be at least 1\n\n # number of inducing points\n self.ninducing = ninducing\n\n self.n_converged = 10 # usually needs more converged iterations and can drop below zero due to approx. errors\n\n # default state before initialisation, unless some inducing coordinates are set by external call\n self.inducing_coords = None\n self.K_mm = None\n self.invK_mm = None\n self.K_nm = None\n self.V_nn = None\n\n # if use_svi is switched off, we revert to the standard (parent class) VB implementation\n self.use_svi = use_svi\n\n self.reset_inducing_coords = True # creates new inducing coords each time fit is called, if this flag is set\n\n self.exhaustive_train = 1\n # number of iterations that all training data must be used in when doing stochastic\n # sampling. 
You will need this setting on if you have any diagonal kernels/no person features.\n # Switching it off means that the algorithm will decide when to stop the stochastic updates.\n # It may think it has converged before seeing all the data.\n\n self.data_splits = None\n self.nsplits = 0 # we set this when data is passed in\n self.current_data_split = -1\n\n super(GPClassifierSVI, self).__init__(ninput_features, z0, shape_s0, rate_s0, shape_ls, rate_ls, ls_initial,\n kernel_func, kernel_combination, verbose=verbose, fixed_s=fixed_s)\n\n # Initialisation --------------------------------------------------------------------------------------------------\n\n def _init_params(self, mu0=None, reinit_params=True, K=None):\n if self.use_svi and (self.K_mm is None or self.vb_iter == 0):\n self._choose_inducing_points()\n\n super(GPClassifierSVI, self)._init_params(mu0, reinit_params, K)\n\n def _init_covariance(self):\n if not self.use_svi:\n return super(GPClassifierSVI, self)._init_covariance()\n\n self.obs_v = np.ones((self.n_locs, 1)) * self.rate_s0 / self.shape_s0\n\n def _init_s(self):\n if not self.use_svi:\n return super(GPClassifierSVI, self)._init_s()\n\n if not self.fixed_s:\n self.shape_s = self.shape_s0 + self.ninducing / 2.0\n self.rate_s = self.rate_s0 + 0.5 * (np.sum((self.obs_f-self.mu0)**2) + self.ninducing*self.rate_s0/self.shape_s0)\n\n self.s = self.shape_s / self.rate_s\n self.Elns = psi(self.shape_s) - np.log(self.rate_s)\n self.old_s = self.s\n if self.verbose:\n logging.debug(\"Setting the initial precision scale to s=%.3f\" % self.s)\n\n def reset_kernel(self):\n self._init_covariance()\n if self.use_svi:\n self.K_mm = None\n self.K_nm = None\n self.invK_mm = None\n\n def _choose_inducing_points(self):\n # choose a set of inducing points -- for testing we can set these to the same as the observation points.\n self.update_size = self.max_update_size # number of inducing points in each stochastic update\n if self.update_size > self.n_obs:\n self.update_size = self.n_obs\n\n # diagonal can't use inducing points but can use the subsampling of observations\n if self.inducing_coords is None and (self.ninducing > self.n_locs or self.cov_type == 'diagonal'):\n if self.inducing_coords is not None:\n logging.warning(\n 'replacing initial inducing points with observation coordinates because they are smaller.')\n self.ninducing = self.n_locs\n self.inducing_coords = self.obs_coords\n # invalidate matrices passed in to init_inducing_points() as we need to recompute for new inducing points\n self.reset_kernel()\n elif self.inducing_coords is None:\n init_size = 300\n if self.ninducing > init_size:\n init_size = self.ninducing\n kmeans = MiniBatchKMeans(init_size=init_size, n_clusters=self.ninducing)\n\n if self.obs_coords.shape[0] > 20 * self.ninducing:\n coords = self.obs_coords[np.random.choice(self.obs_coords.shape[0], 20 * self.ninducing, replace=False),\n :]\n else:\n coords = self.obs_coords\n\n kmeans.fit(coords)\n\n # self.inducing_coords = self.obs_coords[np.random.randint(0, self.n_locs, size=(ninducing)), :]\n self.inducing_coords = kmeans.cluster_centers_\n # self.inducing_coords = self.obs_coords\n self.reset_kernel()\n\n if self.K_mm is None:\n self.K_mm = self.kernel_func(self.inducing_coords, self.ls, operator=self.kernel_combination)\n self.K_mm += 1e-6 * np.eye(len(self.K_mm)) # jitter\n if self.invK_mm is None:\n if self.cov_type == 'diagonal':\n self.invK_mm = self.K_mm\n else:\n self.invK_mm = scipy.linalg.inv(self.K_mm)\n if self.K_nm is None:\n if self.cov_type == 
'diagonal':\n self.K_nm = self.K_mm # there are no inducing points\n else:\n self.K_nm = self.kernel_func(self.obs_coords, self.ls, self.inducing_coords,\n operator=self.kernel_combination)\n\n self.u_invSm = np.zeros((self.ninducing, 1), dtype=float) # theta_1\n if self.cov_type == 'diagonal':\n self.u_invS = np.zeros((self.ninducing), dtype=float) # theta_2\n self.u_Lambda = np.zeros((self.ninducing), dtype=float) # observation precision at inducing points\n else:\n self.u_invS = np.zeros((self.ninducing, self.ninducing), dtype=float) # theta_2\n self.u_Lambda = np.zeros((self.ninducing, self.ninducing),\n dtype=float) # observation precision at inducing points\n\n self.uS = self.K_mm * self.rate_s0 / self.shape_s0 # initialise properly to prior\n self.um_minus_mu0 = np.zeros((self.ninducing, 1))\n\n # Mapping between latent and observation spaces -------------------------------------------------------------------\n\n def _compute_jacobian(self, f=None, data_idx_i=None):\n\n if f is None:\n f = self.obs_f\n\n if data_idx_i is not None:\n g_obs_f = self.forward_model(f.flatten()[data_idx_i]) # first order Taylor series approximation\n else:\n # if self.verbose:\n # logging.debug(\"in _compute_jacobian, applying forward model to all observation points\")\n g_obs_f = self.forward_model(f.flatten())\n # if self.verbose:\n # logging.debug(\"in _compute_jacobian, computing gradients for all observation points...\")\n J = np.diag(g_obs_f * (1 - g_obs_f))\n return g_obs_f, J\n\n def _update_jacobian(self, G_update_rate=1.0):\n if not self.use_svi:\n return super(GPClassifierSVI, self)._update_jacobian(G_update_rate)\n\n g_obs_f, J = self._compute_jacobian(data_idx_i=self.data_idx_i)\n\n if G_update_rate == 1 or not len(self.G) or self.G.shape != J.shape or self.changed_selection:\n # either G has not been initialised, or is from different observations, or random subset of data has changed\n self.G = J\n else:\n self.G = G_update_rate * J + (1 - G_update_rate) * self.G\n\n # set the selected observations i.e. not their locations, but the actual indexes in the input data. 
In the\n # standard case, these are actually the same anyway, but this can change if the observations are pairwise prefs.\n self.data_obs_idx_i = self.data_idx_i\n return g_obs_f\n\n # Log Likelihood Computation -------------------------------------------------------------------------------------\n\n def _logpt(self):\n logrho, lognotrho, _ = self._post_sample(self.obs_f, self.obs_v, expectedlog=True)\n\n return logrho, lognotrho\n\n def _logpf(self):\n # Note that some terms are cancelled with those in data_ll to simplify\n if not self.use_svi:\n return super(GPClassifierSVI, self)._logpf()\n\n _, G = self._compute_jacobian()\n _, logdet_K = np.linalg.slogdet(self.K_mm)\n D = len(self.um_minus_mu0)\n logdet_Ks = - D * self.Elns + logdet_K\n\n invK_expecF = np.trace(self.invKs_mm.dot(self.uS))\n\n m_invK_m = self.um_minus_mu0.T.dot(self.invK_mm * self.s).dot(self.um_minus_mu0)\n\n return 0.5 * (- np.log(2 * np.pi) * D - logdet_Ks - invK_expecF - m_invK_m)\n\n def _logqf(self):\n if not self.use_svi:\n return super(GPClassifierSVI, self)._logqf()\n\n # We want to do this, but we can simplify it, since the x and mean values cancel:\n _, logdet_C = np.linalg.slogdet(self.uS)\n D = len(self.um_minus_mu0)\n _logqf = 0.5 * (- np.log(2 * np.pi) * D - logdet_C - D)\n return _logqf\n\n def get_obs_precision(self):\n if not self.use_svi:\n return super(GPClassifierSVI, self).get_obs_precision()\n # _, G = self._compute_jacobian()\n # Lambda_factor1 = self.invKs_mm.dot(self.Ks_nm.T).dot(G.T)\n # Lambda_i = (Lambda_factor1 / self.Q[np.newaxis, :]).dot(Lambda_factor1.T)\n # return Lambda_i\n\n # this is different from the above because it is a weighted sum of previous values\n # return self.u_invS - (self.invKs_mm)\n\n if self.cov_type == 'diagonal':\n return np.diag(self.u_Lambda)\n return self.u_Lambda\n\n def lowerbound_gradient(self, dim):\n '''\n Gradient of the lower bound on the marginal likelihood with respect to the length-scale of dimension dim.\n '''\n if not self.use_svi:\n return super(GPClassifierSVI, self).lowerbound_gradient(dim)\n\n common_term = (self.um_minus_mu0.dot(self.um_minus_mu0.T) + self.uS).dot(self.s * self.invK_mm) - np.eye(self.ninducing)\n\n if self.n_lengthscales == 1 or dim == -1: # create an array with values for each dimension\n dims = range(self.obs_coords.shape[1])\n else: # do it for only the dimension dim\n dims = [dim]\n\n num_jobs = multiprocessing.cpu_count()\n if num_jobs > max_no_jobs:\n num_jobs = max_no_jobs\n if len(self.ls) > 1:\n gradient = Parallel(n_jobs=num_jobs, backend='threading')(\n delayed(_gradient_terms_for_subset)(self.K_mm, self.invK_mm, self.kernel_derfactor, self.kernel_combination,\n common_term, self.ls[dim], self.inducing_coords[:, dim:dim + 1], self.s)\n for dim in dims)\n\n else:\n gradient = Parallel(n_jobs=num_jobs, backend='threading')(\n delayed(_gradient_terms_for_subset)(self.K_mm, self.invK_mm, self.kernel_derfactor, self.kernel_combination,\n common_term, self.ls[0], self.inducing_coords[:, dim:dim + 1], self.s)\n for dim in dims)\n\n if self.n_lengthscales == 1:\n # sum the partial derivatives over all the dimensions\n gradient = [np.sum(gradient)]\n\n return np.array(gradient)\n\n # Training methods ------------------------------------------------------------------------------------------------\n\n def _expec_f(self):\n if self.use_svi:\n # change the randomly selected observation points\n self._update_sample()\n\n super(GPClassifierSVI, self)._expec_f()\n\n def _update_f(self):\n if not self.use_svi:\n return 
super(GPClassifierSVI, self)._update_f()\n\n # this is done here not update_sample because it needs to be updated every time obs_f is updated\n self.obs_f_i = self.obs_f[self.data_idx_i]\n\n K_nm_i = self.K_nm[self.data_idx_i, :]\n\n Q = self.Q[self.data_obs_idx_i][np.newaxis, :]\n Lambda_factor1 = self.G.dot(K_nm_i).dot(self.invK_mm).T\n Lambda_i = (Lambda_factor1 / Q).dot(Lambda_factor1.T)\n\n if self.cov_type == 'diagonal':\n Lambda_i = np.diag(Lambda_i)\n\n # calculate the learning rate for SVI\n rho_i = (self.vb_iter + self.delay) ** (-self.forgetting_rate)\n # print(\"\\rho_i = %f \" % rho_i\n\n # weighting. Lambda and\n w_i = np.sum(self.obs_total_counts) / float(\n np.sum(self.obs_total_counts[self.data_obs_idx_i])) # self.obs_f.shape[0] / float(self.obs_f_i.shape[0])\n\n # S is the variational covariance parameter for the inducing points, u. Canonical parameter theta_2 = -0.5 * S^-1.\n # The variational update to theta_2 is (1-rho)*S^-1 + rho*Lambda. Since Lambda includes a sum of Lambda_i over\n # all data points i, the stochastic update weights a sample sum of Lambda_i over a mini-batch.\n Lambda_i = Lambda_i * w_i * rho_i\n if self.cov_type == 'diagonal':\n self.u_invS = (1 - rho_i) * self.prev_u_invS + Lambda_i + rho_i * np.diag(self.invKs_mm)\n else:\n self.u_invS = (1 - rho_i) * self.prev_u_invS + Lambda_i + rho_i * self.invKs_mm\n self.u_Lambda = (1 - rho_i) * self.prev_u_Lambda + Lambda_i\n\n # use the estimate given by the Taylor series expansion\n z0 = self.forward_model(self.obs_f, subset_idxs=self.data_obs_idx_i) + self.G.dot(self.mu0_i - self.obs_f_i)\n y = self.z_i - z0\n\n # Variational update to theta_1 is (1-rho)*S^-1m + rho*beta*K_mm^-1.K_mn.y\n self.u_invSm = (1 - rho_i) * self.prev_u_invSm + w_i * rho_i * (Lambda_factor1 / Q).dot(y)\n\n # Next step is to use this to update f, so we can in turn update G. 
The contribution to Lambda_m and u_inv_S should therefore be made only once G has stabilised!\n # L_u_invS = cholesky(self.u_invS.T, lower=True, check_finite=False)\n # B = solve_triangular(L_u_invS, self.invKs_mm.T, lower=True, check_finite=False)\n # A = solve_triangular(L_u_invS, B, lower=True, trans=True, check_finite=False, overwrite_b=True)\n\n if self.cov_type == 'diagonal':\n self.uS = np.diag(1.0 / self.u_invS)\n else:\n self.uS = scipy.linalg.inv(self.u_invS)\n\n # self.um_minus_mu0 = solve_triangular(L_u_invS, self.u_invSm, lower=True, check_finite=False)\n # self.um_minus_mu0 = solve_triangular(L_u_invS, self.um_minus_mu0, lower=True, trans=True, check_finite=False,\n # overwrite_b=True)\n self.um_minus_mu0 = self.uS.dot(self.u_invSm)\n\n if self.covpair is None:\n if self.cov_type == 'diagonal':\n self.covpair = 1.0\n else:\n self.covpair = scipy.linalg.solve(self.Ks_mm, self.Ks_nm.T).T\n\n self.obs_f, self.obs_v = self._f_given_u(self.covpair, self.mu0, 1.0 / self.s, full_cov=False)\n\n\n def _f_given_u(self, covpair, mu0, Ks_nn=None, full_cov=True):\n # see Hensman, Scalable variational Gaussian process classification, equation 18\n\n #(self.K_nm / self.s).dot(self.s * self.invK_mm).dot(self.uS).dot(self.u_invSm)\n if self.cov_type == 'diagonal':\n if self.um_minus_mu0.size != mu0.size:\n logging.error('We cannot make predictions for new test items when using a diagonal covariance -- we '\n 'need to be able to use the features to make predictions.')\n if Ks_nn is not None:\n return np.zeros(mu0.size), np.diag(np.ones(mu0.size)/self.s)\n else:\n return np.zeros(mu0.size)\n\n fhat = self.um_minus_mu0 + mu0\n if Ks_nn is not None and full_cov:\n C = self.uS\n return fhat, C\n elif Ks_nn is not None:\n C = np.diag(self.uS)[:, None]\n return fhat, C\n else:\n return fhat\n\n # for non-diagonal covariance matrices\n fhat = covpair.dot(self.um_minus_mu0) + mu0\n\n if Ks_nn is not None:\n if full_cov:\n C = Ks_nn + covpair.dot(self.uS - self.Ks_mm).dot(covpair.T)\n v = np.diag(C)\n else:\n C = Ks_nn + np.sum(covpair.dot(self.uS - self.Ks_mm) * covpair, axis=1)\n v = C\n C = C[:, None]\n\n if np.any(v < 0):\n logging.error(\"Negative variance in _f_given_u(), %f\" % np.min(v))\n # caused by the accumulation of small errors. Possibly only occurs when s is very small?\n\n if full_cov:\n fixidxs = np.argwhere(v < 0).flatten()\n C[fixidxs, fixidxs] = 1e-6 # set to small number.\n else:\n C[C<0] = 1e-6\n\n return fhat, C\n else:\n return fhat\n\n def _expec_s(self):\n if not self.use_svi:\n return super(GPClassifierSVI, self)._expec_s()\n\n self.old_s = self.s\n if self.cov_type == 'diagonal':\n invK_mm_expecFF = self.uS + self.um_minus_mu0.dot(self.um_minus_mu0.T)\n else:\n invK_mm_expecFF = self.invK_mm.dot(self.uS + self.um_minus_mu0.dot(self.um_minus_mu0.T))\n self.rate_s = self.rate_s0 + 0.5 * np.trace(invK_mm_expecFF)\n # Update expectation of s. 
See approximations for Binary Gaussian Process Classification, Hannes Nickisch\n self.s = self.shape_s / self.rate_s\n self.Elns = psi(self.shape_s) - np.log(self.rate_s)\n if self.verbose:\n logging.debug(\"Updated inverse output scale: \" + str(self.s))\n\n self.Ks_mm = self.K_mm / self.s\n self.invKs_mm = self.invK_mm * self.s\n self.Ks_nm = self.K_nm / self.s\n\n def _update_sample(self):\n\n # once the iterations over G are complete, we accept this stochastic VB update\n self.prev_u_invSm = self.u_invSm\n self.prev_u_invS = self.u_invS\n self.prev_u_Lambda = self.u_Lambda\n\n self._update_sample_idxs()\n\n self.Ks_mm = self.K_mm / self.s\n self.invKs_mm = self.invK_mm * self.s\n self.Ks_nm = self.K_nm / self.s\n\n #self.G = 0 # reset because we will need to compute afresh with new sample. This shouldn't be necessary\n self.z_i = self.z[self.data_obs_idx_i]\n self.mu0_i = self.mu0[self.data_idx_i]\n\n def init_inducing_points(self, inducing_coords, K_mm=None, invK_mm=None, K_nm=None, V_nn=None):\n self.ninducing = inducing_coords.shape[0]\n self.inducing_coords = inducing_coords\n if K_mm is not None:\n self.K_mm = K_mm\n if invK_mm is not None:\n self.invK_mm = invK_mm\n if K_nm is not None:\n self.K_nm = K_nm\n if V_nn is not None:\n self.V_nn = V_nn # the prior variance at the observation data points\n\n self.u_invSm = np.zeros((self.ninducing, 1), dtype=float) # theta_1\n if self.cov_type == 'diagonal':\n self.u_invS = np.zeros((self.ninducing), dtype=float) # theta_2\n self.u_Lambda = np.zeros((self.ninducing), dtype=float) # observation precision at inducing points\n else:\n self.u_invS = np.zeros((self.ninducing, self.ninducing), dtype=float) # theta_2\n self.u_Lambda = np.zeros((self.ninducing, self.ninducing), dtype=float) # observation precision at inducing points\n self.uS = self.K_mm * self.rate_s0 / self.shape_s0 # initialise properly to prior\n self.um_minus_mu0 = np.zeros((self.ninducing, 1))\n\n def _update_sample_idxs(self):\n if self.n_obs <= self.update_size:\n # we don't need stochastic updates if the update size is larger than number of observations\n self.data_idx_i = np.arange(self.obs_f.size)\n self.data_obs_idx_i = np.arange(self.n_obs)\n return\n\n # do this in the first iteration\n if self.nsplits == 0:\n self.nsplits = int(np.ceil(self.n_obs / float(self.update_size)))\n\n if self.exhaustive_train:\n self.min_iter = self.nsplits\n if self.max_iter_VB < self.min_iter_VB * self.exhaustive_train:\n self.max_iter_VB = self.min_iter_VB * self.exhaustive_train\n\n # do this each time we reach the end of updating for all splits in the current set\n if self.data_splits is None or np.mod(self.current_data_split+1, self.nsplits) == 0:\n # create nsplits random splits -- shuffle data and split\n rand_order = np.random.permutation(self.n_obs)\n self.data_splits = []\n\n for n in range(self.nsplits):\n ending = self.update_size * (n + 1)\n if ending > self.n_obs:\n ending = self.n_obs\n self.data_splits.append(rand_order[self.update_size * n:ending])\n\n self.current_data_split = 0\n else:\n self.current_data_split += 1\n\n self.data_idx_i = self.data_splits[self.current_data_split]\n self.data_obs_idx_i = self.data_idx_i\n\n # Prediction methods ---------------------------------------------------------------------------------------------\n #\n def _get_training_cov(self):\n if not self.use_svi:\n return super(GPClassifierSVI, self)._get_training_cov()\n # return the covariance matrix for training points to inducing points (if used) and the variance of the training 
points.\n if self.K is not None:\n return self.K_nm, self.K\n else:\n return self.K_nm, self.K_nm.dot(self.invK_mm).dot(self.K_nm.T)\n\n def _get_training_feats(self):\n if not self.use_svi:\n return super(GPClassifierSVI, self)._get_training_feats()\n return self.inducing_coords\n\n def _expec_f_output(self, Ks_starstar, Ks_star, mu0, full_cov=False, reuse_output_kernel=False):\n \"\"\"\n Compute the expected value of f and the variance or covariance of f\n :param Ks_starstar: prior variance at the output points (scalar or 1-D vector), or covariance if full_cov==True.\n :param Ks_star: covariance between output points and training points\n :param mu0: prior mean for output points\n :param full_cov: set to True to compute the full posterior covariance between output points\n :return f, C: posterior expectation of f, variance or covariance of the output locations.\n \"\"\"\n if not self.use_svi:\n return super(GPClassifierSVI, self)._expec_f_output(Ks_starstar, Ks_star, mu0, full_cov, reuse_output_kernel)\n\n if self.covpair_out is None or not reuse_output_kernel:\n self.covpair_out = scipy.linalg.solve(self.K_mm/self.s, Ks_star.T).T\n f, C_out = self._f_given_u(self.covpair_out, mu0, Ks_starstar, full_cov=full_cov)\n\n return f, C_out\n" ]
[ [ "numpy.sum", "numpy.ones", "numpy.argwhere", "numpy.diag", "numpy.any", "scipy.special.psi", "numpy.trace", "numpy.log", "numpy.random.choice", "numpy.linalg.slogdet", "sklearn.cluster.MiniBatchKMeans", "numpy.eye", "numpy.zeros", "scipy.linalg.solve", "scipy.linalg.inv", "numpy.arange", "numpy.mod", "numpy.min", "numpy.random.permutation", "numpy.array" ] ]
habichta/ETHZDeepReinforcementLearning
[ "e1ae22159753724290f20068214bb3d94fcb7be4" ]
[ "abb_rl_algorithms/DDDQN_PER/rl_logging.py" ]
[ "\n\nimport numpy as np\nimport datetime as dt\nimport tensorflow as tf\nimport os,csv\nslim = tf.contrib.slim\n\n\n\ndef save_statistics(train_writer, episodes_reward_list, episodes_mean_max_q_value_list, episodes_mean_chosen_q_value_list=None, episodes_mean_batch_reward_list=None, episode_mean_action_q_value_list=None,step=1, action_counter=None, set=\"training\", write_path=None):\n print(\"Epoch statistics for: \" + str(set))\n\n summary = tf.Summary()\n if len(episodes_mean_max_q_value_list) > 0 and len(episodes_reward_list)>0:\n total_mean_reward = sum(episodes_reward_list) / len(episodes_reward_list)\n total_median_reward = np.median(np.array(episodes_reward_list))\n total_std_reward = np.std(np.array(episodes_reward_list))\n total_perc_reward_list = list()\n for i in [10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99]:\n total_perc_reward_list.append(np.percentile(episodes_reward_list,q=i))\n\n\n total_mean_max_q_value = sum(episodes_mean_max_q_value_list) / len(episodes_mean_max_q_value_list)\n total_median_max_q_value = np.median(np.array(episodes_mean_max_q_value_list))\n total_std_max_q_value = np.std(np.array(episodes_mean_max_q_value_list))\n total_perc_max_q_value_list = list()\n for i in [10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99]:\n total_perc_max_q_value_list.append(np.percentile(episodes_mean_max_q_value_list, q=i))\n\n summary.value.add(tag=str(set) + \"_total_mean_step_reward\", simple_value=total_mean_reward)\n summary.value.add(tag=str(set) + \"_total_median_step_reward\", simple_value=total_median_reward)\n summary.value.add(tag=str(set) + \"_total_std_step_reward\", simple_value=total_std_reward)\n summary.value.add(tag=str(set) + \"_total_mean_max_step_q\", simple_value=total_mean_max_q_value)\n summary.value.add(tag=str(set) + \"_total_median_max_step_q\", simple_value=total_median_max_q_value)\n summary.value.add(tag=str(set) + \"_total_std_max_step_q\", simple_value=total_std_max_q_value)\n\n\n if episodes_mean_batch_reward_list is not None:\n total_mean_batch_reward_value = sum(episodes_mean_batch_reward_list) / len(episodes_mean_batch_reward_list)\n total_median_batch_reward_value = np.median(np.array(episodes_mean_batch_reward_list))\n total_std_batch_reward_value = np.std(np.array(episodes_mean_batch_reward_list))\n total_perc_batch_reward_value_list = list()\n for i in [10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99]:\n total_perc_batch_reward_value_list.append(np.percentile(episodes_mean_batch_reward_list, q=i))\n\n summary.value.add(tag=str(set) + \"_total_mean_step_batch_reward\",\n simple_value=total_mean_batch_reward_value)\n summary.value.add(tag=str(set) + \"_total_median_step_batch_reward\",\n simple_value=total_median_batch_reward_value)\n summary.value.add(tag=str(set) + \"_total_std_step_batch_reward\", simple_value=total_std_batch_reward_value)\n\n if episodes_mean_chosen_q_value_list is not None:\n total_mean_chosen_q_value = sum(episodes_mean_chosen_q_value_list) / len(episodes_mean_chosen_q_value_list)\n total_median_chosen_q_value = np.median(np.array(episodes_mean_chosen_q_value_list))\n total_std_chosen_q_value = np.std(np.array(episodes_mean_chosen_q_value_list))\n total_perc_chosen_q_value_list = list()\n for i in [10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99]:\n total_perc_chosen_q_value_list.append(np.percentile(episodes_mean_chosen_q_value_list, q=i))\n\n summary.value.add(tag=str(set) + \"_total_mean_chosen_step_q\", simple_value=total_mean_chosen_q_value)\n summary.value.add(tag=str(set) + \"_total_median_chosen_step_q\", 
simple_value=total_median_chosen_q_value)\n summary.value.add(tag=str(set) + \"_total_std_chosen_step_q\", simple_value=total_std_chosen_q_value)\n\n if episode_mean_action_q_value_list is not None:\n total_mean_action_q_values = np.mean(np.array(episode_mean_action_q_value_list),axis=0)\n total_median_action_q_values = np.median(np.array(episode_mean_action_q_value_list),axis=0)\n\n for i in range(total_mean_action_q_values.shape[0]):\n summary.value.add(tag=str(set) + \"_mean_action_{}_step_q\".format(str(i)), simple_value=total_mean_action_q_values[i])\n summary.value.add(tag=str(set) + \"_median_action_{}_step_q\".format(str(i)), simple_value=total_median_action_q_values[i])\n\n print(\"#### Reward:\")\n print(\"Total mean step reward:\", total_mean_reward)\n print(\"Total median step reward:\", total_median_reward)\n print(\"Total std. step reward:\", total_std_reward)\n print(\"Total step reward percentiles:\", str(total_perc_reward_list))\n print(\"#### Maximum Action-Value (Q):\")\n print(\"Total mean max step Q:\", total_mean_max_q_value)\n print(\"Total median max step Q:\", total_median_max_q_value)\n print(\"Total std. max step Q:\", total_std_max_q_value)\n print(\"Total step Q max percentiles:\", str(total_perc_max_q_value_list))\n\n\n if action_counter is not None:\n for elem,cnt in action_counter.items():\n summary.value.add(tag=str(set) + \"_action_count_\"+str(elem), simple_value=cnt)\n\n\n train_writer.add_summary(summary, int(step))\n train_writer.flush()\n\n\n if write_path is not None:\n with open(os.path.join(write_path, str(set)+\"_\"+str(step)+\"_results.csv\"), \"w+\") as f:\n w = csv.writer(f)\n w.writerow([str(dt.datetime.now())])\n for key, val in [(\"Total mean step reward:\", str(total_mean_reward)),(\"Total median step reward:\",str(total_median_reward)),(\"Total std. step reward:\", str(total_std_reward)),(\"Total step reward percentiles:\", str(total_perc_reward_list)),(\"Total mean step Q:\", str(total_mean_max_q_value)),(\"Total median step Q:\", str(total_median_max_q_value)),(\"Total std. step Q:\", str(total_std_max_q_value)),(\"Total step Q percentiles:\", str(total_perc_max_q_value_list))]:\n w.writerow([key, val])\n\n else:\n print(\"No logging since either reward and q-value list were empty\")\n\n\n\ndef step_log(train_writer,epoch,episode_nr, num_episode_per_epoch,total_steps,episode_steps,total_loss,reward, mean_max_q_value, mean_chosen_q_value, sec_per_batch,learning_rate,epsilon):\n time_now = str(dt.datetime.now())\n\n try:\n print(\n \"####################################################################################\")\n print(\"Time: \" + time_now + \", Epoch: \" + str(epoch) + \", Episode Nr/Next Epoch: \" + str(episode_nr)+\"/\"+str(epoch*num_episode_per_epoch) + \", Total steps:\" + str(total_steps) + \", Episode step: \" + str(episode_steps) + \\\n \", Minibatch Huberloss: \" + \\\n \"{:.6f}\".format(total_loss) + \", Step Reward: \" + str(reward) +\", Mean Max-Action-Value (Q): \" +str(mean_max_q_value)+\", Mean Chosen-Action-Value: \"+str(mean_chosen_q_value) + \", Epsilon: \" +str(epsilon) +\n \", sec/Batch: \" + \"{:.2f}\".format(\n sec_per_batch))\n except Exception as e:\n print(\"Error printing information\")\n\n summary = tf.Summary()\n summary.value.add(tag=\"learning_rate\", simple_value=float(learning_rate))\n summary.value.add(tag=\"epsilon\", simple_value=epsilon)\n summary.value.add(tag=\"total_loss\", simple_value=total_loss)\n train_writer.add_summary(summary, int(total_steps))\n\n\n\n\ndef _save_gradient_stats(train_writer,gradients,learning_rate,step):\n\n\n ratio_statistics = list()\n grad_norm_statistics = list()\n grad_mean_statistics = list()\n grad_max_statistics = list()\n grad_min_statistics = list()\n for grad, var in gradients:\n grad_step = np.linalg.norm(grad * -learning_rate)\n var_norm = np.linalg.norm(var)\n if var_norm > 0:\n wg_ratio = grad_step / var_norm\n ratio_statistics.append(wg_ratio)\n grad_norm_statistics.append(np.linalg.norm(grad))\n grad_mean_statistics.append(np.mean(grad))\n grad_max_statistics.append(np.max(grad))\n grad_min_statistics.append(grad.min())\n\n mean_wg_ratio = sum(ratio_statistics) / len(ratio_statistics)\n median_wg_ratio = np.median(ratio_statistics)\n max_wg_ratio = max(ratio_statistics)\n min_wg_ratio = min(ratio_statistics)\n\n max_grad_norm = max(grad_norm_statistics)\n mean_grad_norm = sum(grad_norm_statistics)/len(grad_norm_statistics)\n median_grad_norm = np.median(grad_norm_statistics)\n min_grad_norm = min(grad_norm_statistics)\n\n # aggregate the per-variable gradient statistics across all variables\n mean_grad = np.mean(grad_mean_statistics)\n max_grad = np.max(grad_max_statistics)\n min_grad = min(grad_min_statistics)\n median_grad = np.median(grad_mean_statistics)\n\n summary_gwratio = tf.Summary()\n summary_gwratio.value.add(tag=\"gradient_ratio_mean_wg\", simple_value=mean_wg_ratio)\n summary_gwratio.value.add(tag=\"gradient_ratio_median_wg\", simple_value=median_wg_ratio)\n summary_gwratio.value.add(tag=\"gradient_ratio_max_wg\", simple_value=max_wg_ratio)\n summary_gwratio.value.add(tag=\"gradient_ratio_min_wg\", simple_value=min_wg_ratio)\n\n summary_gwratio.value.add(tag=\"gradient_norm_max\", simple_value=max_grad_norm)\n summary_gwratio.value.add(tag=\"gradient_norm_mean\", simple_value=mean_grad_norm)\n summary_gwratio.value.add(tag=\"gradient_norm_median\", simple_value=median_grad_norm)\n summary_gwratio.value.add(tag=\"gradient_norm_min\", simple_value=min_grad_norm)\n\n summary_gwratio.value.add(tag=\"gradient_mean\", simple_value=mean_grad)\n summary_gwratio.value.add(tag=\"gradient_max\", simple_value=max_grad)\n summary_gwratio.value.add(tag=\"gradient_min\", simple_value=min_grad)\n summary_gwratio.value.add(tag=\"gradient_median\", simple_value=median_grad)\n\n train_writer.add_summary(summary_gwratio, step)\n\n\n\ndef create_summaries(network,img_sequence_length):\n summaries = set()\n for end_point in network.end_points:\n x = network.end_points[end_point]\n summaries.add(tf.summary.histogram('activations/' + end_point, x))\n\n for variable in slim.get_model_variables():\n summaries.add(tf.summary.histogram(variable.op.name, variable))\n\n\n for grad, var in network.gradients:\n summaries.add(tf.summary.histogram(var.op.name + '/gradients', grad))\n\n _weight_image_summary(summaries, network.first_layer_weights, img_sequence_length, scope=\"\")\n merged_train = tf.summary.merge(list(summaries), name='train_summary_op')\n\n return summaries,merged_train\n\n\n\ndef _weight_image_summary(summaries, weights,img_sequence_length, scope=\"\"):\n # visualization of first convolutional layer\n # weights is of shape (length,width,depth,filters), e.g. (8,8,6,64) for two images with 3 channels each\n\n\n if weights is not None:\n split_nr = img_sequence_length\n\n split_tensors = tf.split(weights, split_nr, axis=2, name=\"split\") # list of [(8,8,3,64),(8,8,3,64),...]\n filter_cols = list()\n for split in split_tensors:\n padded_filters = tf.pad(split, tf.constant([[1, 1], [1, 1], [0, 0], [0, 0]]),\n mode='CONSTANT') # filter to 10x10x3x64\n\n padded_filters_shape = padded_filters.get_shape().as_list() # 10x10x3x64\n trsp_pf = tf.transpose(padded_filters, perm=[3, 0, 1, 2]) # 64x10x10x3\n filter_col = tf.reshape(trsp_pf, shape=[1, -1, padded_filters_shape[1],\n padded_filters_shape[2]]) # 1x64x10x10x3 => 1x640x10x3\n\n filter_cols.append(filter_col)\n\n stacked_slices = tf.stack(filter_cols) # 3x1x640x10x3\n\n trsp_ss = tf.transpose(stacked_slices, perm=[1, 2, 0, 3, 4])\n\n trsp_ss_shape = trsp_ss.get_shape().as_list() # 1x640x3x10x3\n\n weight_image = tf.reshape(trsp_ss, shape=[1, trsp_ss_shape[1], -1, trsp_ss_shape[4]]) # 1x640x30x3\n summaries.add(tf.summary.image(tensor=weight_image, name=\"weights\"))\n\n\n\n\ndef save_summaries(sess,merge_op,feed_dict,train_writer,gradients,learning_rate,step):\n if gradients is not None:\n _save_gradient_stats(train_writer, gradients, learning_rate, step)\n summary = sess.run(merge_op,feed_dict=feed_dict)\n train_writer.add_summary(summary, step)\n\n\n\n" ]
[ [ "tensorflow.summary.histogram", "tensorflow.stack", "tensorflow.reshape", "tensorflow.summary.image", "numpy.median", "tensorflow.Summary", "numpy.max", "tensorflow.split", "numpy.array", "tensorflow.constant", "tensorflow.transpose", "numpy.linalg.norm", "numpy.percentile", "numpy.mean" ] ]
GGCarrotsBerlin/test
[ "47d7414e8c4f2aa419710645c68bf32b584b29fa" ]
[ "backend_app/util.py" ]
[ "import numpy as np\n\nNW = (52.58363, 13.2035)\nSE = (52.42755, 13.62648)\nNE = (NW[0], SE[1])\nSW = (SE[0], NW[1])\n\n\ndef flatten_list(irregularly_nested_list):\n \"\"\"Generator which recursively flattens list of lists\n :param irregularly_nested_list: iterable object containing iterable and non-iterable objects as elements\n \"\"\"\n for el in irregularly_nested_list:\n if isinstance(el, list):\n for sub_el in flatten_list(el):\n yield sub_el\n else:\n yield el\n\n\ndef create_grid_of_berlin(cnt_x=60, cnt_y=40):\n x = np.linspace(NW[0], SE[0], cnt_x)\n y = np.linspace(NW[1], SE[1], cnt_y)\n return x, y\n" ]
[ [ "numpy.linspace" ] ]
bh107/benchpress
[ "e1dcda446a986d4d828b14d807e37e10cf4a046b" ]
[ "benchpress/benchmarks/lu/python_numpy/lu.py" ]
[ "from __future__ import print_function\nfrom benchpress.benchmarks import util\nimport numpy as np\n\nbench = util.Benchmark(\"LU decomposition on the matrix so that A = L*U\", \"<size>\")\n\n\ndef lu(a):\n \"\"\"\n Perform LU decomposition on the matrix `a` so that A = L*U\n \"\"\"\n u = a.copy()\n l = np.identity(a.shape[0], a.dtype)\n for c in range(1, u.shape[0]):\n l[c:, c - 1] = (u[c:, c - 1] / u[c - 1, c - 1:c])\n u[c:, c - 1:] = u[c:, c - 1:] - l[c:, c - 1][:, None] * u[c - 1, c - 1:]\n bench.flush()\n return (l, u)\n\n\ndef main():\n n = bench.args.size[0]\n matrix = bench.random_array((n, n))\n bench.start()\n res = lu(matrix)\n bench.stop()\n bench.pprint()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.identity" ] ]
mattpoggi/SistemiDigitaliM20-21
[ "202e520a571a2bb961851763f37e9293c3af400d" ]
[ "Mengascini-Spina/Sistemi-Digitali-M/Datasets/Utilities/Maps/Noiseprint/noiseprint.py" ]
[ "from pathlib import Path\nimport os\nfrom PIL import Image\nfrom tensorflow.python.keras.layers import Conv2D, BatchNormalization, Activation\n\nimport logging\nlogging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\nimport tensorflow as tf\ntf.get_logger().setLevel('ERROR')\nimport numpy as np\nfrom tensorflow.python.keras.models import Model\n\nfrom Datasets.Utilities.Maps.Noiseprint.utility import jpeg_qtableinv\n\n\nclass BiasLayer(tf.keras.layers.Layer):\n\n def build(self, input_shape):\n self.bias = self.add_weight('bias', shape=input_shape[-1], initializer=\"zeros\")\n\n @tf.function\n def call(self, inputs, training=None):\n return inputs + self.bias\n\n\ndef _FullConvNetV2(num_levels=17, padding='SAME'):\n \"\"\"FullConvNet model.\"\"\"\n activation_fun = [tf.nn.relu, ] * (num_levels - 1) + [tf.identity, ]\n filters_num = [64, ] * (num_levels - 1) + [1, ]\n batch_norm = [False, ] + [True, ] * (num_levels - 2) + [False, ]\n\n inp = tf.keras.layers.Input([None, None, 1])\n model = inp\n\n for i in range(num_levels):\n model = Conv2D(filters_num[i], 3, padding=padding, use_bias=False)(model)\n if batch_norm[i]:\n model = BatchNormalization(epsilon=1e-5)(model)\n model = BiasLayer()(model)\n model = Activation(activation_fun[i])(model)\n\n return Model(inp, model)\n\n\nclass NoiseprintEngineV2:\n save_path = os.path.join(os.path.dirname(__file__), 'noiseprint_V2/net_jpg%d/')\n slide = 1024 # 3072\n largeLimit = 1050000 # 9437184\n overlap = 34\n\n def __init__(self, quality=None):\n self.model = _FullConvNetV2()\n configSess = tf.compat.v1.ConfigProto()\n configSess.gpu_options.allow_growth = True\n self.quality = quality\n self.loaded_quality = None\n if quality is not None:\n self.load_session(quality)\n\n def load_session(self, quality):\n # log(\"Setting quality to %d \" % quality)\n quality = min(max(quality, 51), 101)\n if quality == self.loaded_quality:\n return\n checkpoint = self.save_path % quality\n self.model.load_weights(checkpoint)\n self.loaded_quality = quality\n\n @tf.function(experimental_relax_shapes=True,\n input_signature=[tf.TensorSpec(shape=(1, None, None, 1), dtype=tf.float32)])\n def _predict_small(self, img):\n return self.model(img)\n\n def _predict_large(self, img):\n res = np.zeros((img.shape[0], img.shape[1]), np.float32)\n for index0 in range(0, img.shape[0], self.slide):\n index0start = index0 - self.overlap\n index0end = index0 + self.slide + self.overlap\n\n for index1 in range(0, img.shape[1], self.slide):\n index1start = index1 - self.overlap\n index1end = index1 + self.slide + self.overlap\n clip = img[max(index0start, 0): min(index0end, img.shape[0]),\n max(index1start, 0): min(index1end, img.shape[1])]\n res_chunk = self._predict_small(clip[np.newaxis, :, :, np.newaxis])\n res_chunk = np.squeeze(res_chunk)\n\n if index0 > 0:\n res_chunk = res_chunk[self.overlap:, :]\n if index1 > 0:\n res_chunk = res_chunk[:, self.overlap:]\n res_chunk = res_chunk[:min(self.slide, res_chunk.shape[0]), :min(self.slide, res_chunk.shape[1])]\n\n res[index0: min(index0 + self.slide, res.shape[0]),\n index1: min(index1 + self.slide, res.shape[1])] = res_chunk\n return res\n\n def predict(self, img):\n if img.shape[0] * img.shape[1] > self.largeLimit:\n return self._predict_large(img)\n else:\n return tf.squeeze(self._predict_small(tf.convert_to_tensor(img[np.newaxis, :, :, np.newaxis]))).numpy()\n\ndef normalize_noiseprint(noiseprint, margin=34):\n v_min = np.min(noiseprint[margin:-margin, margin:-margin])\n v_max = np.max(noiseprint[margin:-margin, 
margin:-margin])\n return ((noiseprint - v_min) / (v_max - v_min)).clip(0, 1)\n" ]
[ [ "tensorflow.get_logger", "tensorflow.compat.v1.ConfigProto", "numpy.zeros", "numpy.squeeze", "tensorflow.python.keras.layers.Activation", "tensorflow.python.keras.models.Model", "numpy.max", "tensorflow.python.keras.layers.BatchNormalization", "numpy.min", "tensorflow.TensorSpec", "tensorflow.convert_to_tensor", "tensorflow.python.keras.layers.Conv2D", "tensorflow.keras.layers.Input" ] ]
hex-plex/Pong-ReinforcementLearning
[ "62d257ae03afb7561403d0f48650344254dcc06e" ]
[ "ball.py" ]
[ "import pygame\nfrom random import randint\nBLACK = (0,0,0)\nimport numpy as np\n\nclass Ball(pygame.sprite.Sprite):\n\n def __init__(self, color , width ,height, twidth, theight):\n\n super().__init__()\n\n self.image = pygame.Surface([width,height])\n self.image.fill(BLACK)\n self.image.set_colorkey(BLACK)\n self.twidth = twidth\n self.width = width\n self.theight = theight\n self.height = height\n pygame.draw.rect(self.image,color , [0,0,width, height])\n\n self.velocity = [randint(4,8),randint(-8,8)]\n self.rect = self.image.get_rect()\n\n def update(self):\n self.rect.x = min(max(self.rect.x+self.velocity[0],0),self.twidth-self.width)\n ## Clipping solves a lot of glitches should have done this earlier\n self.rect.y = min(max(self.rect.y+self.velocity[1],0),self.theight-self.height)\n ## Clipping solves a lot of glitches should have done this earlier\n def bounce(self):\n self.velocity[0] *= -1\n self.velocity[1] = randint(-8,8)\n def posi(self):\n return self.rect\n def spawn(self):\n self.velocity = [ np.random.choice([-1,1])*randint(4,8) ,randint(-8,8)]\n self.rect.x = (self.twidth - self.width) / 2\n self.rect.y = (self.theight - self.height) / 2\n return True\n" ]
[ [ "numpy.random.choice" ] ]
mnfienen/modflow-export
[ "e56e49285a7ef71797ce047151f6dbabbd8fe245" ]
[ "mfexport/tests/test_results_export.py" ]
[ "import os\nfrom pathlib import Path\nimport pytest\nfrom flopy.utils import binaryfile as bf\nimport numpy as np\nimport fiona\nimport rasterio\nfrom shapely.geometry import box\nimport pytest\nfrom ..grid import load_modelgrid\nfrom ..results import export_cell_budget, export_heads, export_drawdown, export_sfr_results\n\n\[email protected](scope='module')\ndef lpr_output_path(test_output_folder):\n return os.path.join(test_output_folder, 'lpr')\n\n\ndef check_files(outfiles, variables, kstpkper=None, layers=None):\n replace = [('model_top', 'top')]\n variables = set(variables)\n if kstpkper is not None and np.isscalar(kstpkper[0]):\n kstpkper = [kstpkper]\n written = set()\n for f in outfiles:\n assert os.path.getsize(f) > 0\n fname = os.path.split(f)[1]\n for pair in replace:\n fname = fname.replace(*pair)\n props = parse_fname(fname)\n assert props['var'] in variables\n written.add(props['var'])\n if kstpkper is not None:\n assert (props['stp'], props['per']) in kstpkper\n if props['lay'] is not None:\n assert props['lay'] in layers\n # verify that all variables were exported\n assert len(written.difference(variables)) == 0\n\n\ndef parse_fname(fname):\n props = {'var': None,\n 'lay': None,\n 'per': None,\n 'stp': None,\n 'suffix': None}\n if 'stress_period_data' in fname:\n props['var'] = os.path.splitext(fname)[0]\n return props\n info = os.path.splitext(fname)[0].split('_')\n props['var'] = info.pop(0)\n for i in range(len(info)):\n item = info.pop(0)\n if 'ctr' in item:\n continue\n for p in ['lay', 'per', 'stp']:\n if p in item:\n props[p] = int(item.strip(p))\n return props\n\n\ndef compare_polygons(p1, p2, **kwargs):\n \"\"\"Check that two polygons have the same extent\"\"\"\n assert np.allclose(p1.area, p2.area, **kwargs)\n assert np.allclose(p1.intersection(p2).area, p1.area, **kwargs)\n\n\ndef test_cell_budget_export(model):\n m, grid, output_path = model\n precision = 'single'\n binary_grid_file = None\n skip = []\n if m.version == 'mf6':\n precision = 'double'\n binary_grid_file = os.path.join(m.model_ws, '{}.dis.grb'.format(m.name))\n skip = ['WEL']\n file = os.path.join(m.model_ws, '{}.cbc'.format(m.name))\n #file = 'Examples/data/lpr/lpr_inset.cbc'\n assert os.path.exists(file)\n cbobj = bf.CellBudgetFile(file, precision=precision)\n layers = list(range(cbobj.nlay))\n kstpkper = cbobj.get_kstpkper()[0]\n variables = [bs.decode().strip() for bs in cbobj.textlist\n if bs.decode().strip() not in skip]\n nrow, ncol = cbobj.nrow, cbobj.ncol\n cbobj.close()\n outfiles = export_cell_budget(file, grid,\n binary_grid_file=binary_grid_file,\n kstpkper=kstpkper,\n precision=precision,\n output_path=output_path)\n check_files(outfiles, variables, kstpkper)\n tifs = [f for f in outfiles if f.endswith('.tif')]\n for f in tifs:\n with rasterio.open(f) as src:\n assert src.width == ncol\n assert src.height == nrow\n compare_polygons(grid.bbox, box(*src.bounds))\n\n\[email protected](('export_depth_to_water,export_layers,'\n 'export_water_table'), \n ((True, False, True),\n (False, True, False)\n ))\ndef test_heads_export(model, export_depth_to_water, export_layers, \n export_water_table):\n m, grid, output_path = model\n file = os.path.join(m.model_ws, '{}.hds'.format(m.name))\n #file = 'Examples/data/lpr/lpr_inset.hds'\n variables = ['hds']\n if export_depth_to_water:\n variables += ['wt', 'dtw', 'op']\n if export_water_table and 'wt' not in variables:\n variables.append('wt')\n hdsobj = bf.HeadFile(file)\n kstpkper = hdsobj.get_kstpkper()[-1:]\n layers = list(range(hdsobj.nlay))\n 
nrow, ncol = hdsobj.nrow, hdsobj.ncol\n hdsobj.close()\n outfiles = export_heads(file, grid, -1e4, -9999,\n kstpkper=kstpkper, \n export_depth_to_water=export_depth_to_water,\n export_water_table=export_water_table, \n export_layers=export_layers,\n land_surface_elevations=m.dis.top.array,\n output_path=output_path)\n check_files(outfiles, variables, kstpkper, layers)\n tifs = [f for f in outfiles if f.endswith('.tif')]\n for f in tifs:\n with rasterio.open(f) as src:\n assert src.width == ncol\n assert src.height == nrow\n compare_polygons(grid.bbox, box(*src.bounds))\n shps = [f for f in outfiles if f.endswith('.shp')]\n for f in shps:\n with fiona.open(f) as src:\n assert box(*src.bounds).within(grid.bbox)\n #compare_polygons(grid.bbox, box(*src.bounds), rtol=0.1)\n\n\ndef test_drawdown_export(model):\n m, grid, output_path = model\n file = os.path.join(m.model_ws, '{}.hds'.format(m.name))\n #file = 'Examples/data/lpr/lpr_inset.hds'\n variables = ['ddn', 'wt-ddn']\n hdsobj = bf.HeadFile(file)\n kstpkper0 = hdsobj.get_kstpkper()[1]\n kstpkper1 = hdsobj.get_kstpkper()[-1]\n layers = list(range(hdsobj.nlay))\n nrow, ncol = hdsobj.nrow, hdsobj.ncol\n hdsobj.close()\n outfiles = export_drawdown(file, grid, -1e4, -9999,\n kstpkper0=kstpkper0,\n kstpkper1=kstpkper1,\n output_path=output_path)\n check_files(outfiles, variables, [kstpkper1], layers)\n tifs = [f for f in outfiles if f.endswith('.tif')]\n for f in tifs:\n with rasterio.open(f) as src:\n assert src.width == ncol\n assert src.height == nrow\n compare_polygons(grid.bbox, box(*src.bounds))\n shps = [f for f in outfiles if f.endswith('.shp')]\n for f in shps:\n with fiona.open(f) as src:\n assert box(*src.bounds).within(grid.bbox)\n\n\ndef test_sfr_results_export(lpr_model, lpr_modelgrid, lpr_output_path):\n mf2005_sfr_outputfile = 'Examples/data/lpr/lpr_inset.sfr.out'\n kstpkper = [(4, 0)]\n variables = ['sfrout', 'baseflow', 'qaquifer']\n outfiles = export_sfr_results(mf2005_sfr_outputfile=mf2005_sfr_outputfile,\n model=lpr_model,\n grid=lpr_modelgrid,\n kstpkper=kstpkper,\n output_length_units='feet',\n output_time_units='seconds',\n output_path=lpr_output_path\n )\n check_files(outfiles, variables, kstpkper)\n\n\[email protected]('use_flopy', (False, True))\ndef test_mf6sfr_results_export(shellmound_model, shellmound_modelgrid, shellmound_output_path, \n use_flopy):\n mf6_sfr_stage_file = os.path.join(shellmound_model.model_ws, '{}.sfr.stage.bin'\n .format(shellmound_model.name))\n mf6_sfr_budget_file = os.path.join(shellmound_model.model_ws, '{}.sfr.out.bin'\n .format(shellmound_model.name))\n model_ws = Path(shellmound_model.model_ws)\n if use_flopy:\n model = shellmound_model\n package_data_file=None\n else:\n package_data_file = model_ws / f'external/{shellmound_model.name}_packagedata.dat'\n model = None\n hdsobj = bf.HeadFile(mf6_sfr_stage_file, text='stage')\n kstpkper = hdsobj.get_kstpkper()[:1] + hdsobj.get_kstpkper()[-1:]\n variables = ['sfrout', 'baseflow', 'qaquifer']\n outfiles = export_sfr_results(mf6_sfr_stage_file=mf6_sfr_stage_file,\n mf6_sfr_budget_file=mf6_sfr_budget_file,\n model=model,\n mf6_package_data=package_data_file,\n grid=shellmound_modelgrid,\n kstpkper=kstpkper,\n output_length_units='feet',\n output_time_units='seconds',\n output_path=shellmound_output_path\n )\n check_files(outfiles, variables, kstpkper)\n\n\ndef test_parse_fname():\n fname = 'wel0_stress_period_data.shp'\n result = parse_fname(fname)\n assert result['var'] == os.path.splitext(fname)[0]" ]
[ [ "numpy.allclose", "numpy.isscalar" ] ]
neurophysik/jitcode
[ "cbc815da01974597057c8d78f74d16169bdad580" ]
[ "examples/double_fhn_restricted_lyap.py" ]
[ "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nfrom jitcode import jitcode_restricted_lyap, y\nimport numpy as np\nfrom scipy.stats import sem\n\na = -0.025794\nb1 = 0.01\nb2 = 0.01\nc = 0.02\nk = 0.128\n\nf = [\n\ty(0) * ( a-y(0) ) * ( y(0)-1.0 ) - y(1) + k * (y(2) - y(0)),\n\tb1*y(0) - c*y(1),\n\ty(2) * ( a-y(2) ) * ( y(2)-1.0 ) - y(3) + k * (y(0) - y(2)),\n\tb2*y(2) - c*y(3)\n\t]\n\ninitial_state = np.random.random(4)\n\nvectors = [\n\tnp.array([1.,0.,1.,0.]),\n\tnp.array([0.,1.,0.,1.])\n\t]\n\nODE = jitcode_restricted_lyap(f, vectors=vectors)\nODE.set_integrator(\"dopri5\")\nODE.set_initial_value(initial_state,0.0)\n\ndata = np.hstack([ODE.integrate(T)[1] for T in range(10,100000,10)])\n\nprint(np.average(data[500:]), sem(data[500:]))\n" ]
[ [ "numpy.random.random", "numpy.array", "scipy.stats.sem", "numpy.average" ] ]
okinter11/VTuber-Python-Unity
[ "ebfdf729ffb07a1698acd07aa0e2baec0d6bbb02" ]
[ "facial_features.py" ]
[ "\"\"\"\r\nMiscellaneous facial features detection implementation\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\nfrom enum import Enum\r\n\r\nclass Eyes(Enum):\r\n LEFT = 1\r\n RIGHT = 2\r\n\r\nclass FacialFeatures:\r\n\r\n eye_key_indicies=[\r\n [\r\n # Left eye\r\n # eye lower contour\r\n 33,\r\n 7,\r\n 163,\r\n 144,\r\n 145,\r\n 153,\r\n 154,\r\n 155,\r\n 133,\r\n # eye upper contour (excluding corners)\r\n 246,\r\n 161,\r\n 160,\r\n 159,\r\n 158,\r\n 157,\r\n 173\r\n ],\r\n [\r\n # Right eye\r\n # eye lower contour\r\n 263,\r\n 249,\r\n 390,\r\n 373,\r\n 374,\r\n 380,\r\n 381,\r\n 382,\r\n 362,\r\n # eye upper contour (excluding corners)\r\n 466,\r\n 388,\r\n 387,\r\n 386,\r\n 385,\r\n 384,\r\n 398\r\n ]\r\n ]\r\n\r\n # custom img resize function\r\n def resize_img(img, scale_percent):\r\n width = int(img.shape[1] * scale_percent / 100.0)\r\n height = int(img.shape[0] * scale_percent / 100.0)\r\n\r\n return cv2.resize(img, (width, height), interpolation = cv2.INTER_AREA)\r\n\r\n # calculate eye apsect ratio to detect blinking\r\n # and/ or control closing/ opening of eye\r\n def eye_aspect_ratio(image_points, side):\r\n\r\n p1, p2, p3, p4, p5, p6 = 0, 0, 0, 0, 0, 0\r\n tip_of_eyebrow = 0\r\n\r\n # get the contour points at img pixel first\r\n # following the eye aspect ratio formula with little modifications\r\n # to match the facemesh model\r\n if side == Eyes.LEFT:\r\n\r\n eye_key_left = FacialFeatures.eye_key_indicies[0]\r\n\r\n p2 = np.true_divide(\r\n np.sum([image_points[eye_key_left[10]], image_points[eye_key_left[11]]], axis=0),\r\n 2)\r\n p3 = np.true_divide(\r\n np.sum([image_points[eye_key_left[13]], image_points[eye_key_left[14]]], axis=0),\r\n 2)\r\n p6 = np.true_divide(\r\n np.sum([image_points[eye_key_left[2]], image_points[eye_key_left[3]]], axis=0),\r\n 2)\r\n p5 = np.true_divide(\r\n np.sum([image_points[eye_key_left[5]], image_points[eye_key_left[6]]], axis=0),\r\n 2)\r\n p1 = image_points[eye_key_left[0]]\r\n p4 = image_points[eye_key_left[8]]\r\n\r\n # tip_of_eyebrow = image_points[63]\r\n tip_of_eyebrow = image_points[105]\r\n\r\n elif side == Eyes.RIGHT:\r\n eye_key_right = FacialFeatures.eye_key_indicies[1]\r\n\r\n p3 = np.true_divide(\r\n np.sum([image_points[eye_key_right[10]], image_points[eye_key_right[11]]], axis=0),\r\n 2)\r\n p2 = np.true_divide(\r\n np.sum([image_points[eye_key_right[13]], image_points[eye_key_right[14]]], axis=0),\r\n 2)\r\n p5 = np.true_divide(\r\n np.sum([image_points[eye_key_right[2]], image_points[eye_key_right[3]]], axis=0),\r\n 2)\r\n p6 = np.true_divide(\r\n np.sum([image_points[eye_key_right[5]], image_points[eye_key_right[6]]], axis=0),\r\n 2)\r\n p1 = image_points[eye_key_right[8]]\r\n p4 = image_points[eye_key_right[0]]\r\n\r\n tip_of_eyebrow = image_points[334]\r\n\r\n # https://downloads.hindawi.com/journals/cmmm/2020/1038906.pdf\r\n # Fig (3)\r\n ear = np.linalg.norm(p2-p6) + np.linalg.norm(p3-p5)\r\n ear /= (2 * np.linalg.norm(p1-p4) + 1e-6)\r\n ear = ear * (np.linalg.norm(tip_of_eyebrow-image_points[2]) / np.linalg.norm(image_points[6]-image_points[2]))\r\n return ear\r\n\r\n # calculate mouth aspect ratio to detect mouth movement\r\n # to control opening/ closing of mouth in avatar\r\n # https://miro.medium.com/max/1508/0*0rVqugQAUafxXYXE.jpg\r\n def mouth_aspect_ratio(image_points):\r\n p1 = image_points[78]\r\n p2 = image_points[81]\r\n p3 = image_points[13]\r\n p4 = image_points[311]\r\n p5 = image_points[308]\r\n p6 = image_points[402]\r\n p7 = image_points[14]\r\n p8 = image_points[178]\r\n\r\n mar = 
np.linalg.norm(p2-p8) + np.linalg.norm(p3-p7) + np.linalg.norm(p4-p6)\r\n mar /= (2 * np.linalg.norm(p1-p5) + 1e-6)\r\n return mar\r\n\r\n def mouth_distance(image_points):\r\n p1 = image_points[78]\r\n p5 = image_points[308]\r\n return np.linalg.norm(p1-p5)\r\n\r\n\r\n # detect iris through new landmark coordinates produced by mediapipe\r\n # replacing the old image processing method\r\n def detect_iris(image_points, iris_image_points, side):\r\n '''\r\n return:\r\n x_rate: how much the iris is toward the left. 0 means totally left and 1 is totally right.\r\n y_rate: how much the iris is toward the top. 0 means totally top and 1 is totally bottom.\r\n '''\r\n\r\n iris_img_point = -1\r\n p1, p4 = 0, 0\r\n eye_y_high, eye_y_low = 0, 0\r\n x_rate, y_rate = 0.5, 0.5\r\n\r\n # get the corresponding image coordinates of the landmarks\r\n if side == Eyes.LEFT:\r\n iris_img_point = 468\r\n\r\n eye_key_left = FacialFeatures.eye_key_indicies[0]\r\n p1 = image_points[eye_key_left[0]]\r\n p4 = image_points[eye_key_left[8]]\r\n\r\n eye_y_high = image_points[eye_key_left[12]]\r\n eye_y_low = image_points[eye_key_left[4]]\r\n\r\n elif side == Eyes.RIGHT:\r\n iris_img_point = 473\r\n\r\n eye_key_right = FacialFeatures.eye_key_indicies[1]\r\n p1 = image_points[eye_key_right[8]]\r\n p4 = image_points[eye_key_right[0]]\r\n\r\n eye_y_high = image_points[eye_key_right[12]]\r\n eye_y_low = image_points[eye_key_right[4]]\r\n\r\n p_iris = iris_image_points[iris_img_point - 468]\r\n\r\n # find the projection of iris_image_point on the straight line fromed by p1 and p4\r\n # through vector dot product\r\n # to get x_rate\r\n\r\n vec_p1_iris = [p_iris[0] - p1[0], p_iris[1] - p1[1]]\r\n vec_p1_p4 = [p4[0] - p1[0], p4[1] - p1[1]]\r\n \r\n x_rate = (np.dot(vec_p1_iris, vec_p1_p4) / (np.linalg.norm(p1-p4) + 1e-06)) / (np.linalg.norm(p1-p4) + 1e-06)\r\n\r\n # find y-rate simiilarily\r\n\r\n vec_eye_h_iris = [p_iris[0] - eye_y_high[0], p_iris[1] - eye_y_high[1]]\r\n vec_eye_h_eye_l = [eye_y_low[0] - eye_y_high[0], eye_y_low[1] - eye_y_high[1]]\r\n\r\n y_rate = (np.dot(vec_eye_h_eye_l, vec_eye_h_iris) / (np.linalg.norm(eye_y_high - eye_y_low) + 1e-06)) / (np.linalg.norm(eye_y_high - eye_y_low) + 1e-06)\r\n\r\n return x_rate, y_rate\r\n" ]
[ [ "numpy.sum", "numpy.dot", "numpy.linalg.norm" ] ]
aliny2003/pycwr
[ "7459371588e6d0d6d0737e249afa3921fe073151" ]
[ "pycwr/draw/colormap/cm.py" ]
[ "\"\"\"\nthis code is modified from pyart.graph.cm file, developed by Helmus, J.J. & Collis, S.M.\nhttps://github.com/ARM-DOE/pyart\n==============\n\nRadar related colormaps.\n\n.. autosummary::\n :toctree: generated/\n\n revcmap\n _reverser\n _reverse_cmap_spec\n _generate_cmap\n\n\nAvailable colormaps, reversed versions (_r) are also provided, these\ncolormaps are available within matplotlib with names 'pyart_COLORMAP':\n\n * BlueBrown10\n * BlueBrown11\n * BrBu10\n * BrBu12\n * Bu10\n * Bu7\n * BuDOr12\n * BuDOr18\n * BuDRd12\n * BuDRd18\n * BuGr14\n * BuGy8\n * BuOr10\n * BuOr12\n * BuOr8\n * BuOrR14\n * Carbone11\n * Carbone17\n * Carbone42\n * Cat12\n * EWilson17\n * GrMg16\n * Gray5\n * Gray9\n * NWSRef\n * NWSVel\n * NWS_SPW\n * PD17\n * RRate11\n * RdYlBu11b\n * RefDiff\n * SCook18\n * StepSeq25\n * SymGray12\n * Theodore16\n * Wild25\n * LangRainbow12\n * CN_ref\n * CN_vel\n\n\"\"\"\n# the code for colormaps in this file were adapted from pyart by Helmus, J.J. & Collis, S.M.\n# https://github.com/ARM-DOE/pyart\n\n# This file was adapted from the cm.py file of the matplotlib project,\n# http://matplotlib.org/.\n# Copyright (c) 2012-2013 Matplotlib Development Team; All Rights Reserved\n\nfrom __future__ import print_function, division\nimport warnings\n\nimport matplotlib as mpl\nimport matplotlib.colors as colors\nfrom ._cm import datad\nimport matplotlib.cm\n\ncmap_d = dict()\n\n# reverse all the colormaps.\n# reversed colormaps have '_r' appended to the name.\n\n\ndef _reverser(f):\n \"\"\" perform reversal. \"\"\"\n def freversed(x):\n \"\"\" f specific reverser. \"\"\"\n return f(1 - x)\n return freversed\n\n\ndef revcmap(data):\n \"\"\"Can only handle specification *data* in dictionary format.\"\"\"\n data_r = {}\n for key, val in data.items():\n if callable(val):\n valnew = _reverser(val)\n # This doesn't work: lambda x: val(1-x)\n # The same \"val\" (the first one) is used\n # each time, so the colors are identical\n # and the result is shades of gray.\n else:\n # Flip x and exchange the y values facing x = 0 and x = 1.\n valnew = [(1.0 - x, y1, y0) for x, y0, y1 in reversed(val)]\n data_r[key] = valnew\n return data_r\n\n\ndef _reverse_cmap_spec(spec):\n \"\"\"Reverses cmap specification *spec*, can handle both dict and tuple\n type specs.\"\"\"\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", FutureWarning)\n if 'red' in spec:\n return revcmap(spec)\n else:\n revspec = list(reversed(spec))\n if len(revspec[0]) == 2: # e.g., (1, (1.0, 0.0, 1.0))\n revspec = [(1.0 - a, b) for a, b in revspec]\n return revspec\n\n\ndef _generate_cmap(name, lutsize):\n \"\"\"Generates the requested cmap from it's name *name*. 
The lut size is\n *lutsize*.\"\"\"\n\n spec = datad[name]\n\n # Generate the colormap object.\n if 'red' in spec:\n return colors.LinearSegmentedColormap(name, spec, lutsize)\n else:\n return colors.LinearSegmentedColormap.from_list(name, spec, lutsize)\n\nLUTSIZE = mpl.rcParams['image.lut']\n\n# need this list because datad is changed in loop\n_cmapnames = list(datad.keys())\n\n# Generate the reversed specifications ...\n\nfor cmapname in _cmapnames:\n spec = datad[cmapname]\n spec_reversed = _reverse_cmap_spec(spec)\n datad[cmapname + '_r'] = spec_reversed\n\n# Precache the cmaps with ``lutsize = LUTSIZE`` ...\n\n# Use datad.keys() to also add the reversed ones added in the section above:\nfor cmapname in datad.keys():\n cmap_d[cmapname] = _generate_cmap(cmapname, LUTSIZE)\n\nlocals().update(cmap_d)\n\n# register the colormaps so that can be accessed with the names pyart_XXX\nfor name, cmap in cmap_d.items():\n if name in [\"ref\", \"vel\"]:\n matplotlib.cm.register_cmap(name=\"CN_\"+name, cmap=cmap)\n else:\n full_name = 'pyart_' + name\n matplotlib.cm.register_cmap(name=full_name, cmap=cmap)\n\n" ]
[ [ "matplotlib.colors.LinearSegmentedColormap", "matplotlib.colors.LinearSegmentedColormap.from_list" ] ]
keshavpdl/sign-language-recognition-using-cnn-and-tensorflow
[ "11e7255fd5d7bdc791626670db6274d41fbd9c58" ]
[ "dataset.py" ]
[ "import cv2 # working with, mainly resizing, images\r\nimport numpy as np # dealing with arrays\r\nimport os # dealing with directories\r\nfrom random import shuffle # mixing up or currently ordered data that might lead our network astray in training.\r\n\r\npath='data'\r\n\r\nIMG_SIZE = 96\r\n\r\ndef create_train_data():\r\n training_data = []\r\n label = 0\r\n for (dirpath,dirnames,filenames) in os.walk(path):\r\n for dirname in dirnames:\r\n print(dirname)\r\n for(direcpath,direcnames,files) in os.walk(path+\"/\"+dirname):\r\n for file in files:\r\n actual_path=path+\"/\"+dirname+\"/\"+file\r\n print(files)\r\n # label=label_img(dirname)\r\n path1 =path+\"/\"+dirname+'/'+file\r\n img=cv2.imread(path1,cv2.IMREAD_GRAYSCALE)#cv2.imread() method loads an image from the specified file.\r\n img = cv2.resize(img, (IMG_SIZE,IMG_SIZE))# To resize an image, OpenCV provides cv2.resize() function.\r\n training_data.append([np.array(img),label])\r\n label = label + 1\r\n print(label)\r\n shuffle(training_data)\r\n np.save('train_data.npy', training_data)\r\n print(training_data)\r\n return training_data\r\n\r\ncreate_train_data()" ]
[ [ "numpy.array", "numpy.save" ] ]