repo_name (string, lengths 6–130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list) |
---|---|---|---|---|---|
NooneBug/adapter_on_entity_typing | [
"b8d2850dbed47adbf21c9a8021cef69a9b5d60dd"
] | [
"result_scripts/generate_predictions.py"
] | [
"import configparser\nfrom adapter_entity_typing.network_classes.classifiers import EarlyStoppingWithColdStart\nfrom torch.utils.data.dataloader import DataLoader\nfrom adapter_entity_typing.network import load_model\nfrom collections import defaultdict\nimport torch\nimport json\nimport numpy as np\nfrom tqdm import tqdm\n\nimport sys\n\n# parameter_tags = ['bert_ft_2_figer']\nparameter_tags = [sys.argv[1]]\n\nconfig = configparser.ConfigParser()\ntraining_config_file = \"result_scripts/generate_predictions_parameters.ini\"\nconfig.read(\"result_scripts/generate_predictions_parameters.ini\")\nprint(list(config.keys()))\nconfig = config[parameter_tags[0]]\n\nsig = torch.nn.Sigmoid()\n\n# model_path = config['ModelRootPath'] + config['ModelName']\n# classifier = get_model(model_path)\n\n# max_context_side_size = classifier.configuration('MaxContextSideSize')\n# max_entity_size = classifier.configuration('MaxEntitySize')\n\n# train_dataset, dev_dataset, test_dataset, label2id = prepare_entity_typing_datasets(classifier)\n\n\n# vocab_len = len(id2label)\n\n# add_classifier(model = classifier, labels = label2id)\n\n# model = adapterPLWrapper.load_from_checkpoint(model_path, \n# adapterClassifier = classifier, \n# id2label = id2label, \n# lr = 1e-4)\n# model.cuda()\n# model.eval()\n\nmicros = {\n \"p\": [],\n \"r\": [],\n \"f1\": []}\nmacros = {\n \"p\": [],\n \"r\": [],\n \"f1\": []}\nmacro_examples = {\n \"p\": [],\n \"r\": [],\n \"f1\": []}\n \nexperiment_name = config['fileName']\nperformance_file = config['performanceFile'] + experiment_name\nprediction_file = config['predictionFile'] + experiment_name\naverage_std_file = config['AvgStdFile'] + experiment_name\n\ndev_or_test = config['dev_or_test']\nif dev_or_test == 'both':\n keys = ['dev', 'test']\nelif dev_or_test == 'dev':\n keys = ['dev']\nelif dev_or_test == 'test':\n keys = ['test']\nelse:\n raise Exception('please provide a meaningfull value for \"dev_or_test\"')\n\nmacros = {k: {subk: [] for subk in keys} for k, v in macros.items()}\nmicros = {k: {subk: [] for subk in keys} for k, v in macros.items()}\nmacro_examples= {k: {subk: [] for subk in keys} for k, v in macros.items()}\n\nfor model, _, dev_dataset, test_dataset, label2id in load_model(parameter_tags[0]): # , \"results_scripts/generate_preditcions_parameters.ini\"):\n\n dev_loader = DataLoader(dev_dataset, batch_size = 100, num_workers=20)\n test_loader = DataLoader(test_dataset, batch_size = 100, num_workers=20)\n id2label = {v: k for k,v in label2id.items()}\n\n\n if dev_or_test == 'both':\n data_to_pred = ['dev', 'test']\n datasets = [dev_loader, test_loader]\n dataset_paths = [model.configuration('PathInputDev'), model.configuration('PathInputTest')]\n\n elif dev_or_test == 'dev':\n data_to_pred = ['dev']\n datasets = [dev_loader]\n dataset_paths = [model.configuration('PathInputDev')]\n\n elif dev_or_test == 'test':\n data_to_pred = ['test']\n datasets = [test_loader]\n dataset_paths = [model.configuration('PathInputTest')]\n\n else:\n raise Exception('please provide a meaningfull value for \"dev_or_test\"')\n\n for dataset_id, d in enumerate(data_to_pred):\n all_preds = []\n all_preds_and_logits = []\n all_labels = []\n top_k_labels = []\n loader = datasets[dataset_id]\n for mention, attn, labels in loader:\n \n mention = mention.cuda()\n attn = attn.cuda()\n preds = sig(model(mention, attn))\n \n batch_preds = []\n batch_preds_and_logits = []\n batch_top_k_labels = []\n for i, pred in enumerate(preds):\n mask = pred > .5\n ex_preds = []\n ex_preds_and_logits = [] \n 
pred_ids = mask.nonzero()\n no_pred = True\n for p in pred_ids:\n ex_preds.append(id2label[p.item()])\n ex_preds_and_logits.append((id2label[p.item()], round(preds[i][p].item(), 3)))\n no_pred = False\n # sort logits by pred\n topk_values, topk_indexes = torch.topk(pred, k = 5)\n top_k_l = []\n for val, index in zip(topk_values, topk_indexes):\n val = round(val.item(), 3)\n lab = id2label[index.item()]\n top_k_l.append((lab, val))\n \n if no_pred:\n ex_preds.append(top_k_l[0][0])\n ex_preds_and_logits.append(top_k_l[0])\n\n sorted_ex_preds_and_logits = sorted(ex_preds_and_logits, key=lambda tup: tup[1], reverse = True)\n batch_preds.append(ex_preds)\n batch_preds_and_logits.append(sorted_ex_preds_and_logits)\n batch_top_k_labels.append(top_k_l)\n \n all_preds.extend(batch_preds)\n all_preds_and_logits.extend(batch_preds_and_logits)\n top_k_labels.extend(batch_top_k_labels)\n\n mask = labels == 1\n batch_labels = []\n for m in mask:\n ex_labels = []\n labels_ids = m.nonzero()\n for l in labels_ids:\n ex_labels.append(id2label[l.item()])\n batch_labels.append(ex_labels)\n all_labels.extend(batch_labels)\n\n correct_count = defaultdict(int)\n actual_count = defaultdict(int)\n predict_count = defaultdict(int)\n # compute singular class performances and macro performances\n bar = tqdm(desc=\"computing macro performances\", total=len(all_preds))\n for labels, preds in zip(all_labels, all_preds):\n for pred in preds:\n predict_count[pred] += 1\n\n if pred in labels:\n correct_count[pred] += 1\n \n for label in labels:\n actual_count[label] += 1\n bar.update(1)\n bar.close()\n\n def compute_f1(p, r):\n return (2*p*r)/(p + r) if p + r else 0\n\n precisions = {k: correct_count[k]/predict_count[k] if predict_count[k] else 0 for k in label2id.keys()}\n recalls = {k: correct_count[k]/actual_count[k] if actual_count[k] else 0 for k in label2id.keys()}\n f1s = {k: compute_f1(precisions[k], recalls[k]) for k in label2id.keys()}\n\n macro_p = np.mean(list(precisions.values()))\n macro_r = np.mean(list(recalls.values()))\n macro_f1 = compute_f1(macro_p, macro_r)\n\n macros['p'][d].append(macro_p)\n macros['r'][d].append(macro_r)\n macros['f1'][d].append(macro_f1)\n\n #compute macro_example performances\n ma_e_precisions = []\n ma_e_recalls = []\n n = len(all_labels)\n\n bar = tqdm(desc=\"computing macro examples performances\", total=len(all_preds))\n \n for labels, preds in zip(all_labels, all_preds):\n correct_preds = len(set(labels).intersection(set(preds)))\n ma_e_precisions.append(correct_preds/len(preds))\n ma_e_recalls.append(correct_preds / len(labels))\n bar.update(1)\n bar.close()\n\n macro_example_p = np.mean(ma_e_precisions)\n macro_example_r = np.mean(ma_e_recalls)\n macro_example_f1 = compute_f1(macro_example_p, macro_example_r)\n \n macro_examples['p'][d].append(macro_example_p)\n macro_examples['r'][d].append(macro_example_r)\n macro_examples['f1'][d].append(macro_example_f1)\n \n #compute micro performances\n micro_correct_counter = 0\n micro_true_counter = 0\n micro_pred_counter = 0\n\n bar = tqdm(desc=\"computing micro performances\", total=len(all_preds)) \n for labels, preds in zip(all_labels, all_preds):\n micro_true_counter += len(labels)\n micro_pred_counter += len(preds)\n correct_preds = len(set(labels).intersection(set(preds)))\n micro_correct_counter += correct_preds\n bar.update(1)\n bar.close()\n micro_p = micro_correct_counter/micro_pred_counter\n micro_r = micro_correct_counter/micro_true_counter\n micro_f1 = compute_f1(micro_p, micro_r)\n\n micros['p'][d].append(micro_p)\n 
micros['r'][d].append(micro_r)\n micros['f1'][d].append(micro_f1)\n\n with open(dataset_paths[dataset_id], 'r') as inp:\n lines = [json.loads(l) for l in inp.readlines()]\n\n label_sentences = defaultdict(list)\n\n bar = tqdm(desc=\"generating sentences\", total=len(lines))\n \n for l, preds_and_logits, top_k in zip(lines, all_preds_and_logits, top_k_labels):\n sentence = ' '.join(l['left_context_token'])\n sentence += ' ' + l['mention_span'] + ' '\n sentence += ' '.join(l['right_context_token'])\n labels = l['y_str']\n\n for lab in labels:\n label_sentences[lab].append((sentence, l['mention_span'], preds_and_logits, top_k, labels))\n bar.update(1)\n bar.close()\n\n ordered_labels = list(sorted(label2id.keys()))\n\n with open(prediction_file + '_' + d + '.txt', 'a') as out:\n out.write('{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n'.format('label_#', 'precision', \n 'recall', 'f1', 'sentence', 'mention', \n 'preds_and_logits', 'top_k_labels_and_logits', 'true_labels'))\n for label in ordered_labels:\n i = 0\n for sentence, mention, preds_and_logits, top_k, true_label in label_sentences[label]:\n out_string = '{}\\t{:.4f}\\t{:.4f}\\t{:.4f}\\t{}\\t{}\\t{}\\t{}\\t{}\\n'.format(label + '_' + str(i + 1),\n precisions[label],\n recalls[label],\n f1s[label],\n sentence,\n mention,\n preds_and_logits,\n top_k,\n true_label)\n out.write(out_string)\n i += 1\n with open(performance_file + '_' + d + '.txt', 'a') as out:\n out.write('{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n'.format('macro_examples_p', 'macro_examples_r', 'macro_examples_f1',\n 'macro_p','macro_r', 'macro_f1',\n 'micro_p', 'micro_r', 'micro_f1'))\n out.write('{:.4f}\\t{:.4f}\\t{:.4f}\\t{:.4f}\\t{:.4f}\\t{:.4f}\\t{:.4f}\\t{:.4f}\\t{:.4f}\\n'.format(macro_example_p,\n macro_example_r,\n macro_example_f1,\n macro_p,\n macro_r,\n macro_f1,\n micro_p,\n micro_r,\n micro_f1))\n \n\n\n\nname = {\n \"p\": \"precision\",\n \"r\": \"recall\",\n \"f1\": \"f1\"\n}\nfor d in keys:\n results = {}\n for result_name, result in zip([\"micro\", \"macro\", \"example\"],\n [ micros, macros, macro_examples]):\n print(result_name)\n print(result)\n print()\n for k, v in result.items():\n v = np.array(v[d])\n mu = np.mean(v)\n sd = np.std(v)\n results[\"{}_{}\".format(result_name, k)] = (mu, sd)\n\n with open(average_std_file + '_'+ d + '.txt', 'a') as out:\n # out.write('{:^40}\\n'.format('-'))\n out.write(\"model,mu,sd\\n\")\n for k, (m, s) in results.items():\n out.write('{},{:.4f},{:.4f}\\n'.format(k, m, s))\n out.write('\\n')\n"
] | [
[
"torch.nn.Sigmoid",
"numpy.std",
"numpy.mean",
"torch.topk",
"torch.utils.data.dataloader.DataLoader",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
macthecadillac/Interacting-Fermions | [
"6122d2a7e67533b28e581929995ce8e2a2ad41fc",
"6122d2a7e67533b28e581929995ce8e2a2ad41fc"
] | [
"spinsys/time_dependent.py",
"tests/test_quasi_heisenberg_nleg_blkdiag_H_2Dself_consistency.py"
] | [
"\"\"\"\nThis file is part of spinsys.\n\nSpinsys is free software: you can redistribute it and/or modify\nit under the terms of the BSD 3-clause license. See LICENSE.txt\nfor exact terms and conditions.\n\"\"\"\n\nimport numpy as np\n\n\nclass TimeMachine():\n\n def __init__(self, eigvs, eigvecs, psi):\n \"\"\"\n Time evolves a given vector to any point in the past or future.\n\n Args: \"eigvs\" eigenenergies of the Hamiltonian. Numpy 1D array\n \"eigvecs\" eigenvectors of the Hamiltonian. Numpy 2D square array\n \"psi\" initial state. Numpy 1D array\n \"\"\"\n self.eigenenergies = eigvs\n self.back_transform_matrix = eigvecs\n self.initial_state = self._convert_to_eigenbasis(eigvecs, psi)\n self.curr_state = self.initial_state.copy()\n self.coeffs = 1 # the exponential coeffs for psi when time evolving\n self.last_dt = 0\n\n def _convert_to_eigenbasis(self, U, psi):\n return U.T.conjugate().dot(psi)\n\n def evolve_by_step(self, dt, basis='orig'):\n \"\"\"Evolves the state by dt\n\n Args: \"dt\" time step, float\n \"basis\" \"orig\" or \"energy\". The basis of the returned state\n Returns: Numpy 1D array\n \"\"\"\n if not dt == self.last_dt:\n self.coeffs = np.exp(-1j * self.eigenenergies * dt)\n self.last_dt = dt\n self.curr_state *= self.coeffs\n if basis == 'orig':\n return self.back_transform_matrix.dot(self.curr_state)\n elif basis == 'energy':\n return self.curr_state\n\n def evolve_to_time(self, t, basis='orig'):\n \"\"\"Evolves the state by time \"t\"\n\n Args: \"t\" time, float\n \"basis\" \"orig\" or \"energy\". The basis of the returned state\n Returns: Numpy 1D array\n \"\"\"\n self.coeffs = np.exp(-1j * self.eigenenergies * t)\n self.curr_state = self.coeffs * self.initial_state\n if basis == 'orig':\n return self.back_transform_matrix.dot(self.curr_state)\n elif basis == 'energy':\n return self.curr_state\n",
"from spinsys.hamiltonians.quasi_heisenberg import nleg_blkdiag\nfrom numpy import testing\nimport numpy as np\nimport unittest\n\n\nclass TestConsistencyNLegH2D(unittest.TestCase):\n\n def test_N6_W0_csqrt2_2_3leg_open(self):\n N, W, c, phi, J = 6, 0, np.sqrt(2), 0, 1\n H1 = nleg_blkdiag.H(N, W1=W, c1=c, phi1=phi, J1=J,\n W2=W, c2=c, phi2=phi, J2=J,\n nleg=2, mode='open').toarray()\n H2 = nleg_blkdiag.H(N, W1=W, c1=c, phi1=phi, J1=J,\n W2=W, c2=c, phi2=phi, J2=J,\n nleg=3, mode='open').toarray()\n E1 = np.linalg.eigvalsh(H1)\n E2 = np.linalg.eigvalsh(H2)\n testing.assert_array_almost_equal(E1, E2)\n\n def test_N8_W3_csqrt2_2_4leg_open(self):\n N, W, c, phi, J = 8, 3, np.sqrt(2), 0, 1\n H1 = nleg_blkdiag.H(N, W1=W, c1=c, phi1=phi, J1=J,\n W2=W, c2=c, phi2=phi, J2=J,\n nleg=2, mode='open').toarray()\n H2 = nleg_blkdiag.H(N, W1=W, c1=c, phi1=phi, J1=J,\n W2=W, c2=c, phi2=phi, J2=J,\n nleg=4, mode='open').toarray()\n E1 = np.linalg.eigvalsh(H1)\n E2 = np.linalg.eigvalsh(H2)\n testing.assert_array_almost_equal(E1, E2)\n\n def test_N12_W3_csqrt2_3_4leg_open(self):\n N, W, c, phi, J = 12, 3, np.sqrt(2), 0, 1\n H1 = nleg_blkdiag.H(N, W1=W, c1=c, phi1=phi, J1=J,\n W2=W, c2=c, phi2=phi, J2=J,\n nleg=3, mode='open').toarray()\n H2 = nleg_blkdiag.H(N, W1=W, c1=c, phi1=phi, J1=J,\n W2=W, c2=c, phi2=phi, J2=J,\n nleg=4, mode='open').toarray()\n E1 = np.linalg.eigvalsh(H1)\n E2 = np.linalg.eigvalsh(H2)\n testing.assert_array_almost_equal(E1, E2)\n\n def test_N6_W13_W21_csqrt2_2_3leg_open(self):\n N, W1, W2, c, phi, J = 6, 3, 1, np.sqrt(2), 0, 1\n H1 = nleg_blkdiag.H(N, W1=W1, c1=c, phi1=phi, J1=J,\n W2=W2, c2=c, phi2=phi, J2=J,\n nleg=2, mode='open').toarray()\n H2 = nleg_blkdiag.H(N, W1=W2, c1=c, phi1=phi, J1=J,\n W2=W1, c2=c, phi2=phi, J2=J,\n nleg=3, mode='open').toarray()\n E1 = np.linalg.eigvalsh(H1)\n E2 = np.linalg.eigvalsh(H2)\n testing.assert_array_almost_equal(E1, E2)\n\n def test_N6_W3_c1sqrt2_c2sqrt3_2_3leg_open(self):\n N, W, c1, c2, phi, J = 6, 0, np.sqrt(2), np.sqrt(3), 0, 1\n H1 = nleg_blkdiag.H(N, W1=W, c1=c1, phi1=phi, J1=J,\n W2=W, c2=c2, phi2=phi, J2=J,\n nleg=2, mode='open').toarray()\n H2 = nleg_blkdiag.H(N, W1=W, c1=c2, phi1=phi, J1=J,\n W2=W, c2=c1, phi2=phi, J2=J,\n nleg=3, mode='open').toarray()\n E1 = np.linalg.eigvalsh(H1)\n E2 = np.linalg.eigvalsh(H2)\n testing.assert_array_almost_equal(E1, E2)\n\n def test_N12_W13_W28_c1sqrt2_c2sqrt5_J11_J24_3_4leg_open(self):\n N, W1, W2, c1, c2 = 12, 3, 8, np.sqrt(2), np.sqrt(5)\n phi, J1, J2 = 0, 1, 4\n H1 = nleg_blkdiag.H(N, W1=W1, c1=c1, phi1=phi, J1=J1,\n W2=W2, c2=c2, phi2=phi, J2=J2,\n nleg=3, mode='open').toarray()\n H2 = nleg_blkdiag.H(N, W1=W2, c1=c2, phi1=phi, J1=J2,\n W2=W1, c2=c1, phi2=phi, J2=J1,\n nleg=4, mode='open').toarray()\n E1 = np.linalg.eigvalsh(H1)\n E2 = np.linalg.eigvalsh(H2)\n testing.assert_array_almost_equal(E1, E2)\n\n def test_N6_W0_csqrt2_2_3leg_periodic(self):\n N, W, c, phi, J = 6, 0, np.sqrt(2), 0, 1\n H1 = nleg_blkdiag.H(N, W1=W, c1=c, phi1=phi, J1=J,\n W2=W, c2=c, phi2=phi, J2=J,\n nleg=2, mode='periodic').toarray()\n H2 = nleg_blkdiag.H(N, W1=W, c1=c, phi1=phi, J1=J,\n W2=W, c2=c, phi2=phi, J2=J,\n nleg=3, mode='periodic').toarray()\n E1 = np.linalg.eigvalsh(H1)\n E2 = np.linalg.eigvalsh(H2)\n testing.assert_array_almost_equal(E1, E2)\n\n def test_N8_W3_csqrt2_2_4leg_periodic(self):\n N, W, c, phi, J = 8, 3, np.sqrt(2), 0, 1\n H1 = nleg_blkdiag.H(N, W1=W, c1=c, phi1=phi, J1=J,\n W2=W, c2=c, phi2=phi, J2=J,\n nleg=2, mode='periodic').toarray()\n H2 = nleg_blkdiag.H(N, W1=W, c1=c, phi1=phi, J1=J,\n W2=W, 
c2=c, phi2=phi, J2=J,\n nleg=4, mode='periodic').toarray()\n E1 = np.linalg.eigvalsh(H1)\n E2 = np.linalg.eigvalsh(H2)\n testing.assert_array_almost_equal(E1, E2)\n\n def test_N12_W3_csqrt2_3_4leg_periodic(self):\n N, W, c, phi, J = 12, 3, np.sqrt(2), 0, 1\n H1 = nleg_blkdiag.H(N, W1=W, c1=c, phi1=phi, J1=J,\n W2=W, c2=c, phi2=phi, J2=J,\n nleg=3, mode='periodic').toarray()\n H2 = nleg_blkdiag.H(N, W1=W, c1=c, phi1=phi, J1=J,\n W2=W, c2=c, phi2=phi, J2=J,\n nleg=4, mode='periodic').toarray()\n E1 = np.linalg.eigvalsh(H1)\n E2 = np.linalg.eigvalsh(H2)\n testing.assert_array_almost_equal(E1, E2)\n\n def test_N6_W13_W21_csqrt2_2_3leg_periodic(self):\n N, W1, W2, c, phi, J = 6, 3, 1, np.sqrt(2), 0, 1\n H1 = nleg_blkdiag.H(N, W1=W1, c1=c, phi1=phi, J1=J,\n W2=W2, c2=c, phi2=phi, J2=J,\n nleg=2, mode='periodic').toarray()\n H2 = nleg_blkdiag.H(N, W1=W2, c1=c, phi1=phi, J1=J,\n W2=W1, c2=c, phi2=phi, J2=J,\n nleg=3, mode='periodic').toarray()\n E1 = np.linalg.eigvalsh(H1)\n E2 = np.linalg.eigvalsh(H2)\n testing.assert_array_almost_equal(E1, E2)\n\n def test_N6_W3_c1sqrt2_c2sqrt3_2_3leg_periodic(self):\n N, W, c1, c2, phi, J = 6, 0, np.sqrt(2), np.sqrt(3), 0, 1\n H1 = nleg_blkdiag.H(N, W1=W, c1=c1, phi1=phi, J1=J,\n W2=W, c2=c2, phi2=phi, J2=J,\n nleg=2, mode='periodic').toarray()\n H2 = nleg_blkdiag.H(N, W1=W, c1=c2, phi1=phi, J1=J,\n W2=W, c2=c1, phi2=phi, J2=J,\n nleg=3, mode='periodic').toarray()\n E1 = np.linalg.eigvalsh(H1)\n E2 = np.linalg.eigvalsh(H2)\n testing.assert_array_almost_equal(E1, E2)\n\n def test_N12_W13_W28_c1sqrt2_c2sqrt5_J11_J24_3_4leg_periodic(self):\n N, W1, W2, c1, c2 = 12, 3, 8, np.sqrt(2), np.sqrt(5)\n phi, J1, J2 = 0, 1, 4\n H1 = nleg_blkdiag.H(N, W1=W1, c1=c1, phi1=phi, J1=J1,\n W2=W2, c2=c2, phi2=phi, J2=J2,\n nleg=3, mode='periodic').toarray()\n H2 = nleg_blkdiag.H(N, W1=W2, c1=c2, phi1=phi, J1=J2,\n W2=W1, c2=c1, phi2=phi, J2=J1,\n nleg=4, mode='periodic').toarray()\n E1 = np.linalg.eigvalsh(H1)\n E2 = np.linalg.eigvalsh(H2)\n testing.assert_array_almost_equal(E1, E2)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.exp"
],
[
"numpy.linalg.eigvalsh",
"numpy.sqrt",
"numpy.testing.assert_array_almost_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
indutny/gradtype | [
"0e7e1290e6b4e669126e42339a739ec58dc1154e",
"0e7e1290e6b4e669126e42339a739ec58dc1154e"
] | [
"tools/tsne.py",
"tools/chart-metrics.py"
] | [
"import matplotlib\nimport sys\nimport json\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport matplotlib.axes as axes\nimport numpy as np\nimport sklearn.decomposition\nimport sklearn.preprocessing\nfrom sklearn.manifold import TSNE\n\nCOLOR_MAP = plt.cm.gist_rainbow\nSEED = 0x37255c25\nUSE_TSNE = True\n\nNORMALIZE = False\n\nCATEGORIES = {}\ncategory = 'train' if len(sys.argv) < 3 else sys.argv[2]\n\ndef to_color(index):\n return index / len(CATEGORIES)\n\ndef visualize(entries):\n fig = plt.figure(1, figsize=(8, 6))\n\n axes = plt.gca()\n\n if len(entries[0]['features']) == 2:\n decomps = []\n elif USE_TSNE:\n decomps = [\n sklearn.decomposition.PCA( \\\n n_components=min(50, len(entries[0]['features'])), random_state=SEED),\n TSNE(n_components=2, verbose=2, random_state=SEED,\n perplexity=30)\n ]\n else:\n decomps = [ sklearn.decomposition.PCA(n_components=2, random_state=SEED) ]\n\n coords = [ np.array(e['features']) for e in entries ]\n if NORMALIZE:\n coords = [ x / (np.linalg.norm(x) + 1e-23) for x in coords ]\n\n for decomp in decomps:\n coords = decomp.fit_transform(coords)\n if isinstance(decomp, sklearn.decomposition.PCA):\n print('Explained variance ratio: {}'.format( \\\n decomp.explained_variance_ratio_))\n\n for e, coords in zip(entries, coords):\n e['coords'] = coords\n category = e['category']\n if category in CATEGORIES:\n index = CATEGORIES[category]\n else:\n index = len(CATEGORIES)\n CATEGORIES[category] = index\n e['index'] = index\n\n legend = []\n for label, index in CATEGORIES.items():\n color = COLOR_MAP(to_color(index))\n legend.append(mpatches.Patch(color=color, label=label))\n # axes.legend(handles=legend, fontsize=8)\n\n for e in entries:\n label = e['category']\n index = e['index']\n\n x = e['coords'][0]\n y = e['coords'][1]\n\n color = COLOR_MAP(to_color(index))\n\n marker = 'o'\n size = 8\n alpha = 0.6\n plt.scatter(x, y, c=[ color ], marker=marker,\n edgecolor='k', s=size, alpha=alpha, linewidths=0.0,\n edgecolors='none')\n\n plt.show()\n\nwith open(sys.argv[1]) as input:\n data = json.load(input)\n visualize(data[category])\n",
"import csv\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\n\ncategory = 'train' if len(sys.argv) < 3 else sys.argv[2]\n\nALPHA = 0.2\nNEGATIVE_COLOR = (1.0, 0.098039215686275, 0.223529411764706,)\nPOSITIVE_COLOR = (0.219607843137255, 0.698039215686275, 0.317647058823529,)\n\nfig = plt.figure(1, figsize=(8, 8))\n\nfor category in [ 'train', 'validate' ]:\n with open(sys.argv[1]) as input:\n reader = csv.reader(input, delimiter=',')\n labels = next(reader)\n\n steps = []\n data = {}\n for row in reader:\n steps.append(int(row[1]))\n for value, label in zip(row[2:], labels[2:]):\n if not category in label:\n continue\n\n if not label in data:\n data[label] = []\n data[label].append(float(value))\n\n rev_steps = steps.copy()\n rev_steps.reverse()\n fill_steps = steps + rev_steps\n\n plt.subplot(2, 1, 1 if category is 'train' else 2)\n\n def fill(forward, backward, color, alpha=1.0, label=None):\n color += (alpha,)\n backward_rev = backward.copy()\n backward_rev.reverse()\n plt.fill(fill_steps, forward + backward_rev,\n color=color, label=label)\n\n fill(data[category + '/positive_5'], data[category + '/positive_95'],\n POSITIVE_COLOR, ALPHA)\n fill(data[category + '/positive_10'], data[category + '/positive_90'],\n POSITIVE_COLOR, ALPHA)\n fill(data[category + '/positive_25'], data[category + '/positive_75'],\n POSITIVE_COLOR, ALPHA)\n plt.plot(steps, data[category + '/positive_50'], color=POSITIVE_COLOR,\n label='positive')\n\n fill(data[category + '/negative_5'], data[category + '/negative_95'],\n NEGATIVE_COLOR, ALPHA)\n fill(data[category + '/negative_10'], data[category + '/negative_90'],\n NEGATIVE_COLOR, ALPHA)\n fill(data[category + '/negative_25'], data[category + '/negative_75'],\n NEGATIVE_COLOR, ALPHA)\n\n plt.plot(steps, data[category + '/negative_50'], color=NEGATIVE_COLOR,\n label='negative')\n\n plt.title(category)\n plt.legend(loc='upper left')\n plt.grid(True)\n\nplt.tight_layout()\nplt.show()\n"
] | [
[
"matplotlib.pyplot.gca",
"matplotlib.patches.Patch",
"matplotlib.pyplot.scatter",
"numpy.linalg.norm",
"sklearn.manifold.TSNE",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.fill",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Fostereee/Transformer-MM-Explainability | [
"6dc4925b83a38e39069369da599b11d548128eb5",
"6dc4925b83a38e39069369da599b11d548128eb5",
"6dc4925b83a38e39069369da599b11d548128eb5",
"6dc4925b83a38e39069369da599b11d548128eb5"
] | [
"VisualBERT/tests/models/test_mmbt.py",
"VisualBERT/mmf/utils/text.py",
"VisualBERT/mmf/models/pythia.py",
"VisualBERT/mmf/models/transformers/backends/layers_lrp.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n\nimport unittest\n\nimport tests.test_utils as test_utils\nimport torch\nfrom VisualBERT.mmf.common.sample import Sample, SampleList\nfrom VisualBERT.mmf.models.mmbt import MMBT\nfrom VisualBERT.mmf.modules.encoders import (\n ImageEncoderFactory,\n ImageEncoderTypes,\n ResNet152ImageEncoder,\n TextEncoderFactory,\n TextEncoderTypes,\n)\nfrom VisualBERT.mmf.utils.build import build_model\nfrom VisualBERT.mmf.utils.configuration import Configuration\nfrom VisualBERT.mmf.utils.env import setup_imports\nfrom omegaconf import OmegaConf\n\n\nclass TestMMBTTorchscript(unittest.TestCase):\n def setUp(self):\n test_utils.setup_proxy()\n setup_imports()\n model_name = \"mmbt\"\n args = test_utils.dummy_args(model=model_name)\n configuration = Configuration(args)\n config = configuration.get_config()\n model_config = config.model_config[model_name]\n model_config[\"training_head_type\"] = \"classification\"\n model_config[\"num_labels\"] = 2\n model_config.model = model_name\n self.finetune_model = build_model(model_config)\n\n def test_load_save_finetune_model(self):\n self.assertTrue(test_utils.verify_torchscript_models(self.finetune_model))\n\n def test_finetune_model(self):\n self.finetune_model.eval()\n test_sample = Sample()\n test_sample.input_ids = torch.randint(low=0, high=30255, size=(128,)).long()\n test_sample.input_mask = torch.ones(128).long()\n test_sample.segment_ids = torch.zeros(128).long()\n test_sample.image = torch.rand((3, 300, 300)).float()\n test_sample_list = SampleList([test_sample.copy()])\n\n with torch.no_grad():\n model_output = self.finetune_model.model(test_sample_list)\n\n test_sample_list = SampleList([test_sample])\n script_model = torch.jit.script(self.finetune_model.model)\n with torch.no_grad():\n script_output = script_model(test_sample_list)\n\n self.assertTrue(torch.equal(model_output[\"scores\"], script_output[\"scores\"]))\n\n def test_modal_end_token(self):\n self.finetune_model.eval()\n\n # Suppose 0 for <cls>, 1 for <pad> 2 for <sep>\n CLS = 0\n PAD = 1\n SEP = 2\n size = 128\n\n input_ids = torch.randint(low=0, high=30255, size=(size,)).long()\n input_mask = torch.ones(size).long()\n\n input_ids[0] = CLS\n length = torch.randint(low=2, high=size - 1, size=(1,))\n input_ids[length] = SEP\n input_ids[length + 1 :] = PAD\n input_mask[length + 1 :] = 0\n\n test_sample = Sample()\n test_sample.input_ids = input_ids.clone()\n test_sample.input_mask = input_mask.clone()\n test_sample.segment_ids = torch.zeros(size).long()\n test_sample.image = torch.rand((3, 300, 300)).float()\n test_sample_list = SampleList([test_sample])\n\n mmbt_base = self.finetune_model.model.bert\n with torch.no_grad():\n actual_modal_end_token = mmbt_base.extract_modal_end_token(test_sample_list)\n\n expected_modal_end_token = torch.zeros([1]).fill_(SEP).long()\n self.assertTrue(torch.equal(actual_modal_end_token, expected_modal_end_token))\n self.assertTrue(torch.equal(test_sample_list.input_ids[0, :-1], input_ids[1:]))\n self.assertTrue(\n torch.equal(test_sample_list.input_mask[0, :-1], input_mask[1:])\n )\n\n\nclass TestMMBTConfig(unittest.TestCase):\n def test_mmbt_from_params(self):\n # default init\n mmbt = MMBT.from_params(\n modal_encoder=ImageEncoderFactory.Config(\n type=ImageEncoderTypes.resnet152,\n params=ResNet152ImageEncoder.Config(pretrained=False),\n ),\n text_encoder=TextEncoderFactory.Config(type=TextEncoderTypes.identity),\n )\n\n config = OmegaConf.structured(\n MMBT.Config(\n 
modal_encoder=ImageEncoderFactory.Config(\n type=ImageEncoderTypes.resnet152,\n params=ResNet152ImageEncoder.Config(pretrained=False),\n ),\n text_encoder=TextEncoderFactory.Config(type=TextEncoderTypes.identity),\n )\n )\n self.assertIsNotNone(mmbt)\n # Make sure that the config is created from MMBT.Config\n self.assertEqual(mmbt.config, config)\n\n def test_mmbt_pretrained(self):\n test_utils.setup_proxy()\n mmbt = MMBT.from_params()\n self.assertIsNotNone(mmbt)\n\n def test_mmbt_directly_from_config(self):\n config = OmegaConf.structured(\n MMBT.Config(\n modal_encoder=ImageEncoderFactory.Config(\n type=ImageEncoderTypes.resnet152,\n params=ResNet152ImageEncoder.Config(pretrained=False),\n ),\n text_encoder=TextEncoderFactory.Config(type=TextEncoderTypes.identity),\n )\n )\n mmbt = MMBT(config)\n self.assertIsNotNone(mmbt)\n # Make sure that the config is created from MMBT.Config\n self.assertEqual(mmbt.config, config)\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\n\"\"\"\nText utils module contains implementations for various decoding strategies like\nGreedy, Beam Search and Nucleus Sampling.\n\nIn your model's config you can specify ``inference`` attribute to use these strategies\nin the following way:\n\n.. code::\n\n model_config:\n some_model:\n inference:\n - type: greedy\n - params: {}\n\"\"\"\nimport os\nimport re\nfrom collections import Counter\nfrom itertools import chain\n\nimport torch\nfrom VisualBERT.mmf.common.registry import registry\nfrom VisualBERT.mmf.utils.file_io import PathManager\nfrom VisualBERT.mmf.utils.general import get_absolute_path\n\n\nSENTENCE_SPLIT_REGEX = re.compile(r\"(\\W+)\")\n\n\ndef generate_ngrams(tokens, n=1):\n \"\"\"Generate ngrams for particular 'n' from a list of tokens\n\n Args:\n tokens (List[str]): List of tokens for which the ngram are to be generated\n n (int, optional): n for which ngrams are to be generated. Defaults to 1.\n\n Returns:\n List[str]: List of ngrams generated.\n \"\"\"\n shifted_tokens = (tokens[i:] for i in range(n))\n tuple_ngrams = zip(*shifted_tokens)\n return (\" \".join(i) for i in tuple_ngrams)\n\n\ndef generate_ngrams_range(tokens, ngram_range=(1, 3)):\n \"\"\"Generates and returns a list of ngrams for all n present in ngram_range\n\n Args:\n tokens (List[str]): List of string tokens for which ngram are to be generated\n ngram_range (List[int], optional): List of 'n' for which ngrams are to be\n generated. For e.g. if ngram_range = (1, 4) then it will returns\n 1grams, 2grams and 3grams. Defaults to (1, 3).\n\n Returns:\n List[str]: List of ngrams for each n in ngram_range\n \"\"\"\n assert len(ngram_range) == 2, (\n \"'ngram_range' should be a tuple\" \" of two elements which is range of numbers\"\n )\n return chain(*(generate_ngrams(tokens, i) for i in range(*ngram_range)))\n\n\ndef tokenize(sentence, regex=SENTENCE_SPLIT_REGEX, keep=None, remove=None):\n if keep is None:\n keep = [\"'s\"]\n if remove is None:\n remove = [\",\", \"?\"]\n sentence = sentence.lower()\n\n for token in keep:\n sentence = sentence.replace(token, \" \" + token)\n\n for token in remove:\n sentence = sentence.replace(token, \"\")\n\n tokens = regex.split(sentence)\n tokens = [t.strip() for t in tokens if len(t.strip()) > 0]\n return tokens\n\n\ndef word_tokenize(word, remove=None):\n if remove is None:\n remove = [\",\", \"?\"]\n word = word.lower()\n\n for item in remove:\n word = word.replace(item, \"\")\n word = word.replace(\"'s\", \" 's\")\n\n return word.strip()\n\n\ndef load_str_list(fname):\n with PathManager.open(fname) as f:\n lines = f.readlines()\n lines = [line.strip() for line in lines]\n return lines\n\n\nclass VocabDict:\n UNK_TOKEN = \"<unk>\"\n PAD_TOKEN = \"<pad>\"\n START_TOKEN = \"<s>\"\n END_TOKEN = \"</s>\"\n\n PAD_INDEX = 0\n SOS_INDEX = 1\n EOS_INDEX = 2\n UNK_INDEX = 3\n\n def __init__(self, vocab_file, data_dir=None):\n if not os.path.isabs(vocab_file) and data_dir is not None:\n vocab_file = get_absolute_path(os.path.join(data_dir, vocab_file))\n\n if not PathManager.exists(vocab_file):\n raise RuntimeError(f\"Vocab file {vocab_file} for vocab dict doesn't exist\")\n\n self.word_list = load_str_list(vocab_file)\n self._build()\n\n def _build(self):\n if self.UNK_TOKEN not in self.word_list:\n self.word_list = [self.UNK_TOKEN] + self.word_list\n\n self.word2idx_dict = {w: n_w for n_w, w in enumerate(self.word_list)}\n\n # String (word) to integer (index) dict mapping\n self.stoi = self.word2idx_dict\n # Integer to string 
(word) reverse mapping\n self.itos = self.word_list\n self.num_vocab = len(self.word_list)\n\n self.UNK_INDEX = (\n self.word2idx_dict[self.UNK_TOKEN]\n if self.UNK_TOKEN in self.word2idx_dict\n else None\n )\n\n self.PAD_INDEX = (\n self.word2idx_dict[self.PAD_TOKEN]\n if self.PAD_TOKEN in self.word2idx_dict\n else None\n )\n\n def idx2word(self, n_w):\n return self.word_list[n_w]\n\n def __len__(self):\n return len(self.word_list)\n\n def get_size(self):\n return len(self.word_list)\n\n def get_unk_index(self):\n return self.UNK_INDEX\n\n def get_unk_token(self):\n return self.UNK_TOKEN\n\n def word2idx(self, w):\n if w in self.word2idx_dict:\n return self.word2idx_dict[w]\n elif self.UNK_INDEX is not None:\n return self.UNK_INDEX\n else:\n raise ValueError(\n \"word %s not in dictionary \\\n (while dictionary does not contain <unk>)\"\n % w\n )\n\n def tokenize_and_index(self, sentence):\n inds = [self.word2idx(w) for w in tokenize(sentence)]\n return inds\n\n\nclass VocabFromText(VocabDict):\n DEFAULT_TOKENS = [\n VocabDict.PAD_TOKEN,\n VocabDict.UNK_TOKEN,\n VocabDict.START_TOKEN,\n VocabDict.END_TOKEN,\n ]\n\n def __init__(\n self,\n sentences,\n min_count=1,\n regex=SENTENCE_SPLIT_REGEX,\n keep=None,\n remove=None,\n only_unk_extra=False,\n ):\n if keep is None:\n keep = []\n if remove is None:\n remove = []\n token_counter = Counter()\n\n for sentence in sentences:\n tokens = tokenize(sentence, regex=regex, keep=keep, remove=remove)\n token_counter.update(tokens)\n\n token_list = []\n for token in token_counter:\n if token_counter[token] >= min_count:\n token_list.append(token)\n\n extras = self.DEFAULT_TOKENS\n\n if only_unk_extra:\n extras = [self.UNK_TOKEN]\n\n self.word_list = extras + token_list\n self._build()\n\n\nclass TextDecoder:\n \"\"\"Base class to be inherited by all decoding strategies. 
Contains\n implementations that are common for all strategies.\n\n Args:\n vocab (list): Collection of all words in vocabulary.\n\n \"\"\"\n\n def __init__(self, vocab):\n self._vocab = vocab\n self._vocab_size = vocab.get_size()\n\n # Lists to store completed sequences and scores\n self._complete_seqs = []\n self._complete_seqs_scores = []\n\n def init_batch(self, sample_list):\n img_size = sample_list.image_feature_0.size()\n self._batch_size, feature_size_1, feature_size_2 = img_size\n t_batch_size = self._batch_size * self._decode_size\n self.seqs = sample_list.answers.new_full(\n (t_batch_size, 1), self._vocab.SOS_INDEX, dtype=torch.long\n )\n sample_list.image_feature_0 = (\n sample_list.image_feature_0.unsqueeze(1)\n .expand(-1, self._decode_size, -1, -1)\n .reshape(t_batch_size, feature_size_1, feature_size_2)\n )\n self.sample_list = sample_list\n return sample_list\n\n def add_next_word(self, seqs, prev_word_inds, next_word_inds):\n return torch.cat([seqs[prev_word_inds], next_word_inds.unsqueeze(1)], dim=1)\n\n def find_complete_inds(self, next_word_inds):\n incomplete_inds = []\n for ind, next_word in enumerate(next_word_inds):\n if next_word != self._vocab.EOS_INDEX:\n incomplete_inds.append(ind)\n complete_inds = list(set(range(len(next_word_inds))) - set(incomplete_inds))\n return complete_inds, incomplete_inds\n\n def update_data(self, data, prev_word_inds, next_word_inds, incomplete_inds):\n data[\"texts\"] = next_word_inds[incomplete_inds].unsqueeze(1)\n h1 = data[\"state\"][\"td_hidden\"][0][prev_word_inds[incomplete_inds]]\n c1 = data[\"state\"][\"td_hidden\"][1][prev_word_inds[incomplete_inds]]\n h2 = data[\"state\"][\"lm_hidden\"][0][prev_word_inds[incomplete_inds]]\n c2 = data[\"state\"][\"lm_hidden\"][1][prev_word_inds[incomplete_inds]]\n data[\"state\"] = {\"td_hidden\": (h1, c1), \"lm_hidden\": (h2, c2)}\n return data\n\n\[email protected]_decoder(\"beam_search\")\nclass BeamSearch(TextDecoder):\n def __init__(self, vocab, config):\n super().__init__(vocab)\n self._decode_size = config[\"inference\"][\"params\"][\"beam_length\"]\n\n def init_batch(self, sample_list):\n self.sample_list = super().init_batch(sample_list)\n\n # initialize with t_batch_size = _batch_size * _decode_size\n self.top_k_scores = sample_list.answers.new_zeros(\n (self._batch_size * self._decode_size, 1), dtype=torch.float\n )\n # maintain _decode_size, _complete_seqs and _complete_seqs_scores\n # for each example in a batch.\n self._decode_sizes = [self._decode_size] * self._batch_size\n for _ in range(self._batch_size):\n self._complete_seqs.append([])\n self._complete_seqs_scores.append([])\n return self.sample_list\n\n def decode(self, t, data, scores):\n # Add predicted scores to top_k_scores\n scores = torch.nn.functional.log_softmax(scores, dim=1)\n scores = self.top_k_scores.expand_as(scores) + scores\n\n # Find next top k scores and words. We flatten the scores tensor here\n # and get the top_k_scores and their indices top_k_words\n top_k_scores, top_k_words = [], []\n ex_start = 0\n for decode_size in self._decode_sizes:\n ex_end = ex_start + decode_size\n if t == 0:\n top_k_score, top_k_word = scores[ex_start].topk(\n decode_size, 0, True, True\n )\n else:\n top_k_score, top_k_word = (\n scores[ex_start:ex_end].view(-1).topk(decode_size, 0, True, True)\n )\n top_k_scores.extend(top_k_score)\n top_k_words.append(top_k_word)\n ex_start = ex_end\n self.top_k_scores = torch.stack(top_k_scores)\n # Convert to vocab indices. 
top_k_words contain indices from a flattened\n # k x vocab_size tensor. To get prev_word_indices we divide top_k_words\n # by vocab_size to determine which index in the beam among k generated\n # the next top_k_word. To get next_word_indices we take top_k_words\n # modulo vocab_size index. For example :\n # vocab_size : 9491\n # top_k_words : [610, 7, 19592, 9529, 292]\n # prev_word_ind : [0, 0, 2, 1, 0]\n # next_word_ind : [610, 7, 610, 38, 292]\n # further, shift the prev_word_ind by ex_start to find corresponding example\n # within a batch.\n\n ex_start = 0\n prev_word_inds, next_word_inds = [], []\n for ex_idx, decode_size in enumerate(self._decode_sizes):\n prev_word_inds.extend((top_k_words[ex_idx] // self._vocab_size) + ex_start)\n next_word_inds.extend(top_k_words[ex_idx] % self._vocab_size)\n ex_start += decode_size\n prev_word_inds = torch.stack(prev_word_inds)\n next_word_inds = torch.stack(next_word_inds)\n\n # Add new words to sequences\n self.seqs = self.add_next_word(self.seqs, prev_word_inds, next_word_inds)\n # Find completed sequences\n complete_inds, incomplete_inds = self.find_complete_inds(next_word_inds)\n\n # Add to completed sequences and Reduce beam length\n ex_start = 0\n for ex_idx, decode_size in enumerate(self._decode_sizes):\n for beam_idx in range(ex_start, ex_start + decode_size):\n if beam_idx in complete_inds:\n top_k_score = self.top_k_scores[beam_idx]\n self._complete_seqs[ex_idx].append(self.seqs[beam_idx].tolist())\n self._complete_seqs_scores[ex_idx].append(top_k_score)\n self._decode_sizes[ex_idx] -= 1\n ex_start += decode_size\n\n # Proceed with incomplete sequences\n if sum(self._decode_sizes) == 0:\n return True, data, 0\n self.seqs = self.seqs[incomplete_inds]\n self.top_k_scores = self.top_k_scores[incomplete_inds].unsqueeze(1)\n\n # TODO: Make the data update generic for any type of model\n # This is specific to BUTD model only.\n image_feature_0 = self.sample_list.image_feature_0\n self.sample_list.image_feature_0 = image_feature_0[incomplete_inds]\n data = self.update_data(data, prev_word_inds, next_word_inds, incomplete_inds)\n\n next_beam_length = len(prev_word_inds[incomplete_inds])\n\n return False, data, next_beam_length\n\n def get_result(self):\n captions = []\n max_len = 0\n for ex_idx in range(len(self._complete_seqs_scores)):\n if len(self._complete_seqs_scores[ex_idx]) == 0:\n captions.append([0] * 5)\n max_len = max(5, max_len)\n else:\n max_score = max(self._complete_seqs_scores[ex_idx])\n max_idx = self._complete_seqs_scores[ex_idx].index(max_score)\n captions.append(self._complete_seqs[ex_idx][max_idx])\n max_len = max(max_len, len(captions[-1]))\n for ex_idx in range(len(captions)):\n padded_tokens = [self._vocab.PAD_INDEX] * (max_len - len(captions[ex_idx]))\n captions[ex_idx].extend(padded_tokens)\n return torch.FloatTensor(captions)\n\n\[email protected]_decoder(\"nucleus_sampling\")\nclass NucleusSampling(TextDecoder):\n \"\"\"Nucleus Sampling is a new text decoding strategy that avoids likelihood\n maximization. Rather, it works by sampling from the smallest set of top\n tokens which have a cumulative probability greater than a specified\n threshold.\n\n Present text decoding strategies like beam search do not work well on open-ended\n generation tasks (even on strong language models like GPT-2). 
They tend to repeat\n text a lot and the main reason behind it is that they try to maximize likelihood,\n which is a contrast from human-generated text which has a mix of high and low\n probability tokens.\n\n Nucleus Sampling is a stochastic approach and resolves this issue. Moreover,\n it improves upon other stochastic methods like top-k sampling by choosing the\n right amount of tokens to sample from. The overall result is better text\n generation on the same language model.\n\n Link to the paper introducing Nucleus Sampling (Section 6) -\n https://arxiv.org/pdf/1904.09751.pdf\n\n Args:\n vocab (list): Collection of all words in vocabulary.\n sum_threshold (float): Ceiling of sum of probabilities of tokens to\n sample from.\n \"\"\"\n\n def __init__(self, vocab, config):\n super().__init__(vocab)\n self._decode_size = 1\n # Threshold for sum of probability\n self._threshold = config[\"inference\"][\"params\"][\"sum_threshold\"]\n\n def decode(self, t, data, scores):\n # Convert scores to probabilities\n scores = torch.nn.functional.softmax(scores, dim=1)\n # Sort scores in descending order and then select the top m elements having\n # sum more than threshold.\n # We get the top_m_scores and their indices top_m_words\n if t == 0:\n top_m_scores, top_m_words = scores[0].sort(0, True)\n else:\n top_m_scores, top_m_words = scores.view(-1).sort(0, True)\n\n last_index = 0\n score_sum = 0\n for score in top_m_scores:\n last_index += 1\n score_sum += score\n if score_sum >= self._threshold:\n break\n\n top_m_scores = torch.div(top_m_scores[:last_index], score_sum)\n top_m_words = top_m_words[:last_index]\n\n # Zero value inside prev_word_inds because we are predicting a single\n # stream of output.\n prev_word_ind = torch.tensor([0])\n # Get next word based on probabilities of top m words.\n next_word_ind = top_m_words[torch.multinomial(top_m_scores, 1)]\n # Add next word to sequence\n\n self.seqs = self.add_next_word(self.seqs, prev_word_ind, next_word_ind)\n # Check if sequence is complete\n complete_inds, incomplete_inds = self.find_complete_inds(next_word_ind)\n # If sequence is complete then return\n if len(complete_inds) > 0:\n self._complete_seqs.extend(self.seqs[complete_inds].tolist())\n return True, data, 0\n\n self.seqs = self.seqs[incomplete_inds]\n\n data = self.update_data(data, prev_word_ind, next_word_ind, incomplete_inds)\n\n return False, data, 1\n\n def get_result(self):\n if len(self._complete_seqs) == 0:\n captions = torch.FloatTensor([0] * 5).unsqueeze(0)\n else:\n captions = torch.FloatTensor(self._complete_seqs[0]).unsqueeze(0)\n return captions\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\nimport copy\n\nimport omegaconf\nimport torch\nfrom VisualBERT.mmf.common.registry import registry\nfrom VisualBERT.mmf.models.base_model import BaseModel\nfrom VisualBERT.mmf.modules.embeddings import (\n ImageFeatureEmbedding,\n MultiHeadImageFeatureEmbedding,\n PreExtractedEmbedding,\n TextEmbedding,\n)\nfrom VisualBERT.mmf.modules.layers import ClassifierLayer, ModalCombineLayer\nfrom VisualBERT.mmf.utils.build import build_image_encoder\nfrom torch import nn\n\n\[email protected]_model(\"pythia\")\nclass Pythia(BaseModel):\n def __init__(self, config):\n super().__init__(config)\n self.config = config\n self._global_config = registry.get(\"config\")\n self._datasets = self._global_config.datasets.split(\",\")\n\n @classmethod\n def config_path(cls):\n return \"configs/models/pythia/defaults.yaml\"\n\n @classmethod\n def format_state_key(cls, key):\n key = key.replace(\"fa_history\", \"fa_context\")\n key = key.replace(\n \"image_feature_encoders.0.module.lc\", \"image_feature_encoders.0.lc\"\n )\n return key\n\n def build(self):\n self._build_word_embedding()\n self._init_text_embeddings(\"text\")\n self._init_feature_encoders(\"image\")\n self._init_feature_embeddings(\"image\")\n self._init_combine_layer(\"image\", \"text\")\n self._init_classifier(self._get_classifier_input_dim())\n self._init_extras()\n\n def _build_word_embedding(self):\n assert len(self._datasets) > 0\n text_processor = registry.get(self._datasets[0] + \"_text_processor\")\n vocab = text_processor.vocab\n self.word_embedding = vocab.get_embedding(torch.nn.Embedding, embedding_dim=300)\n\n def _init_text_embeddings(self, attr=\"text\"):\n if \"embeddings\" not in attr:\n attr += \"_embeddings\"\n\n text_embeddings = []\n text_embeddings_list_config = self.config[attr]\n\n embeddings_out_dim = 0\n\n for text_embedding in text_embeddings_list_config:\n embedding_type = text_embedding.type\n embedding_kwargs = copy.deepcopy(text_embedding.params)\n\n self._update_text_embedding_args(embedding_kwargs)\n\n embedding = TextEmbedding(embedding_type, **embedding_kwargs)\n\n text_embeddings.append(embedding)\n embeddings_out_dim += embedding.text_out_dim\n\n setattr(self, attr + \"_out_dim\", embeddings_out_dim)\n setattr(self, attr, nn.ModuleList(text_embeddings))\n\n def _update_text_embedding_args(self, args):\n # Add model_data_dir to kwargs\n args.model_data_dir = self.config.model_data_dir\n\n def _init_feature_encoders(self, attr):\n feat_encoders = []\n feat_encoders_list_config = self.config[attr + \"_feature_encodings\"]\n feature_dim = self.config[attr + \"_feature_dim\"]\n setattr(self, attr + \"_feature_dim\", feature_dim)\n\n for feat_encoder in feat_encoders_list_config:\n feat_encoder_config = copy.deepcopy(feat_encoder)\n with omegaconf.open_dict(feat_encoder_config):\n feat_encoder_config.params.model_data_dir = self.config.model_data_dir\n feat_encoder_config.params.in_dim = feature_dim\n feat_model = build_image_encoder(feat_encoder_config, direct_features=True)\n feat_encoders.append(feat_model)\n setattr(self, attr + \"_feature_dim\", feat_model.out_dim)\n\n setattr(self, attr + \"_feature_encoders\", nn.ModuleList(feat_encoders))\n\n def _init_feature_embeddings(self, attr):\n feature_embeddings_list = []\n num_feature_feat = len(getattr(self.config, f\"{attr}_feature_encodings\"))\n\n self.feature_embeddings_out_dim = 0\n\n for _ in range(num_feature_feat):\n feature_embeddings = []\n feature_attn_model_list = self.config[attr + 
\"_feature_embeddings\"]\n\n for feature_attn_model_params in feature_attn_model_list:\n feature_embedding = ImageFeatureEmbedding(\n getattr(self, attr + \"_feature_dim\"),\n self.text_embeddings_out_dim,\n **feature_attn_model_params,\n )\n feature_embeddings.append(feature_embedding)\n self.feature_embeddings_out_dim += feature_embedding.out_dim\n\n feature_embeddings = nn.ModuleList(feature_embeddings)\n feature_embeddings_list.append(feature_embeddings)\n\n self.feature_embeddings_out_dim *= getattr(self, attr + \"_feature_dim\")\n\n setattr(\n self, attr + \"_feature_embeddings_out_dim\", self.feature_embeddings_out_dim\n )\n del self.feature_embeddings_out_dim\n setattr(\n self,\n attr + \"_feature_embeddings_list\",\n nn.ModuleList(feature_embeddings_list),\n )\n\n def _get_embeddings_attr(self, attr):\n embedding_attr1 = attr\n if hasattr(self, attr + \"_embeddings_out_dim\"):\n embedding_attr1 = attr + \"_embeddings_out_dim\"\n else:\n embedding_attr1 = attr + \"_feature_embeddings_out_dim\"\n\n return embedding_attr1\n\n def _init_combine_layer(self, attr1, attr2):\n config_attr = attr1 + \"_\" + attr2 + \"_modal_combine\"\n\n multi_modal_combine_layer = ModalCombineLayer(\n self.config[config_attr].type,\n getattr(self, self._get_embeddings_attr(attr1)),\n getattr(self, self._get_embeddings_attr(attr2)),\n **self.config[config_attr].params,\n )\n\n setattr(\n self,\n attr1 + \"_\" + attr2 + \"_multi_modal_combine_layer\",\n multi_modal_combine_layer,\n )\n\n def _init_classifier(self, combined_embedding_dim):\n # TODO: Later support multihead\n num_choices = registry.get(self._datasets[0] + \"_num_final_outputs\")\n\n self.classifier = ClassifierLayer(\n self.config.classifier.type,\n in_dim=combined_embedding_dim,\n out_dim=num_choices,\n **self.config.classifier.params,\n )\n\n def _init_extras(self):\n self.inter_model = None\n\n def get_optimizer_parameters(self, config):\n combine_layer = self.image_text_multi_modal_combine_layer\n params = [\n {\"params\": self.word_embedding.parameters()},\n {\"params\": self.image_feature_embeddings_list.parameters()},\n {\"params\": self.text_embeddings.parameters()},\n {\"params\": combine_layer.parameters()},\n {\"params\": self.classifier.parameters()},\n {\n \"params\": self.image_feature_encoders.parameters(),\n \"lr\": (config.optimizer.params.lr * 0.1),\n },\n ]\n\n return params\n\n def _get_classifier_input_dim(self):\n return self.image_text_multi_modal_combine_layer.out_dim\n\n def process_text_embedding(\n self, sample_list, embedding_attr=\"text_embeddings\", info=None\n ):\n text_embeddings = []\n\n # Get \"text\" attribute in case of \"text_embeddings\" case\n # and \"context\" attribute in case of \"context_embeddings\"\n texts = getattr(sample_list, embedding_attr.split(\"_\")[0])\n\n # Get embedding models\n text_embedding_models = getattr(self, embedding_attr)\n\n for text_embedding_model in text_embedding_models:\n # TODO: Move this logic inside\n if isinstance(text_embedding_model, PreExtractedEmbedding):\n embedding = text_embedding_model(sample_list.question_id)\n else:\n embedding = text_embedding_model(texts)\n text_embeddings.append(embedding)\n\n text_embeddding_total = torch.cat(text_embeddings, dim=1)\n\n return text_embeddding_total\n\n def process_feature_embedding(\n self, attr, sample_list, text_embedding_total, extra=None, batch_size_t=None\n ):\n if extra is None:\n extra = []\n feature_embeddings = []\n feature_attentions = []\n features = []\n batch_size_t = (\n sample_list.get_batch_size() if 
batch_size_t is None else batch_size_t\n )\n\n # Convert list of keys to the actual values\n extra = sample_list.get_fields(extra)\n\n feature_idx = 0\n\n # Get all of the features, which are in the form, \"image_feature_0\"\n # \"image_feature_1\" ...\n while True:\n feature = getattr(sample_list, f\"{attr}_feature_{feature_idx:d}\", None)\n if feature is None:\n break\n feature_idx += 1\n feature = feature[:batch_size_t]\n features.append(feature)\n\n feature_encoders = getattr(self, attr + \"_feature_encoders\")\n # Each feature should have a separate image feature encoders\n assert len(features) == len(feature_encoders), (\n \"Number of feature encoders, {} are not equal \"\n \"to number of features, {}.\".format(len(feature_encoders), len(features))\n )\n\n # Now, iterate to get final attended image features\n for i, feature in enumerate(features):\n # Get info related to the current feature. info is generally\n # in key of format \"image_info_0\" for 0th feature\n feature_info = getattr(sample_list, f\"{attr}_info_{i:d}\", {})\n # For Pythia, we need max_features to mask attention\n feature_dim = getattr(feature_info, \"max_features\", None)\n if feature_dim is not None:\n feature_dim = feature_dim[:batch_size_t]\n\n # Attribute in which encoders are saved, for \"image\" it\n # will be \"image_feature_encoders\", other example is\n # \"context_feature_encoders\"\n encoders_attr = attr + \"_feature_encoders\"\n feature_encoder = getattr(self, encoders_attr)[i]\n\n # Encode the features\n encoded_feature = feature_encoder(feature)\n\n # Get all of the feature embeddings\n list_attr = attr + \"_feature_embeddings_list\"\n feature_embedding_models = getattr(self, list_attr)[i]\n\n # Forward through these embeddings one by one\n for feature_embedding_model in feature_embedding_models:\n inp = (encoded_feature, text_embedding_total, feature_dim, extra)\n\n embedding, attention = feature_embedding_model(*inp)\n feature_embeddings.append(embedding)\n feature_attentions.append(attention.squeeze(-1))\n\n # Concatenate all features embeddings and return along with attention\n feature_embedding_total = torch.cat(feature_embeddings, dim=1)\n return feature_embedding_total, feature_attentions\n\n def combine_embeddings(self, *args):\n feature_names = args[0]\n feature_embeddings = args[1]\n\n layer = \"_\".join(feature_names) + \"_multi_modal_combine_layer\"\n return getattr(self, layer)(*feature_embeddings)\n\n def calculate_logits(self, joint_embedding, **kwargs):\n return self.classifier(joint_embedding)\n\n def forward(self, sample_list):\n sample_list.text = self.word_embedding(sample_list.text)\n text_embedding_total = self.process_text_embedding(sample_list)\n\n image_embedding_total, _ = self.process_feature_embedding(\n \"image\", sample_list, text_embedding_total\n )\n\n if self.inter_model is not None:\n image_embedding_total = self.inter_model(image_embedding_total)\n\n joint_embedding = self.combine_embeddings(\n [\"image\", \"text\"], [image_embedding_total, text_embedding_total]\n )\n\n model_output = {\"scores\": self.calculate_logits(joint_embedding)}\n\n return model_output\n\n\n# TODO: Update\[email protected]_model(\"pythia_question_only\")\nclass PythiaQuestionOnly(Pythia):\n def __init__(self, config):\n super().__init__(config)\n\n def forward(self, sample_list):\n text_embedding_total = self.process_text_embedding(sample_list)\n text_embedding_total = text_embedding_total.new_zeros(\n text_embedding_total.size()\n )\n\n fa_txt = 
self.image_text_multi_modal_combine_layer.module.fa_txt\n dropout = self.image_text_multi_modal_combine_layer.module.dropout\n\n joint_embedding = dropout(fa_txt(text_embedding_total))\n\n linear_text = self.classifier.module.linear_text\n f_o_text = self.classifier.module.f_o_text\n scores = linear_text(f_o_text(joint_embedding))\n\n model_output = {\"scores\": scores}\n\n return model_output\n\n\n# TODO: Update\[email protected]_model(\"pythia_image_only\")\nclass PythiaImageOnly(Pythia):\n def __init__(self, config):\n super().__init__(config)\n\n def forward(self, sample_list):\n text_embedding_total = self.process_text_embedding(sample_list)\n text_embedding_total = text_embedding_total.new_zeros(\n text_embedding_total.size()\n )\n\n image_embedding_total, _ = self.process_feature_embedding(\n \"image\", sample_list, text_embedding_total\n )\n\n if self.inter_model is not None:\n image_embedding_total = self.inter_model(image_embedding_total)\n\n fa_image = self.image_text_multi_modal_combine_layer.module.fa_image\n dropout = self.image_text_multi_modal_combine_layer.module.dropout\n\n joint_embedding = dropout(fa_image(image_embedding_total))\n\n model_output = {\"scores\": self.calculate_logits(joint_embedding)}\n\n return model_output\n\n\[email protected]_model(\"multihead\")\nclass PythiaMultiHead(Pythia):\n def __init__(self, config):\n super().__init__(config)\n\n @classmethod\n def config_path(cls):\n return None\n\n def build(self):\n self._build_word_embedding()\n self._init_text_embeddings(\"text\")\n self._init_feature_encoders(\"image\")\n self._init_feature_projectors(\"image\")\n self._init_feature_embeddings(\"image\")\n self._init_combine_layer(\"image\", \"text\")\n self._init_classifier(self._get_classifier_input_dim())\n self._init_extras()\n\n def _init_feature_projectors(self, attr):\n feature_projectors = []\n feat_encoders_list_config = self.config[attr + \"_feature_projections\"]\n feat_dim = getattr(self, attr + \"_feature_dim\")\n\n for feat_encoder in feat_encoders_list_config:\n feat_encoder_config = copy.deepcopy(feat_encoder)\n feat_encoder_config.params.in_dim = feat_dim\n feat_model = build_image_encoder(feat_encoder_config, direct_features=True)\n\n feature_projectors.append(feat_model)\n setattr(self, attr + \"_feature_dim\", feat_model.out_dim)\n\n setattr(self, attr + \"_feature_projectors\", nn.ModuleList(feature_projectors))\n\n def _init_feature_embeddings(self, attr):\n feature_embeddings_list = []\n num_feature_feat = len(getattr(self.config, f\"{attr}_feature_encodings\"))\n\n self.feature_embeddings_out_dim = 0\n\n for _ in range(num_feature_feat):\n feature_embeddings = []\n feature_attn_model_list = self.config[attr + \"_feature_embeddings\"]\n\n for feature_attn_model_params in feature_attn_model_list:\n feature_embedding = MultiHeadImageFeatureEmbedding(\n getattr(self, attr + \"_feature_dim\"),\n self.text_embeddings_out_dim,\n **feature_attn_model_params,\n )\n feature_embeddings.append(feature_embedding)\n self.feature_embeddings_out_dim += feature_embedding.out_dim\n\n feature_embeddings = nn.ModuleList(feature_embeddings)\n feature_embeddings_list.append(feature_embeddings)\n\n setattr(\n self, attr + \"_feature_embeddings_out_dim\", self.feature_embeddings_out_dim\n )\n del self.feature_embeddings_out_dim\n setattr(\n self,\n attr + \"_feature_embeddings_list\",\n nn.ModuleList(feature_embeddings_list),\n )\n\n def process_feature_embedding(\n self, attr, sample_list, text_embedding_total, extra=None, batch_size_t=None\n ):\n if 
extra is None:\n extra = []\n feature_embeddings = []\n feature_attentions = []\n features = []\n batch_size_t = (\n sample_list.get_batch_size() if batch_size_t is None else batch_size_t\n )\n\n # Convert list of keys to the actual values\n extra = sample_list.get_fields(extra)\n\n feature_idx = 0\n\n # Get all of the features, which are in the form, \"image_feature_0\"\n # \"image_feature_1\" ...\n while True:\n feature = getattr(sample_list, f\"{attr}_feature_{feature_idx:d}\", None)\n if feature is None:\n break\n feature_idx += 1\n feature = feature[:batch_size_t]\n features.append(feature)\n\n feature_encoders = getattr(self, attr + \"_feature_encoders\")\n # Each feature should have a separate image feature encoders\n assert len(features) == len(feature_encoders), (\n \"Number of feature encoders, {} are not equal \"\n \"to number of features, {}.\".format(len(feature_encoders), len(features))\n )\n\n # Now, iterate to get final attended image features\n for i, feature in enumerate(features):\n # Get info related to the current feature. info is generally\n # in key of format \"image_info_0\" for 0th feature\n feature_info = getattr(sample_list, f\"{attr}_info_{i:d}\", {})\n # For Pythia, we need max_features to mask attention\n feature_dim = getattr(feature_info, \"max_features\", None)\n if feature_dim is not None:\n feature_dim = feature_dim[:batch_size_t]\n\n # Attribute in which encoders are saved, for \"image\" it\n # will be \"image_feature_encoders\", other example is\n # \"context_feature_encoders\"\n encoders_attr = attr + \"_feature_encoders\"\n feature_encoder = getattr(self, encoders_attr)[i]\n\n # Encode the features\n encoded_feature = feature_encoder(feature)\n\n projector_attr = attr + \"_feature_projectors\"\n feature_projector = getattr(self, projector_attr)[i]\n\n encoded_feature = feature_projector(encoded_feature)\n # Get all of the feature embeddings\n list_attr = attr + \"_feature_embeddings_list\"\n feature_embedding_models = getattr(self, list_attr)[i]\n\n # Forward through these embeddings one by one\n for feature_embedding_model in feature_embedding_models:\n inp = (encoded_feature, text_embedding_total, feature_dim, extra)\n\n embedding, attention = feature_embedding_model(*inp)\n feature_embeddings.append(embedding)\n feature_attentions.append(attention.squeeze(-1))\n\n # Concatenate all features embeddings and return along with attention\n feature_embedding_total = torch.cat(feature_embeddings, dim=1)\n return feature_embedding_total, feature_attentions\n",
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n__all__ = ['forward_hook', 'Clone', 'Add', 'Cat', 'ReLU', 'GELU', 'Dropout', 'BatchNorm2d', 'Linear', 'MaxPool2d',\n 'AdaptiveAvgPool2d', 'AvgPool2d', 'Conv2d', 'Sequential', 'safe_divide', 'einsum', 'Softmax', 'IndexSelect',\n 'LayerNorm', 'AddEye', 'Tanh', 'MatMul', 'Mul']\n\n\ndef safe_divide(a, b):\n den = b.clamp(min=1e-9) + b.clamp(max=1e-9)\n den = den + den.eq(0).type(den.type()) * 1e-9\n return a / den * b.ne(0).type(b.type())\n\n\ndef forward_hook(self, input, output):\n if type(input[0]) in (list, tuple):\n self.X = []\n for i in input[0]:\n x = i.detach()\n x.requires_grad = True\n self.X.append(x)\n else:\n self.X = input[0].detach()\n self.X.requires_grad = True\n\n self.Y = output\n\n\ndef backward_hook(self, grad_input, grad_output):\n self.grad_input = grad_input\n self.grad_output = grad_output\n\n\nclass RelProp(nn.Module):\n def __init__(self):\n super(RelProp, self).__init__()\n # if not self.training:\n self.register_forward_hook(forward_hook)\n\n def gradprop(self, Z, X, S):\n C = torch.autograd.grad(Z, X, S, retain_graph=True)\n return C\n\n def relprop(self, R, alpha):\n return R\n\n\nclass RelPropSimple(RelProp):\n def relprop(self, R, alpha):\n Z = self.forward(self.X)\n S = safe_divide(R, Z)\n C = self.gradprop(Z, self.X, S)\n\n if torch.is_tensor(self.X) == False:\n outputs = []\n outputs.append(self.X[0] * C[0])\n outputs.append(self.X[1] * C[1])\n else:\n outputs = self.X * (C[0])\n return outputs\n\nclass AddEye(RelPropSimple):\n # input of shape B, C, seq_len, seq_len\n def forward(self, input):\n return input + torch.eye(input.shape[2]).expand_as(input).to(input.device)\n\nclass ReLU(nn.ReLU, RelProp):\n pass\n\nclass Tanh(nn.Tanh, RelProp):\n pass\n\nclass GELU(nn.GELU, RelProp):\n pass\n\nclass Softmax(nn.Softmax, RelProp):\n pass\n\nclass LayerNorm(nn.LayerNorm, RelProp):\n pass\n\nclass Dropout(nn.Dropout, RelProp):\n pass\n\n\nclass MaxPool2d(nn.MaxPool2d, RelPropSimple):\n pass\n\nclass LayerNorm(nn.LayerNorm, RelProp):\n pass\n\nclass AdaptiveAvgPool2d(nn.AdaptiveAvgPool2d, RelPropSimple):\n pass\n\nclass MatMul(RelPropSimple):\n def forward(self, inputs):\n return torch.matmul(*inputs)\n\nclass Mul(RelPropSimple):\n def forward(self, inputs):\n return torch.mul(*inputs)\n\nclass AvgPool2d(nn.AvgPool2d, RelPropSimple):\n pass\n\n\nclass Add(RelPropSimple):\n def forward(self, inputs):\n return torch.add(*inputs)\n\nclass einsum(RelPropSimple):\n def __init__(self, equation):\n super().__init__()\n self.equation = equation\n def forward(self, *operands):\n return torch.einsum(self.equation, *operands)\n\nclass IndexSelect(RelProp):\n def forward(self, inputs, dim, indices):\n self.__setattr__('dim', dim)\n self.__setattr__('indices', indices)\n\n return torch.index_select(inputs, dim, indices)\n\n def relprop(self, R, alpha):\n Z = self.forward(self.X, self.dim, self.indices)\n S = safe_divide(R, Z)\n C = self.gradprop(Z, self.X, S)\n\n if torch.is_tensor(self.X) == False:\n outputs = []\n outputs.append(self.X[0] * C[0])\n outputs.append(self.X[1] * C[1])\n else:\n outputs = self.X * (C[0])\n return outputs\n\n\n\nclass Clone(RelProp):\n def forward(self, input, num):\n self.__setattr__('num', num)\n outputs = []\n for _ in range(num):\n outputs.append(input)\n\n return outputs\n\n def relprop(self, R, alpha):\n Z = []\n for _ in range(self.num):\n Z.append(self.X)\n S = [safe_divide(r, z) for r, z in zip(R, Z)]\n C = self.gradprop(Z, self.X, S)[0]\n\n R = self.X * C\n\n return 
R\n\nclass Cat(RelProp):\n def forward(self, inputs, dim):\n self.__setattr__('dim', dim)\n return torch.cat(inputs, dim)\n\n def relprop(self, R, alpha):\n Z = self.forward(self.X, self.dim)\n S = safe_divide(R, Z)\n C = self.gradprop(Z, self.X, S)\n\n outputs = []\n for x, c in zip(self.X, C):\n outputs.append(x * c)\n\n return outputs\n\nclass Sequential(nn.Sequential):\n def relprop(self, R, alpha):\n for m in reversed(self._modules.values()):\n R = m.relprop(R, alpha)\n return R\n\nclass BatchNorm2d(nn.BatchNorm2d, RelProp):\n def relprop(self, R, alpha):\n X = self.X\n beta = 1 - alpha\n weight = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3) / (\n (self.running_var.unsqueeze(0).unsqueeze(2).unsqueeze(3).pow(2) + self.eps).pow(0.5))\n Z = X * weight + 1e-9\n S = R / Z\n Ca = S * weight\n R = self.X * (Ca)\n return R\n\n\nclass Linear(nn.Linear, RelProp):\n def relprop(self, R, alpha):\n beta = alpha - 1\n pw = torch.clamp(self.weight, min=0)\n nw = torch.clamp(self.weight, max=0)\n px = torch.clamp(self.X, min=0)\n nx = torch.clamp(self.X, max=0)\n\n def f(w1, w2, x1, x2):\n Z1 = F.linear(x1, w1)\n Z2 = F.linear(x2, w2)\n S1 = safe_divide(R, Z1)\n S2 = safe_divide(R, Z2)\n C1 = x1 * torch.autograd.grad(Z1, x1, S1)[0]\n C2 = x2 * torch.autograd.grad(Z2, x2, S2)[0]\n\n return C1 + C2\n\n activator_relevances = f(pw, nw, px, nx)\n inhibitor_relevances = f(nw, pw, px, nx)\n\n R = alpha * activator_relevances - beta * inhibitor_relevances\n\n return R\n\nclass Conv2d(nn.Conv2d, RelProp):\n def gradprop2(self, DY, weight):\n Z = self.forward(self.X)\n\n output_padding = self.X.size()[2] - (\n (Z.size()[2] - 1) * self.stride[0] - 2 * self.padding[0] + self.kernel_size[0])\n\n return F.conv_transpose2d(DY, weight, stride=self.stride, padding=self.padding, output_padding=output_padding)\n\n def relprop(self, R, alpha):\n if self.X.shape[1] == 3:\n pw = torch.clamp(self.weight, min=0)\n nw = torch.clamp(self.weight, max=0)\n X = self.X\n L = self.X * 0 + \\\n torch.min(torch.min(torch.min(self.X, dim=1, keepdim=True)[0], dim=2, keepdim=True)[0], dim=3,\n keepdim=True)[0]\n H = self.X * 0 + \\\n torch.max(torch.max(torch.max(self.X, dim=1, keepdim=True)[0], dim=2, keepdim=True)[0], dim=3,\n keepdim=True)[0]\n Za = torch.conv2d(X, self.weight, bias=None, stride=self.stride, padding=self.padding) - \\\n torch.conv2d(L, pw, bias=None, stride=self.stride, padding=self.padding) - \\\n torch.conv2d(H, nw, bias=None, stride=self.stride, padding=self.padding) + 1e-9\n\n S = R / Za\n C = X * self.gradprop2(S, self.weight) - L * self.gradprop2(S, pw) - H * self.gradprop2(S, nw)\n R = C\n else:\n beta = alpha - 1\n pw = torch.clamp(self.weight, min=0)\n nw = torch.clamp(self.weight, max=0)\n px = torch.clamp(self.X, min=0)\n nx = torch.clamp(self.X, max=0)\n\n def f(w1, w2, x1, x2):\n Z1 = F.conv2d(x1, w1, bias=None, stride=self.stride, padding=self.padding)\n Z2 = F.conv2d(x2, w2, bias=None, stride=self.stride, padding=self.padding)\n S1 = safe_divide(R, Z1)\n S2 = safe_divide(R, Z2)\n C1 = x1 * self.gradprop(Z1, x1, S1)[0]\n C2 = x2 * self.gradprop(Z2, x2, S2)[0]\n return C1 + C2\n\n activator_relevances = f(pw, nw, px, nx)\n inhibitor_relevances = f(nw, pw, px, nx)\n\n R = alpha * activator_relevances - beta * inhibitor_relevances\n return R\n"
] | [
[
"torch.jit.script",
"torch.ones",
"torch.randint",
"torch.zeros",
"torch.equal",
"torch.no_grad",
"torch.rand"
],
[
"torch.div",
"torch.nn.functional.softmax",
"torch.nn.functional.log_softmax",
"torch.multinomial",
"torch.tensor",
"torch.FloatTensor",
"torch.stack"
],
[
"torch.nn.ModuleList",
"torch.cat"
],
[
"torch.nn.functional.conv_transpose2d",
"torch.conv2d",
"torch.add",
"torch.max",
"torch.cat",
"torch.einsum",
"torch.nn.functional.linear",
"torch.nn.functional.conv2d",
"torch.eye",
"torch.is_tensor",
"torch.min",
"torch.matmul",
"torch.mul",
"torch.index_select",
"torch.clamp",
"torch.autograd.grad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
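Note: the row above bundles the Pythia multimodal model variants together with a set of relevance-propagation layer wrappers (RelProp, Linear, Sequential, ...). For orientation, the sketch below shows how those wrappers could be exercised end to end. It is illustrative only: the module name layers_ours is an assumption (the row does not record a file name for that code), and the two-layer toy network is made up for the example.

# Illustrative sketch, assuming the wrapper classes above are saved locally
# as layers_ours.py (hypothetical name) and PyTorch is installed.
import torch
from layers_ours import Linear, ReLU, Sequential

model = Sequential(Linear(4, 8), ReLU(), Linear(8, 2))
x = torch.randn(1, 4)
out = model(x)                    # forward hooks cache each layer's input in .X

R = torch.zeros_like(out)
R[0, out.argmax()] = out.max()    # seed relevance at the winning logit
R = model.relprop(R, alpha=1.0)   # propagate relevance back through the layers
print(R.shape)                    # torch.Size([1, 4])

With alpha=1 (so beta=0) the Linear rule keeps only the activator term, i.e. the usual LRP alpha1-beta0 configuration; Sequential.relprop simply walks the modules in reverse, so the scalar seed on the output comes back as an input-sized relevance map.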
zachary2wave/UAV-aid-communication | [
"801fe22d839261af43127e31db00f166ed6484a0"
] | [
"gym/envs/wlan/SingleAP.py"
] | [
"import gym\nfrom gym import spaces\nimport numpy as np\nfrom gym.envs.wlan import env_simulated as env\nfrom gym.envs.wlan import thought_out as tho\nfrom gym.utils import seeding\n\nclass ApEnv(gym.Env):\n\n def __init__(self):\n self.Num_AP = 1\n self.Num_UE = 50\n self.channel = [1]\n self.oriTHO = np.zeros([1, self.Num_AP])\n loss_cal = env.Scenario(self.Num_AP, self.Num_UE, freq=2, avr_ap=1)\n self.contact, self.placeAP, self.placeUE, self.Loss = loss_cal.sendout()\n\n self.action_space = spaces.Box(low=-0.2, high=0.2, shape=(self.Num_AP,), dtype=np.float32)\n self.observation_space = spaces.Box(low=-0, high=1, shape=(self.Num_AP,), dtype=np.float32)\n\n self.state = self.Num_AP * [0.5]\n envir = tho.ThoughtOutCal(self.channel, self.state * 60, self.Num_AP, self.Num_UE)\n RSSI, Speed, self.connection = envir.subspeed(self.Loss)\n thought_out_ue, P = envir.thomain(Speed, self.connection)\n # 将UE的转化为AP的\n thought_out_AP = np.zeros([self.Num_AP])\n for kki in range(0, self.Num_AP):\n tempN = np.argwhere(self.connection == kki)\n for kkj in tempN:\n thought_out_AP[kki] += thought_out_ue[kkj]\n self.oriTHO[:] = thought_out_AP[:]\n\n def step(self, u):\n reward = np.zeros([1, self.Num_AP])\n s_ = np.zeros([self.Num_AP])\n for kk in range(0, self.Num_AP):\n if self.state[kk] + u[kk] < 0:\n s_[kk] = 0\n elif self.state[kk] + u[kk] > 1:\n s_[kk] = 1\n else:\n s_[kk] = self.state[kk] + u[kk]\n envir = tho.ThoughtOutCal(self.channel, s_*60, self.Num_AP, self.Num_UE)\n RSSI, Speed, connection = envir.subspeed(self.Loss)\n thought_out_ue, P = envir.thomain(Speed, connection)\n # 将UE的转化为AP的\n thought_out_AP = np.zeros([self.Num_AP])\n for kki in range(0, self.Num_AP):\n tempN = np.argwhere(connection == kki)\n for kkj in tempN:\n thought_out_AP[kki] += thought_out_ue[kkj]\n # 计算reward\n for kk in range(0, self.Num_AP):\n if self.state[kk]+u[kk] < 0:\n reward[kk] = -100\n elif self.state[kk]+u[kk] > 1:\n reward[kk] = -100\n else:\n tempppppp = thought_out_AP[kk]\n reward[kk] = tempppppp * 10\n # reward[kk] = (thought_out_AP[kk]-self.oriTHO[kk])*10\n self.oriTHO[:] = thought_out_AP[:]\n # print(s_.shape)\n return s_, np.sum(reward), False, {}\n\n def reset(self):\n self.state = np.array(self.Num_AP*[0.5])\n # print(self.state.shape)\n return self.state\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n def render(self, mode='human'):\n tho.showplot(self.placeAP, self.placeUE, self.state, self.channel, self.connection)\n return {}"
] | [
[
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.argwhere"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
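Note: the row above is a single-AP WLAN environment that lives inside a forked copy of gym. A minimal way to drive it is sketched below; the import path mirrors the file_path column, and ApEnv is instantiated directly rather than guessing at a registered environment id, which the row does not record.

# Illustrative sketch; assumes the forked gym package (with gym/envs/wlan/)
# and its env_simulated / thought_out dependencies are importable.
from gym.envs.wlan.SingleAP import ApEnv

env = ApEnv()
state = env.reset()                      # shape (1,); the single AP starts at 0.5
for _ in range(5):
    action = env.action_space.sample()   # adjustment sampled from [-0.2, 0.2]
    state, reward, done, info = env.step(action)
    print(state, reward)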
mattjshannon/mattpy | [
"52278419fcf56a5a0c25efb1bbc3ffe0f037c1bb"
] | [
"pennyspec/scripts/helpers.py"
] | [
"#!/usr/bin/env python3\n\"\"\"\nhelpers.py\n\nHelpers functions for analyze_pahs.py\n\"\"\"\n\nimport errno\nimport os\nimport pickle\n\nimport matplotlib.gridspec as gridspec\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom gaussfitter import onedgaussian, multigaussfit\nfrom scipy.integrate import simps\n\nfrom mattpy.utils import to_sigma, to_fwhm, quant_str\n\n\ndef ensure_exists(path):\n \"\"\"Ensure the path exists; if not, make the directory.\"\"\"\n try:\n os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n\n\ndef jy_to_si(flux_jy, wave):\n \"\"\"Returns a flux array (converted from Jy to W/m^2/micron).\"\"\"\n flux_si = flux_jy * 3e-12 / wave**2\n return flux_si\n\n\ndef smooth(x, window_len=50, window='hanning'):\n \"\"\"Returns a smoothed version of an array, from Stack Overflow.\"\"\"\n if x.ndim != 1:\n raise ValueError(\"smooth only accepts 1 dimension arrays.\")\n if x.size < window_len:\n raise ValueError(\"Input vector must be bigger than window size.\")\n if window_len < 3:\n return x\n\n acceptable_windows = ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']\n if window not in acceptable_windows:\n raise ValueError(\"Window must be in: \", str(acceptable_windows))\n\n s = np.r_[2 * x[0] - x[window_len - 1::-1], x,\n 2 * x[-1] - x[-1:-window_len:-1]]\n\n if window == 'flat':\n w = np.ones(window_len, 'd')\n else:\n w = eval('np.' + window + '(window_len)')\n\n y = np.convolve(w / w.sum(), s, mode='same')\n\n return y[window_len:-window_len + 1]\n\n\ndef compute_feature_uncertainty(gposition, gsigma, wave_feat, rms):\n\n myrange = [gposition - (3. * gsigma), gposition + (3. * gsigma)]\n\n dl = wave_feat[1] - wave_feat[0]\n N = (myrange[1] - myrange[0]) / dl\n feature_uncertainty = (rms * np.sqrt(N) * dl * 2)\n\n return feature_uncertainty\n\n\ndef params_6gauss(basename, guess):\n\n p_non_aliphatics = {\n 'params':\n [\n guess / 200., 6.89, to_sigma(0.15),\n guess / 200., 7.25, to_sigma(0.12),\n guess / 2., 7.55, to_sigma(0.44),\n guess / 1., 7.87, to_sigma(0.40),\n guess / 2., 8.25, to_sigma(0.29),\n guess / 2., 8.59, to_sigma(0.36),\n ],\n 'limitedmin': [True] * 18,\n 'limitedmax': [True] * 18,\n 'fixed':\n [\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n ],\n 'minpars':\n [\n 0., 6.82, to_sigma(0.06),\n 0, 7.15, to_sigma(0.05),\n 0, 7.45, to_sigma(0.315),\n 0, 7.77, to_sigma(0.275),\n 0, 8.15, to_sigma(0.165),\n 0, 8.49, to_sigma(0.235),\n ],\n 'maxpars':\n [\n guess / 100., 6.96, to_sigma(0.21),\n guess / 100, 7.35, to_sigma(0.15),\n guess, 7.65, to_sigma(0.565),\n guess, 7.97, to_sigma(0.525),\n guess, 8.35, to_sigma(0.415),\n guess, 8.69, to_sigma(0.485),\n ]\n }\n\n p_non_aliphatics_xxoph = {\n 'params':\n [\n 0., 6.89, to_sigma(0.15),\n 0., 7.25, to_sigma(0.12),\n guess / 2., 7.55, to_sigma(0.44),\n guess / 1., 7.87, to_sigma(0.40),\n guess / 2., 8.25, to_sigma(0.29),\n guess / 2., 8.59, to_sigma(0.36),\n ],\n 'limitedmin': [True] * 18,\n 'limitedmax': [True] * 18,\n 'fixed':\n [\n True, False, False,\n True, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n ],\n 'minpars':\n [\n 0., 6.82, to_sigma(0.06),\n 0, 7.15, to_sigma(0.05),\n 0, 7.45, to_sigma(0.315),\n 0, 7.77, to_sigma(0.275),\n 0, 8.15, to_sigma(0.165),\n 0, 8.49, to_sigma(0.235),\n ],\n 'maxpars':\n [\n guess / 100., 6.96, to_sigma(0.21),\n guess / 100, 7.35, to_sigma(0.15),\n guess, 7.65, 
to_sigma(0.565),\n guess, 7.97, to_sigma(0.525),\n guess, 8.35, to_sigma(0.415),\n guess, 8.69, to_sigma(0.485),\n ]\n }\n\n\n p0 = {\n 'params':\n [\n guess / 2., 6.89, to_sigma(0.15),\n guess / 4., 7.25, to_sigma(0.12),\n guess / 2., 7.55, to_sigma(0.44),\n guess / 1., 7.87, to_sigma(0.40),\n guess / 2., 8.25, to_sigma(0.29),\n guess / 2., 8.59, to_sigma(0.36),\n ],\n 'limitedmin': [True] * 18,\n 'limitedmax': [True] * 18,\n 'fixed':\n [\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n ],\n 'minpars':\n [\n guess / 30., 6.82, to_sigma(0.06),\n 0, 7.15, to_sigma(0.05),\n 0, 7.45, to_sigma(0.315),\n 0, 7.77, to_sigma(0.275),\n 0, 8.15, to_sigma(0.165),\n 0, 8.49, to_sigma(0.235),\n ],\n 'maxpars':\n [\n guess, 6.96, to_sigma(0.21),\n guess, 7.35, to_sigma(0.15),\n guess, 7.65, to_sigma(0.565),\n guess, 7.97, to_sigma(0.525),\n guess, 8.35, to_sigma(0.415),\n guess, 8.69, to_sigma(0.485),\n ]\n }\n\n p1 = {\n 'params':\n [\n guess / 2., 6.89, to_sigma(0.15),\n guess / 4., 7.25, to_sigma(0.12),\n guess / 2., 7.55, to_sigma(0.44),\n guess / 1., 7.87, to_sigma(0.40),\n guess / 2., 8.25, to_sigma(0.29),\n guess / 2., 8.59, to_sigma(0.36),\n ],\n 'limitedmin': [True] * 18,\n 'limitedmax': [True] * 18,\n 'fixed':\n [\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n ],\n 'minpars':\n [\n guess / 30., 6.82, to_sigma(0.06),\n guess / 40., 7.15, to_sigma(0.05),\n guess / 30., 7.45, to_sigma(0.315),\n guess / 30., 7.77, to_sigma(0.275),\n 0, 8.15, to_sigma(0.165),\n guess / 30., 8.49, to_sigma(0.235),\n ],\n 'maxpars':\n [\n guess, 6.96, to_sigma(0.21),\n guess, 7.35, to_sigma(0.15),\n guess, 7.65, to_sigma(0.565),\n guess, 7.97, to_sigma(0.525),\n guess, 8.35, to_sigma(0.415),\n guess, 8.69, to_sigma(0.485),\n ]\n }\n\n p2 = {\n 'params':\n [\n guess / 2., 6.89, to_sigma(0.15),\n 0., 7.25, to_sigma(0.12),\n guess / 2., 7.55, to_sigma(0.44),\n guess / 1., 7.87, to_sigma(0.40),\n guess / 2., 8.25, to_sigma(0.29),\n guess / 2., 8.59, to_sigma(0.36),\n ],\n 'limitedmin': [True] * 18,\n 'limitedmax': [True] * 18,\n 'fixed':\n [\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n ],\n 'minpars':\n [\n guess / 30., 6.82, to_sigma(0.06),\n 0., 7.15, to_sigma(0.05),\n guess / 30., 7.45, to_sigma(0.315),\n guess / 30., 7.77, to_sigma(0.275),\n 0., 8.15, to_sigma(0.165),\n guess / 30., 8.49, to_sigma(0.235),\n ],\n 'maxpars':\n [\n guess, 6.96, to_sigma(0.21),\n guess, 7.35, to_sigma(0.15),\n guess, 7.65, to_sigma(0.565),\n guess, 7.97, to_sigma(0.525),\n guess, 8.35, to_sigma(0.415),\n guess, 8.69, to_sigma(0.485),\n ]\n }\n\n p3 = {\n 'params':\n [\n guess / 2., 6.89, to_sigma(0.15),\n guess / 4., 7.25, to_sigma(0.12),\n guess / 2., 7.55, to_sigma(0.44),\n guess / 1., 7.87, to_sigma(0.40),\n guess / 2., 8.25, to_sigma(0.29),\n guess / 2., 8.59, to_sigma(0.36),\n ],\n 'limitedmin': [True] * 18,\n 'limitedmax': [True] * 18,\n 'fixed':\n [\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n ],\n 'minpars':\n [\n guess / 30., 6.82, to_sigma(0.06),\n guess / 30., 7.15, to_sigma(0.05),\n guess / 30., 7.45, to_sigma(0.315),\n guess / 30., 7.77, to_sigma(0.275),\n guess / 30., 8.15, to_sigma(0.165),\n guess / 30., 8.49, to_sigma(0.235),\n ],\n 'maxpars':\n [\n 
guess, 6.96, to_sigma(0.21),\n guess, 7.35, to_sigma(0.15),\n guess, 7.65, to_sigma(0.565),\n guess, 7.97, to_sigma(0.525),\n guess, 8.35, to_sigma(0.415),\n guess, 8.69, to_sigma(0.485),\n ]\n }\n\n p4 = {\n 'params':\n [\n guess / 2., 6.89, to_sigma(0.15),\n guess / 6., 7.25, to_sigma(0.12),\n guess / 2., 7.55, to_sigma(0.44),\n guess / 1., 7.87, to_sigma(0.40),\n guess / 2., 8.25, to_sigma(0.29),\n guess / 2., 8.59, to_sigma(0.36),\n ],\n 'limitedmin': [True] * 18,\n 'limitedmax': [True] * 18,\n 'fixed':\n [\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n ],\n 'minpars':\n [\n guess / 30., 6.82, to_sigma(0.06),\n guess / 30., 7.15, to_sigma(0.05),\n guess / 30., 7.45, to_sigma(0.315),\n guess / 30., 7.77, to_sigma(0.275),\n 0., 8.15, to_sigma(0.165),\n guess / 30., 8.49, to_sigma(0.235),\n ],\n 'maxpars':\n [\n guess, 6.96, to_sigma(0.21),\n guess, 7.35, to_sigma(0.15),\n guess, 7.65, to_sigma(0.565),\n guess, 7.97, to_sigma(0.525),\n guess, 8.35, to_sigma(0.415),\n guess, 8.69, to_sigma(0.485),\n ]\n }\n\n p5 = {\n 'params':\n [\n guess / 2., 6.89, to_sigma(0.15),\n 1.21852599e-15 * 0.1, 7.25, to_sigma(0.1),\n guess / 2., 7.55, to_sigma(0.44),\n guess / 1., 7.87, to_sigma(0.40),\n guess / 2., 8.25, to_sigma(0.29),\n guess / 2., 8.59, to_sigma(0.36),\n ],\n 'limitedmin': [True] * 18,\n 'limitedmax': [True] * 18,\n 'fixed':\n [\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n ],\n 'minpars':\n [\n guess / 30., 6.82, to_sigma(0.06),\n 0., 7.15, to_sigma(0.05),\n guess / 30., 7.45, to_sigma(0.315),\n guess / 30., 7.77, to_sigma(0.275),\n 0., 8.15, to_sigma(0.165),\n guess / 30., 8.49, to_sigma(0.235),\n ],\n 'maxpars':\n [\n guess, 6.96, to_sigma(0.21),\n 1.21852599e-15 * 0.25, 7.35, to_sigma(0.13),\n guess, 7.65, to_sigma(0.565),\n guess, 7.97, to_sigma(0.525),\n guess, 8.35, to_sigma(0.415),\n guess, 8.69, to_sigma(0.485),\n ]\n }\n\n p6 = {\n 'params':\n [\n guess / 2., 6.93, to_sigma(0.15),\n guess / 6., 7.25, to_sigma(0.12),\n guess / 2., 7.55, to_sigma(0.40),\n guess / 1., 7.87, to_sigma(0.40),\n guess / 2., 8.25, to_sigma(0.29),\n guess / 2., 8.59, to_sigma(0.36),\n ],\n 'limitedmin': [True] * 18,\n 'limitedmax': [True] * 18,\n 'fixed':\n [\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n ],\n 'minpars':\n [\n guess / 30., 6.91, to_sigma(0.06),\n 0., 7.15, to_sigma(0.05),\n guess / 30., 7.53, to_sigma(0.315),\n guess / 30., 7.77, to_sigma(0.275),\n 0., 8.15, to_sigma(0.165),\n guess / 30., 8.49, to_sigma(0.235),\n ],\n 'maxpars':\n [\n guess, 6.96, to_sigma(0.16),\n guess, 7.35, to_sigma(0.15),\n guess, 7.65, to_sigma(0.41),\n guess, 7.97, to_sigma(0.525),\n guess, 8.35, to_sigma(0.415),\n guess, 8.69, to_sigma(0.485),\n ]\n }\n\n p7 = {\n 'params':\n [\n guess / 2., 6.89, to_sigma(0.15),\n guess / 6., 7.25, to_sigma(0.12),\n guess / 2., 7.55, to_sigma(0.44),\n guess / 1., 7.87, to_sigma(0.40),\n guess / 2., 8.25, to_sigma(0.29),\n guess / 2., 8.59, to_sigma(0.36),\n ],\n 'limitedmin': [True] * 18,\n 'limitedmax': [True] * 18,\n 'fixed':\n [\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n ],\n 'minpars':\n [\n guess / 30., 6.82, to_sigma(0.06),\n guess / 30., 7.15, to_sigma(0.05),\n guess / 30., 7.45, to_sigma(0.315),\n 
guess / 30., 7.77, to_sigma(0.275),\n 0., 8.15, to_sigma(0.165),\n guess / 30., 8.49, to_sigma(0.235),\n ],\n 'maxpars':\n [\n guess, 6.96, to_sigma(0.21),\n guess, 7.35, to_sigma(0.15),\n guess, 7.65, to_sigma(0.565),\n guess, 7.97, to_sigma(0.525),\n guess, 8.35, to_sigma(0.415),\n guess, 8.69, to_sigma(0.485),\n ]\n }\n\n p8 = {\n 'params':\n [\n guess / 2., 6.89, to_sigma(0.15),\n guess / 6., 7.25, to_sigma(0.12),\n guess / 2., 7.60, to_sigma(0.44),\n guess / 1., 7.87, to_sigma(0.40),\n guess / 2., 8.25, to_sigma(0.29),\n guess / 2., 8.59, to_sigma(0.36),\n ],\n 'limitedmin': [True] * 18,\n 'limitedmax': [True] * 18,\n 'fixed':\n [\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n ],\n 'minpars':\n [\n guess / 30., 6.82, to_sigma(0.06),\n guess / 30., 7.15, to_sigma(0.05),\n guess / 30., 7.45, to_sigma(0.315),\n guess / 30., 7.77, to_sigma(0.275),\n guess / 30., 8.15, to_sigma(0.165),\n guess / 30., 8.49, to_sigma(0.235),\n ],\n 'maxpars':\n [\n guess, 6.96, to_sigma(0.21),\n guess, 7.35, to_sigma(0.15),\n guess, 7.65, to_sigma(0.565),\n guess, 7.97, to_sigma(0.525),\n guess, 8.35, to_sigma(0.415),\n guess, 8.69, to_sigma(0.485),\n ]\n }\n\n p9 = {\n 'minpars':\n [\n guess / 30., 6.82, to_sigma(0.06),\n 0., 7.15, to_sigma(0.05),\n guess / 30., 7.45, to_sigma(0.315),\n guess / 30., 7.77, to_sigma(0.275),\n 0., 8.15, to_sigma(0.165),\n guess / 30., 8.49, to_sigma(0.235),\n ],\n 'params':\n [\n guess / 2., 6.89, to_sigma(0.15),\n guess / 6., 7.25, to_sigma(0.12),\n guess / 2., 7.55, to_sigma(0.44),\n guess / 1., 7.87, to_sigma(0.40),\n guess / 2., 8.25, to_sigma(0.29),\n guess / 2., 8.59, to_sigma(0.36),\n ],\n 'maxpars':\n [\n guess, 6.96, to_sigma(0.21),\n guess, 7.35, to_sigma(0.15),\n guess, 7.65, to_sigma(0.565),\n guess, 7.97, to_sigma(0.525),\n guess, 8.35, to_sigma(0.415),\n guess, 8.69, to_sigma(0.485),\n ],\n 'limitedmin': [True] * 18,\n 'limitedmax': [True] * 18,\n 'fixed':\n [\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n ]\n }\n\n p10 = {\n 'params':\n [\n guess / 2., 6.89, to_sigma(0.15),\n guess / 4., 7.25, to_sigma(0.12),\n guess / 2., 7.55, to_sigma(0.44),\n guess / 1., 7.87, to_sigma(0.40),\n guess / 2., 8.25, to_sigma(0.29),\n guess / 2., 8.59, to_sigma(0.36),\n ],\n 'limitedmin': [True] * 18,\n 'limitedmax': [False] * 18,\n 'fixed':\n [\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n ],\n 'minpars':\n [\n guess / 30., 6.82, to_sigma(0.06),\n guess / 40., 7.22, to_sigma(0.07),\n guess / 30., 7.45, to_sigma(0.315),\n guess / 30., 7.77, to_sigma(0.275),\n 0, 8.15, to_sigma(0.165),\n guess / 30., 8.49, to_sigma(0.235),\n ],\n 'maxpars':\n [\n guess, 6.96, to_sigma(0.16),\n guess, 7.32, to_sigma(0.15),\n guess, 7.65, to_sigma(0.6),\n guess, 7.97, to_sigma(0.525),\n guess, 8.35, to_sigma(0.415),\n guess, 8.69, to_sigma(0.485),\n ]\n }\n\n p11 = {\n 'params':\n [\n guess / 2., 6.89, to_sigma(0.15),\n guess / 4., 7.25, to_sigma(0.12),\n guess / 100., 7.55, to_sigma(0.44),\n guess / 1., 7.87, to_sigma(0.40),\n guess / 2., 8.25, to_sigma(0.29),\n guess / 2., 8.59, to_sigma(0.36),\n ],\n 'limitedmin': [True] * 18,\n 'limitedmax': [True] * 18,\n 'fixed':\n [\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n ],\n 
'minpars':\n [\n guess / 30., 6.82, to_sigma(0.06),\n 0, 7.15, to_sigma(0.05),\n 0, 7.45, to_sigma(0.315),\n 0, 7.77, to_sigma(0.275),\n 0, 8.15, to_sigma(0.165),\n 0, 8.49, to_sigma(0.235),\n ],\n 'maxpars':\n [\n guess, 6.96, to_sigma(0.21),\n guess, 7.35, to_sigma(0.15),\n guess / 98., 7.65, to_sigma(0.565),\n guess, 7.97, to_sigma(0.525),\n guess, 8.35, to_sigma(0.415),\n guess, 8.69, to_sigma(0.485),\n ]\n }\n\n p12 = {\n 'params':\n [\n guess / 2., 6.89, to_sigma(0.15),\n guess / 4., 7.25, to_sigma(0.12),\n guess / 2., 7.55, to_sigma(0.44),\n guess / 1., 7.87, to_sigma(0.40),\n guess / 2., 8.25, to_sigma(0.29),\n guess / 2., 8.59, to_sigma(0.36),\n ],\n 'limitedmin': [True] * 18,\n 'limitedmax': [True] * 18,\n 'fixed':\n [\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n False, False, False,\n ],\n 'minpars':\n [\n guess / 30., 6.82, to_sigma(0.06),\n guess / 40., 7.15, to_sigma(0.05),\n guess / 30., 7.45, to_sigma(0.315),\n guess / 30., 7.77, to_sigma(0.275),\n 0, 8.15, to_sigma(0.165),\n guess / 30., 8.49, to_sigma(0.235),\n ],\n 'maxpars':\n [\n guess, 6.96, to_sigma(0.21),\n guess, 7.35, to_sigma(0.15),\n guess, 7.65, to_sigma(0.565),\n guess, 7.97, to_sigma(0.525),\n guess, 8.35, to_sigma(0.415),\n guess, 8.69, to_sigma(0.485),\n ]\n }\n\n param_dict = {\n 'hd97048_convCWsub': p0, # GOOD, wouldn't trust 72 tho\n 'hd135344_convCWsub': p11, # * NO ALIPHATICS TRUSTED!!! *\n 'IRAS05063_CWsub': p3, # GOOD\n 'IRAS05092_CWsub': p0, # GOOD\n 'IRAS05186_CWsub': p0, # GOOD\n 'IRAS05361_CWsub': p0, # GOOD. TRY LIMIT G7.6 FROM LEFT?\n 'IRAS05370_CWsub': p4, # GOOD, don't trust 7.2\n 'IRAS05413_CWsub': p2, # GOOD? ONLY TRUST 6.9, maybe 77 flux\n 'IRAS05588_CWsub': p0, # GOOD\n 'IRAS06111_CWsub': p0, # GOOD\n 'IRAS14429_CWsub': p0, # GOOD\n 'IRAS15482_CWsub': p5, # GOOD, don't trust 7.2 maybe (manual)\n 'iras17047_SWS_CWsub': p10, # GOOD, had to do ct myself\n 'IRASF05110-6616_LR_CWsub': p0, # GOOD\n 'IRASf05192_CWsub': p1, # GOOD, quesitonable 69/72. 
tho\n 'J004441_CWsub': p0, # GOOD\n 'J010546_CWsub': p6, # GOOD, not perfect but good enough?\n 'j050713_CWsub': p7, # GOOD\n 'J052043_CWsub': p8, # GOOD, had to drop errors?\n 'J052520_CWsub': p1, # GOOD\n 'NGC1978WBT2665_CWsub': p1, # GOOD\n 'SMPLMC076_CWsub': p12, # new\n 'SMPSMC006_CWsub': p9, # GOOD, dropping fluxerr in fit (!!)\n 'SMPSMC011_CWsub': p1, # GOOD\n\n 'xxOph_SWS_CWsub': p_non_aliphatics_xxoph,\n }\n\n\n if basename in param_dict:\n print('In standard PARAM DICT.')\n parameters = param_dict[basename]\n else:\n parameters = p_non_aliphatics\n\n return parameters\n\n # TO DO: UNCERTAINTIES!!!\n\n # pos, flux, sigma = line69_params[basename]\n # amp = flux / (np.sqrt(2) * np.abs(sigma) * np.sqrt(np.pi))\n\n # max_flux72 = 0.35 * flux\n # amp_72_approx = max_flux72 / (np.sqrt(2) * np.abs(sigma)* np.sqrt(np.pi))\n\n # p0['params'][0] = amp\n # p0['params'][1] = pos\n # p0['params'][2] = sigma\n\n # p0['fixed'][0] = True\n # p0['fixed'][1] = True\n # p0['fixed'][2] = True\n\n # p0['maxpars'][3] = amp_72_approx\n # p0['params'][3] = amp_72_approx * 0.5\n\n\n\n\ndef measure_112_RMS(wave, csub):\n xmin = 11.9\n xmax = 12.1\n\n myrange = np.where((wave >= xmin) & (wave <= xmax))\n csub_mr = csub[myrange]\n rms = np.sqrt(np.mean(csub_mr**2))\n\n return rms # , xmin, xmax\n\n\ndef fit_aliphatics(basename, wave, flux, fluxerr, rms, output_dir):\n\n def fit_straight_line(wave, flux, fluxerr):\n\n # Section 1: Fit Aliphatics\n # Define wavelength range of relevance\n lim = np.where((wave > 6.6) & (wave < 7.65))\n waveLim = wave[lim]\n fluxLim = flux[lim]\n errLim = fluxerr[lim]\n\n # Region where 7.2 is found\n lim2 = np.where((wave > 6.7) & (wave < 7.6))\n waveLim2 = wave[lim2]\n fluxLim2 = flux[lim2]\n # errLim2 = fluxerr[lim2]\n\n # Draw a straight line under 7.2 feature\n # Comment section out if no 7.2\n\n winDX = np.where((waveLim2 >= 7.) 
& (waveLim2 <= 7.2)\n ) # Find the first ancor point\n winWave = waveLim2[winDX]\n winFlux = fluxLim2[winDX]\n anchor1Wave = winWave[np.nanargmin(winFlux)]\n anchor1Flux = np.nanmin(winFlux)\n\n winDX = np.where((waveLim2 >= 7.5)) # Find the second anchor point\n winWave = waveLim2[winDX]\n winFlux = fluxLim2[winDX]\n anchor2Wave = winWave[np.nanargmin(winFlux)]\n anchor2Flux = np.nanmin(winFlux)\n\n # Define the straight line from the anchor points\n x = np.array([anchor1Wave, anchor2Wave])\n y = np.array([anchor1Flux, anchor2Flux])\n StrLine = np.polyfit(x, y, deg=1) # Fit straight line\n StrLineFit = StrLine[0] * waveLim2 + StrLine[1]\n\n # Plot straight line to check\n fig1, ax = plt.subplots()\n ax.plot(waveLim, fluxLim, '-r', lw=2) # Make figure\n ax.errorbar(\n waveLim,\n fluxLim,\n errLim,\n color='r',\n ecolor='0.45',\n lw=2,\n elinewidth=1)\n ax.plot(waveLim2, StrLineFit, 'g-', label='7.2 Cont', lw=2)\n ax.plot(x, y, 'bo')\n\n ax.legend(loc=0, fontsize='small')\n ax.set_xlabel('Wavelength (microns)')\n ax.set_ylabel('Flux (W/m^2)')\n # ax.set_title(fnameStr + ' -- Line fit')\n ax.grid()\n ax.minorticks_on()\n\n ensure_exists(output_dir)\n pdf_filename = output_dir + basename + '_aliphatic_fit_1.pdf'\n print('Saved: ', pdf_filename)\n fig1.savefig(pdf_filename, format='pdf', bbox_inches='tight')\n\n plt.close()\n fig1.clear()\n\n if StrLine[0] > 0: # Subtract straight line from data\n\n lim69 = np.where(waveLim < x[0]) # Create limits of subtraction\n lim72 = np.where((waveLim >= x[0]) & (waveLim <= x[1]))\n limOver = np.where(waveLim > x[1])\n\n flux69 = fluxLim[lim69]\n # Subtraction\n flux72 = fluxLim[lim72] - \\\n (StrLine[0] * waveLim[lim72] + StrLine[1])\n fluxOver = fluxLim[limOver]\n\n fluxFull = []\n fluxFull = np.append(flux69, flux72)\n fluxFull = np.append(fluxFull, fluxOver) # Create array\n\n else:\n fluxFull = fluxLim\n\n return waveLim, fluxFull, errLim\n\n def fit_gaussians(waveLim, fluxFull, errLim, fnameStr='temp_label'):\n\n # End of commented section if no 7.2\n # fluxFull = fluxLim # Comment if spectrum has 7.2 feature\n\n # Fit Gaussian functions to peaks\n # Change ngauss and parameters as needed\n\n # fitAli = multigaussfit(\n # waveLim, fluxFull, ngauss=2, err=errLim,\n # params=[0.12e-15,6.85,0.1,0.5e-16,7.23,0.05],\n # limitedmin=[True,True,True,True,True,True],\n # limitedmax=[True,True,True,True,True,True],\n # minpars=[0.01e-18,6.8,0.03,0,7,0],\n # maxpars=[1.5e-14,6.86,0.2,0.5e-15,7.3,0.06]\n # )\n\n fitAli = multigaussfit(\n waveLim, fluxFull, ngauss=2, err=errLim, params=[\n 0.2e-14, 6.9, 0.1, 0.17e-14, 7.23, 0.05], limitedmin=[\n True, True, True, True, True, True], minpars=[\n 0, 6.8, 0.04, 0, 7, 0], limitedmax=[\n True, True, True, True, True, True], maxpars=[\n 0.5e-13, 7, 0.2, 0.1e-13, 7.25, 0.2])\n\n # Plot fit\n fig2, ax = plt.subplots()\n\n ax.plot(waveLim, fluxFull, '-r', label=fnameStr, lw=2)\n ax.errorbar(\n waveLim,\n fluxFull,\n errLim,\n color='r',\n ecolor='0.45',\n lw=2,\n elinewidth=1)\n ax.plot(waveLim, fitAli[1], '-g', label='Spectral Fit', lw=1.5)\n ax.fill_between(waveLim, fitAli[1], facecolor='green', alpha=0.15)\n ax.axhline(y=0, color='k', ls='-', zorder=-10, lw=2)\n\n ax.legend(loc=0, fontsize='small')\n ax.set_xlabel('Wavelength (microns)')\n ax.set_ylabel('Flux (W/m^2)')\n ax.set_title(fnameStr + ' -- Gaussian Fit')\n ax.grid()\n ax.minorticks_on()\n\n ensure_exists(output_dir)\n pdf_filename = output_dir + basename + '_aliphatic_fit_2.pdf'\n print('Saved: ', pdf_filename)\n fig2.savefig(pdf_filename, format='pdf', 
bbox_inches='tight')\n\n plt.close()\n fig2.clear()\n\n # print('Fit parameters for aliphatic features:')\n # print(fitAli[0])\n\n return fitAli\n\n def compute_aliphatic_fluxes(fitAli, waveLim, rms):\n\n # Calculate integrated flux of aliphatic features from fit\n Gauss69 = fitAli[0][0] * \\\n np.exp(-(waveLim - fitAli[0][1])**2 / (2 * fitAli[0][2]**2))\n area69 = simps(Gauss69, waveLim)\n\n Gauss72 = fitAli[0][3] * \\\n np.exp(-(waveLim - fitAli[0][4])**2 / (2 * fitAli[0][5]**2))\n area72 = simps(Gauss72, waveLim)\n\n err69 = compute_feature_uncertainty(\n fitAli[0][1], fitAli[0][2], waveLim, rms)\n err72 = compute_feature_uncertainty(\n fitAli[0][4], fitAli[0][5], waveLim, rms)\n\n # fitErr69 = np.sqrt((fitAli[2][0]/fitAli[0][0])**2 + \\\n # (fitAli[2][2]/fitAli[0][2])**2) * area69\n # fitErr72 = np.sqrt((fitAli[2][3]/fitAli[0][3])**2 + \\\n # (fitAli[2][5]/fitAli[0][5])**2) * area72\n\n errTot69 = err69 # + fitErr69\n errTot72 = err72 # + fitErr72\n\n # print('Integrated fluxes of aliphatics:')\n # print(area69, area72)\n\n SNR69 = area69 / errTot69\n SNR72 = area72 / errTot72\n\n # print('S/N of aliphatics: ', SNR69, SNR72)\n\n return area69, err69, SNR69, area72, err72, SNR72\n\n waveLim, fluxFull, errLim = fit_straight_line(wave, flux, fluxerr)\n\n fitAli = fit_gaussians(waveLim, fluxFull, errLim, fnameStr='temp_label')\n\n area69, err69, SNR69, area72, err72, SNR72 = \\\n compute_aliphatic_fluxes(fitAli, waveLim, rms)\n\n return fitAli, waveLim, area69, SNR69, area72, SNR72\n\n\ndef fit_aromatics(basename, wave, flux, fluxerr, rms, output_dir):\n\n def fit_straight_line(wave, flux, fluxerr, fnameStr='temp_label'):\n\n # Section 2: Fit 7.7\n # Limits of feature - change as needed\n lim77 = np.where((wave >= 6.9) & (wave <= 9))\n waveLim77 = wave[lim77]\n fluxLim77 = flux[lim77]\n errLim77 = fluxerr[lim77]\n\n # Limit for 7.2 feature - change as needed\n lim2_a = np.where((wave > 6.9) & (wave < 7.45))\n waveLim2_a = wave[lim2_a]\n fluxLim2_a = flux[lim2_a]\n # errLim2_a = fluxerr[lim2_a]\n\n # Draw a straight line under 7.2 feature\n # Comment section out if no 7.2\n\n winDX77 = np.where((waveLim2_a >= 7.) 
& (waveLim2_a <= 7.2))\n winWave77 = waveLim2_a[winDX77]\n winFlux77 = fluxLim2_a[winDX77]\n anchor1Wave77 = winWave77[np.nanargmin(winFlux77)]\n anchor1Flux77 = np.nanmin(winFlux77)\n\n winDX77 = np.where((waveLim2_a >= 7.2))\n winWave77 = waveLim2_a[winDX77]\n winFlux77 = fluxLim2_a[winDX77]\n anchor2Wave77 = winWave77[np.nanargmin(winFlux77)]\n anchor2Flux77 = np.nanmin(winFlux77)\n\n # Define the straight line from the anchor points\n x77 = np.array([anchor1Wave77, anchor2Wave77])\n y77 = np.array([anchor1Flux77, anchor2Flux77])\n StrLine77 = np.polyfit(x77, y77, deg=1) # Fit straight line\n StrLineFit77 = StrLine77[0] * waveLim2_a + StrLine77[1]\n\n # Comment out section if no 7.2\n fig3, ax = plt.subplots() # Define figure\n ax.plot(waveLim77, fluxLim77, '-r', label=fnameStr, lw=2)\n ax.errorbar(\n waveLim77,\n fluxLim77,\n errLim77,\n color='r',\n ecolor='0.45',\n lw=2,\n elinewidth=1)\n # Plot straight line - comment out if no 7.2 feature\n ax.plot(waveLim2_a, StrLineFit77, 'g-', lw=2)\n\n ax.axhline(y=0, color='k', ls='-', zorder=-10, lw=2)\n ax.legend(loc=0, fontsize='small')\n ax.grid()\n ax.minorticks_on()\n\n ax.set_title(fnameStr + ' 7.7 complex')\n ax.set_xlabel('Wavelength (microns)')\n ax.set_ylabel('Flux (W/m^2)')\n\n ensure_exists(output_dir)\n pdf_filename = output_dir + basename + '_aromatic_fit_1.pdf'\n print('Saved: ', pdf_filename)\n fig3.savefig(pdf_filename, format='pdf', bbox_inches='tight')\n\n plt.close()\n fig3.clear()\n # End comments if no 7.2\n\n if StrLine77[0] > 0: # Use if spectrum has 7.2\n # Create limits of subtraction\n lim69_a = np.where(waveLim77 < x77[0])\n lim72_a = np.where((waveLim77 >= x77[0]) & (waveLim77 <= x77[1]))\n limOver_a = np.where(waveLim77 > x77[1])\n\n flux69_a = fluxLim77[lim69_a]\n flux72_a = (\n StrLine77[0] *\n waveLim77[lim72_a] +\n StrLine77[1]) # Straight Line\n fluxOver_a = fluxLim77[limOver_a]\n\n fluxFull77 = []\n fluxFull77 = np.append(flux69_a, flux72_a)\n # Create array from wavelength subtraction\n fluxFull77 = np.append(fluxFull77, fluxOver_a)\n\n else:\n fluxFull77 = fluxLim77 # Use if spectrum has no 7.2\n\n return waveLim77, fluxLim77, errLim77, fluxFull77\n\n def fit_gaussians(waveLim77, fluxLim77, errLim77, fluxFull77,\n fnameStr='temp_label'):\n\n # End commented section\n # fluxFull77 = fluxLim77 # Comment if spectrum has a 7.2 feature\n\n # Define feature:\n feature = np.where(\n (waveLim77 > 7.1) & (\n waveLim77 < 8.9)) # Change as needed\n\n # # Fit Gaussian\n # fit77 = multigaussfit(\n # waveLim77[feature], fluxFull77[feature], ngauss=4,\n # err=errLim77[feature],\n # params=[5e-15,7.5,0.07,5.8e-15,7.75,0.06,\n # 1.2e-15,8.1,0.07,1.8e-15,8.65,0.07],\n # limitedmin=[True,True,True,True,True,True,\n # True,True,True,True,True,True],\n # limitedmax=[False,True,False,False,True,False,\n # False,True,False,True,True,False],\n # minpars=[0,7.4,0.05,0,7.7,0.2e-16,8.1,\n # 0.01,0.7e-16,8.4,0.01],\n # maxpars=[3e-14,7.7,0.1,3e-14,8.1,0.2,\n # 3e-14,8.4,0.2,3e-14,8.7,0.2]\n # )\n\n fit77 = multigaussfit(\n waveLim77[feature],\n fluxFull77[feature],\n ngauss=4,\n err=errLim77[feature],\n params=[\n 1.25e-14,\n 7.68,\n 0.1,\n 0.2e-14,\n 7.95,\n 0.06,\n 3e-15,\n 8.227557,\n 0.15,\n 0.3e-14,\n 8.609484,\n 0.08],\n limitedmin=[\n True,\n True,\n True,\n True,\n True,\n True,\n True,\n True,\n True,\n True,\n True,\n True],\n minpars=[\n 0.2e-18,\n 7.5,\n 0.05,\n 0.2e-18,\n 7.75,\n 0.01,\n 0.2e-18,\n 8.1,\n 0.03,\n 0.3e-18,\n 8.5,\n 0],\n limitedmax=[\n True,\n True,\n True,\n True,\n True,\n True,\n False,\n True,\n 
True,\n True,\n True,\n False],\n maxpars=[\n 2.3e-12,\n 7.9,\n 0.25,\n 1.5e-12,\n 8.2,\n 0.25,\n 8e-12,\n 8.5,\n 0.2,\n 1e-11,\n 8.7,\n 0.12])\n\n # print('Fit parameters of the 7.7 micron complex:')\n # print(fit77[0])\n waveArr = np.arange(waveLim77[feature][0],\n waveLim77[feature][-1], 0.0001)\n # Seperate Gaussian functions\n Gauss76 = fit77[0][0] * \\\n np.exp(-(waveArr - fit77[0][1])**2 / (2 * fit77[0][2]**2))\n Gauss79 = fit77[0][3] * \\\n np.exp(-(waveArr - fit77[0][4])**2 / (2 * fit77[0][5]**2))\n Gauss82 = fit77[0][6] * \\\n np.exp(-(waveArr - fit77[0][7])**2 / (2 * fit77[0][8]**2))\n Gauss86 = fit77[0][9] * \\\n np.exp(-(waveArr - fit77[0][10])**2 / (2 * fit77[0][11]**2))\n\n # err76 = compute_feature_uncertainty(\n # fit77[0][1], fit77[0][2], waveLim77[feature], rms\n # )\n # err79 = compute_feature_uncertainty(\n # fit77[0][4], fit77[0][5], waveLim77[feature], rms\n # )\n # err82 = compute_feature_uncertainty(\n # fit77[0][7], fit77[0][8], waveLim77[feature], rms\n # )\n\n fluxFeat = Gauss76 + Gauss79 + Gauss82\n waveFeat = waveArr\n area77 = simps(fluxFeat, waveFeat)\n\n # fitErr77 = area77 * np.sqrt(\n # (fit77[2][0]/fit77[0][0])**2 + (fit77[2][2]/fit77[0][2])**2 +\n # (fit77[2][3]/fit77[0][3])**2 + (fit77[2][5]/fit77[0][5])**2 +\n # (fit77[2][6]/fit77[0][6])**2 + (fit77[2][8]/fit77[0][8])**2\n # )\n # errTot77 = fitErr77\n\n # SNR77 = area77/errTot77\n\n # wave0 = 7.9 # Initial guess for central wavelength\n # errPercent = 1\n # count = 0\n '''\n # Define function to calculate difference in blue and red flux\n def areaEq(wave0):\n blue = np.where(waveFeat<=wave0)\n # Integration limits to find central wavelengths\n red = np.where(waveFeat>wave0)\n print '**************'\n area77b = simps(fluxFeat[blue], waveFeat[blue])\n area77r = simps(fluxFeat[red], waveFeat[red])\n return area77b - area77r\n\n lambdaC = fsolve(areaEq, wave0, xtol=1.5E-1)\n # Optimise difference in blue and red flux to find central wavelength\n print 'Central wavelength of 7.7 complex'\n print lambdaC\n\n while errPercent >= 0.01:\n count = count + 1\n #print count\n blue = np.where(waveFeat<=wave0)\n # Integration limits to find central wavelengths\n\n red = np.where(waveFeat>wave0)\n area77b = simps(fluxFeat[blue], waveFeat[blue])\n area77r = simps(fluxFeat[red], waveFeat[red])\n errPercent = np.absolute((area77b-area77r)/((area77b+area77r)/2))\n if area77b > area77r:\n wave0 = wave0 - 0.001\n elif area77r > area77b:\n wave0 = wave0 + 0.001\n else:\n continue\n #print area77b, area77r, wave0\n if count > 1000:\n break\n\n print 'error: ', errPercent\n print 'count: ', count\n lambdaC = wave0\n print 'Central wavelength of 7.7 complex: ', lambdaC\n\n blue1 = np.where(waveFeat<=lambdaC)\n red1 = np.where(waveFeat>lambdaC)\n area77blue = simps(fluxFeat[blue1], waveFeat[blue1])\n area77red = simps(fluxFeat[red1], waveFeat[red1])\n\n print 'Total integrated flux of 7.7 complex: ', area77\n #print 'SNR77: ', SNR77\n print 'Blue flux: ', area77blue\n print 'Red flux: ', area77red\n '''\n # Plot Gaussian fit\n fig4, ax = plt.subplots() # Define figure\n\n ax.plot(waveLim77, fluxLim77, '-r', label=fnameStr, lw=2)\n ax.errorbar(\n waveLim77,\n fluxLim77,\n errLim77,\n color='r',\n ecolor='0.45',\n lw=2,\n elinewidth=1)\n ax.plot(waveLim77[feature], fit77[1], '-g', label='Spectral fit', lw=2)\n # ax.plot(waveLim2_a, StrLineFit77, 'b-', lw=2)\n # Plot straight line - comment out if no 7.2 feature\n\n # ax.plot(x77, y77, 'bo')\n # Straight line anchor points - comments out\n # if no 7.2 feature\n\n # Overplot 
individual Gaussian functions\n ax.plot(waveArr, Gauss76, '-g', lw=2)\n ax.fill_between(waveArr, Gauss76, facecolor='green', alpha=0.15)\n ax.plot(waveArr, Gauss79, '-g', lw=2)\n ax.fill_between(waveArr, Gauss79, facecolor='green', alpha=0.15)\n ax.plot(waveArr, Gauss82, '-g', lw=2)\n ax.fill_between(waveArr, Gauss82, facecolor='green', alpha=0.15)\n ax.plot(waveArr, Gauss86, '-g', lw=2)\n ax.fill_between(waveArr, Gauss86, facecolor='green', alpha=0.15)\n\n # ax.axvline(x=lambdaC, color='black', ls='-', lw=2)\n ax.axhline(y=0, color='b', ls='-', zorder=-10, lw=2)\n ax.legend(loc=0, fontsize='small')\n ax.grid()\n ax.minorticks_on()\n\n ax.set_title(fnameStr + ' 7.7 complex - fit')\n ax.set_xlabel(r'Wavelength ($\\mu m$)')\n ax.set_ylabel('Flux ($W$/$m^2$)')\n axes = plt.gca()\n axes.set_xlim([7.1, 9])\n # axes.set_ylim([0,2e-15])\n\n ensure_exists(output_dir)\n pdf_filename = output_dir + basename + '_aromatic_fit_2.pdf'\n print('Saved: ', pdf_filename)\n fig4.savefig(pdf_filename, format='pdf', bbox_inches='tight')\n\n plt.close()\n fig4.clear()\n\n return fit77, area77, feature\n\n waveLim77, fluxLim77, errLim77, fluxFull77 = \\\n fit_straight_line(wave, flux, fluxerr)\n\n fit77, area77, feature = fit_gaussians(\n waveLim77, fluxLim77, errLim77, fluxFull77)\n\n return waveLim77, fit77, area77, feature\n\n\ndef fit_all(basename, wave, flux, fluxerr, rms, output_dir):\n \"\"\"Fit Gaussians and straight line at the same time or something. Or maybe\n no straight line.\"\"\"\n def param_constraints_OK(p0, line, index):\n # Test if any parameter hitting min/max of constrained range.\n\n def nums_equal(num1, num2, acc=0.01):\n \"\"\"Returns True if numbers are equal within some accuracy.\"\"\"\n if np.abs(num1 - num2) < acc:\n return False\n else:\n return True\n\n # Line position.\n pindex = index * 3 + 1\n fixed_position = p0['fixed'][pindex]\n\n if not fixed_position:\n limited_min = p0['limitedmin'][pindex]\n limited_max = p0['limitedmax'][pindex]\n if limited_min:\n if not nums_equal(p0['minpars'][pindex], line['position']):\n print('Hitting minimum line position.')\n return False\n if limited_max:\n if not nums_equal(p0['maxpars'][pindex], line['position']):\n print('Hitting maximum line position.')\n return False\n\n # Line sigma.\n pindex = index * 3 + 2\n fixed_sigma = p0['fixed'][pindex]\n\n if not fixed_sigma:\n limited_min = p0['limitedmin'][pindex]\n limited_max = p0['limitedmax'][pindex]\n if limited_min:\n if not nums_equal(p0['minpars'][pindex], line['sigma']):\n print('Hitting minimum line sigma.')\n return False\n if limited_max:\n if not nums_equal(p0['maxpars'][pindex], line['sigma']):\n print('Hitting maximum line sigma.')\n return False\n\n return True\n\n def fit_4gauss_2lines(wave, flux, fluxerr, trim, trim_wide):\n\n # Multigauss fit. 
Intensity, center, sigma (or FWHM?).\n yscale = flux[trim]\n guess = np.nanmax(yscale)\n yfit = multigaussfit(\n wave[trim], flux[trim], ngauss=4, err=fluxerr[trim],\n params=[\n # 0.2e-14, 6.90, 0.10,\n # 0.2e-14, 7.23, 0.05,\n guess / 2., 7.68, 0.10,\n guess, 7.95, 0.06,\n guess / 2., 8.23, 0.15,\n guess / 2., 8.61, 0.08\n ],\n limitedmin=[True] * 12,\n limitedmax=[True] * 12,\n minpars=[\n # 0, 6.8, 0.04,\n # 0, 7, 0,\n 0, 7.5, 0.05,\n 0, 7.75, 0.01,\n 0, 8.1, 0.03,\n 0, 8.5, 0\n ],\n maxpars=[\n # 0.5e-13, 7, 0.2,\n # 0.1e-13, 7.25, 0.2,\n guess, 7.9, 0.25,\n guess, 8.2, 0.25,\n guess, 8.5, 0.2,\n guess, 8.7, 0.12\n ])\n\n g76 = onedgaussian(wave, 0, yfit[0][0], yfit[0][1], yfit[0][2])\n g78 = onedgaussian(wave, 0, yfit[0][3], yfit[0][4], yfit[0][5])\n g82 = onedgaussian(wave, 0, yfit[0][6], yfit[0][7], yfit[0][8])\n g86 = onedgaussian(wave, 0, yfit[0][9], yfit[0][10], yfit[0][11])\n model = g76 + g78 + g82 + g86\n\n # Multigauss fit. Intensity, center, sigma (or FWHM?).\n resid = flux - model\n yfit2 = multigaussfit(\n wave, resid, ngauss=2,\n params=[\n np.nanmax(resid) / 2., 6.88, 0.10,\n np.nanmax(resid) / 2., 7.23, 0.05,\n ],\n limitedmin=[True] * 6,\n limitedmax=[True] * 6,\n minpars=[\n 0, 6.8, 0.04,\n 0, 7, 0,\n ],\n maxpars=[\n np.nanmax(resid), 7, 0.2,\n np.nanmax(resid), 7.30, 0.2,\n ])\n\n line69 = onedgaussian(wave, 0, yfit2[0][0], yfit2[0][1], yfit2[0][2])\n line72 = onedgaussian(wave, 0, yfit2[0][3], yfit2[0][4], yfit2[0][5])\n\n wpeak = {\n '69': yfit2[0][1],\n '72': yfit2[0][4],\n }\n\n return g76, g78, g82, g86, line69, line72, model, yfit, yfit2, wpeak\n\n def fit_6gauss(wave, flux, fluxerr, trim, basename):\n\n # Initial parameters and constraints.\n yscale = flux[trim]\n guess = np.nanmax(yscale)\n p_init = params_6gauss(basename, guess)\n\n # If fluxerr[trim] has zeroes, don't use errors for now?\n if 0 in fluxerr[trim]:\n errpass = None\n else:\n errpass = fluxerr[trim]\n\n if basename in ['J052043_CWsub', 'SMPSMC006_CWsub']:\n errpass = None\n\n # Multigauss fit. 
Intensity, center, sigma (or FWHM?).\n yfit = multigaussfit(\n wave[trim], flux[trim], ngauss=6, err=errpass,\n params=p_init['params'],\n limitedmin=p_init['limitedmin'],\n limitedmax=p_init['limitedmax'],\n fixed=p_init['fixed'],\n minpars=p_init['minpars'],\n maxpars=p_init['maxpars']\n )\n\n # Save results.\n features = ('line69', 'line72', 'g76', 'g78', 'g82', 'g86')\n keys = ('scale_factor', 'position', 'sigma')\n keys_err = ('scale_factor_err', 'position_err', 'sigma_err')\n results = {}\n\n for i in range(6):\n fit_params = (yfit[0][3*i: 3*i+3])\n fit_params_err = (yfit[2][3*i: 3*i+3])\n\n integrated_fluxerr = compute_feature_uncertainty(\n fit_params[1], fit_params[2], wave[trim], rms\n )\n results[features[i]] = dict(zip(keys, fit_params))\n results[features[i]].update(dict(zip(keys_err, fit_params_err)))\n results[features[i]]['wave'] = wave\n results[features[i]]['spectrum'] = onedgaussian(\n wave, 0, *fit_params)\n results[features[i]]['integrated_flux'] = simps(\n results[features[i]]['spectrum'], results[features[i]]['wave'])\n results[features[i]]['integrated_fluxerr'] = integrated_fluxerr\n\n return yfit, results, p_init\n\n print(basename)\n\n fit4 = False\n\n if fit4:\n\n # Only fit 7-9 micron zone.\n trim = np.where((wave > 7.3) & (wave < 9.2))\n trim_wide = np.where((wave >= 6.0) & (wave <= 10))\n\n # Return fit.\n g76, g78, g82, g86, line69, line72, model, yfit, yfit2, wpeak = \\\n fit_4gauss_2lines(wave, flux, fluxerr, trim, trim_wide)\n\n flux69 = simps(line69, wave)\n flux72 = simps(line72, wave)\n\n # Plot results.\n fig = plt.figure()\n gs = gridspec.GridSpec(ncols=1, nrows=2, figure=fig, hspace=0.3)\n ax1 = plt.subplot(gs[0])\n ax2 = plt.subplot(gs[1])\n\n # Upper panel.\n gsum = g76 + g78 + g82\n ax1.plot(wave[trim_wide], flux[trim_wide], label='Data')\n ax1.plot(wave[trim_wide], model[trim_wide],\n label='Model (Flux: {:.2e} W/m^2)'.format(simps(gsum, wave)))\n for index, item in enumerate((g76, g78, g82, g86)):\n ax1.fill_between(\n wave[trim_wide],\n wave[trim_wide] * 0,\n item[trim_wide],\n lw=0.5,\n alpha=0.3)\n ax1.axhline(y=0, ls='--', lw=0.5, color='k')\n ax1.legend(loc=0)\n xmin, xmax = ax1.get_xlim()\n\n # Lower panel.\n trim_wide2 = np.where((wave >= 6.5) & (wave <= 10))\n ax2.plot(\n wave[trim_wide2],\n flux[trim_wide2] -\n model[trim_wide2],\n label='Residual from 4gauss')\n label69 = \\\n '6.9 ({:.2f} µm, Flux: {:.2e} W/m^2)'.format(wpeak['69'], flux69)\n label72 = \\\n '7.2 ({:.2f} µm, Flux: {:.2e} W/m^2)'.format(wpeak['72'], flux72)\n ax2.fill_between(wave, wave * 0, line69, alpha=0.3, label=label69)\n ax2.fill_between(wave, wave * 0, line72, alpha=0.3, label=label72)\n ax2.axhline(y=0, ls='--', lw=0.5, color='k')\n ax2.legend(loc=0)\n ax2.set_xlim(xmin, xmax)\n\n # Save.\n savename = output_dir + 'fullspec/' + basename + '_test.pdf'\n fig.savefig(savename, bbox_inches='tight')\n print('Saved: ', savename)\n plt.close()\n fig.clear()\n\n else:\n\n # Only fit 7-9 micron zone.\n trim = np.where((wave > 6.0) & (wave < 10))\n\n # Try 6-components.\n yfit, results, p0 = fit_6gauss(wave, flux, fluxerr, trim, basename)\n\n # # Try 6, with LMFIT!\n # yfit2, results2, p02 = fit_6gauss_lmfit(wave, flux, fluxerr, trim)\n # st()\n\n # Plot results.\n fig = plt.figure(figsize=(8, 6))\n gs = gridspec.GridSpec(ncols=1, nrows=2, figure=fig, hspace=0.3)\n ax1 = plt.subplot(gs[0])\n ax2 = plt.subplot(gs[1])\n\n ##############################\n # Upper panel.\n ##############################\n\n flux77 = sum([results[x]['integrated_flux']\n for x in ('g76', 'g78', 
'g82')])\n flux77_err = sum([results[x]['integrated_fluxerr']\n for x in ('g76', 'g78', 'g82')])\n spec77 = results['g76']['spectrum'] + results['g78']['spectrum'] + \\\n results['g82']['spectrum']\n\n centroid77 = np.sum(spec77 * wave) / np.sum(spec77)\n model_label = \\\n r'Model (g1-3: {:.2f} µm, {:.2e} +- ' \\\n '{:.2e} W/m$^2$)'.format(centroid77, flux77, flux77_err)\n ax1.errorbar(wave[trim], flux[trim], yerr=fluxerr[trim], label='Data')\n # ax1.plot(wave[trim], flux[trim], label='Data')\n\n ax1.plot(wave[trim], yfit[1], label=model_label, zorder=1000)\n for index, key in enumerate(results):\n ax1.fill_between(wave[trim], wave[trim] * 0,\n results[key]['spectrum'][trim],\n lw=0.5, alpha=0.3)\n ax1.axvline(x=centroid77, color='k', ls='-', lw=0.5)\n ax1.axhline(y=0, ls='--', lw=0.5, color='k')\n ax1.axvline(x=6.9, color='k', ls='-', lw=0.5)\n ax1.axvline(x=7.25, color='k', ls='-', lw=0.5)\n ax1.legend(loc=0, fontsize=8)\n xmin, xmax = ax1.get_xlim()\n\n ##############################\n # Lower panel.\n ##############################\n\n f72_69 = results['line72']['integrated_flux'] / \\\n results['line69']['integrated_flux']\n\n label = 'Residuals (7.2/6.9 = {}' \\\n ')'.format(quant_str(f72_69, precision=\"0.01\"))\n ax2.plot(wave[trim], flux[trim] - yfit[1],\n label=label)\n ax2.axvline(x=6.9, color='k', ls='-', lw=0.5)\n ax2.axvline(x=7.25, color='k', ls='-', lw=0.5)\n\n param_OK_list = [True]\n for index, key in enumerate(results):\n line = results[key]\n label = '{:.2f} µm, {:.2e} +- {:.2e} W/m^2, FWHM={:.2f} µm'.format(\n line['position'], line['integrated_flux'],\n line['integrated_fluxerr'],\n to_fwhm(line['sigma'])\n )\n param_OK_list.append(param_constraints_OK(p0, line, index))\n ax2.fill_between(wave[trim], wave[trim] * 0,\n results[key]['spectrum'][trim],\n lw=0.5, alpha=0.3, label=label)\n ax2.axhline(y=0, ls='--', lw=0.5, color='k')\n mylegend = ax2.legend(loc=0, fontsize=8)\n\n for index, text in enumerate(mylegend.get_texts()):\n if not param_OK_list[index]:\n text.set_color(\"red\")\n\n # Save.\n savename = output_dir + 'fullspec/' + basename + '_6gauss.pdf'\n fig.savefig(savename, bbox_inches='tight')\n print('Saved: ', savename)\n plt.close()\n fig.clear()\n\n # Insert the 7.7 results.\n results['pah77'] = {\n 'flux': flux77,\n 'fluxerr': flux77_err,\n 'centroid': centroid77,\n }\n\n pkl_name = output_dir + 'numeric/' + basename + '.pkl'\n # Record results to disk.\n with open(pkl_name, 'wb') as file:\n pickle.dump(results, file, protocol=pickle.HIGHEST_PROTOCOL)\n print('Saved: ', pkl_name)\n\n txt_name = output_dir + 'numeric/' + basename + '.txt'\n with open(txt_name, 'w') as f:\n f.write('Object name, flux (W/m^2), flux error (W/m^2)\\n')\n f.write(basename + '\\n')\n f.write(str(flux77) + '\\n')\n f.write(str(flux77_err) + '\\n')\n print('Saved: ', txt_name)\n\n return (basename, flux77, flux77_err)\n\n\ndef measure_112(basename, wave, flux, fluxerr, rms, output_dir,\n fnameStr='temp_label'):\n\n lim11 = np.where((wave >= 10.8) & (wave <= 12))\n waveLim11 = wave[lim11]\n fluxLim11 = flux[lim11]\n errLim11 = fluxerr[lim11]\n\n feature11 = np.where((waveLim11 >= 11.1) & (\n waveLim11 <= 11.85)) # Define actual feature\n # Start at 11.1, change end point as needed\n\n area11 = simps(fluxLim11[feature11], waveLim11[feature11]) # Integrate\n\n # print('Integrated flux of 11.2 feature: ', area11)\n\n fig5, ax = plt.subplots() # Define figure\n\n ax.plot(waveLim11, fluxLim11, '-r', label=fnameStr, lw=2)\n ax.errorbar(\n waveLim11,\n fluxLim11,\n errLim11,\n 
color='r',\n ecolor='0.45',\n lw=2,\n elinewidth=1)\n ax.fill_between(\n waveLim11[feature11],\n fluxLim11[feature11],\n facecolor='red',\n alpha=0.15)\n ax.axhline(y=0, color='k', ls='-', zorder=-10, lw=2)\n\n ax.legend(loc=0, fontsize='small')\n ax.set_title(fnameStr + ' -- 11.2 feature')\n ax.set_xlabel('Wavelength (microns)')\n ax.set_ylabel('Flux (W/m^2)')\n ax.grid()\n ax.minorticks_on()\n\n ensure_exists(output_dir)\n pdf_filename = output_dir + basename + '_11.pdf'\n print('Saved: ', pdf_filename)\n fig5.savefig(pdf_filename, format='pdf', bbox_inches='tight')\n\n plt.close()\n fig5.clear()\n\n return area11\n\n\ndef save_fit_parameters(output_dir, results):\n\n ensure_exists(output_dir)\n\n fitAli, fit77, basename, waveLim, waveLim77, area69, \\\n area72, area77, area11, SNR69, SNR72, feature = results\n\n fnameStr = basename\n\n # Save all fit parameters in one file\n arrParamsID = np.append(fitAli[0] * 0, fit77[0] * 0 + 1)\n arrParams = np.append(fitAli[0], fit77[0])\n # arrParamsErr = np.append(fitAli[2], fit77[2])\n\n txt_filename = output_dir + fnameStr + '_fitParams.txt'\n print('Saved: ', txt_filename)\n np.savetxt(\n txt_filename,\n np.c_[\n arrParamsID,\n arrParams],\n delimiter=',',\n header='Gaussian fit parameters\\n col1: ID - 0=aliphatic, '\n '1=7.7 complex, col2: parameters, col3: fit error')\n # np.savetxt(fnameStr + '_fitParams.txt',\n # np.c_[arrParamsID, arrParams, arrParamsErr], delimiter=',',\n # header='Gaussian fit parameters\\n col1: ID - 0=aliphatic, 1=7.7 complex,\n # col2: parameters, col3: fit error')\n\n # Save all fit models in one file\n arrID = np.append(fitAli[1] * 0, fit77[1] * 0 + 1)\n arrWave = np.append(waveLim, waveLim77[feature])\n arrFluxDensity = np.append(fitAli[1], fit77[1])\n\n txt_filename = output_dir + fnameStr + '_fitModel.txt'\n print('Saved: ', txt_filename)\n np.savetxt(\n txt_filename,\n np.c_[\n arrID,\n arrWave,\n arrFluxDensity],\n delimiter=',',\n header='Full model fit\\n col1: ID - 0=aliphatic, 1=7.7 complex, '\n 'col2: wavelength, col3: flux density')\n\n # arrIntegratedFluxes = np.array([area69, area72, area77, area77blue,\n # area77red, area11, lambdaC]) # Save all integrated fluxes in one file\n # Save all integrated fluxes in one file\n # arrIntegratedFluxes = np.array([area69, area72, area77, area11])\n\n txt_filename = output_dir + fnameStr + '_intFlux.txt'\n print('Saved: ', txt_filename)\n np.savetxt(\n txt_filename,\n np.c_[\n area69,\n area72,\n area77,\n area11],\n delimiter=',',\n header='Integrated fluxes of features\\n col1: 6.9 microns, '\n 'col2: 7.2 microns, col3: total 7.7 complex, col4: blue 7.7, '\n 'col5: red 7.7, col6: 11.2 feature, '\n 'col7: central wavelength of 7.7 (microns)')\n\n txt_filename = output_dir + fnameStr + '_SNR.txt'\n print('Saved: ', txt_filename)\n np.savetxt(\n txt_filename,\n np.c_[\n SNR69,\n SNR72],\n delimiter=',',\n header='col1:SNR69, col2: SNR72, col3: SNR77')\n\n txt_filename = output_dir + fnameStr + 'Full.txt'\n workFile = open(txt_filename, 'w') # Write all data into single file\n workFile.write(fnameStr + 'Fitting and integrated flux data\\n\\n')\n\n workFile.write('Section 1: Aliphatic features\\n')\n workFile.write(\n 'Gaussian fitting parameters for 6.9 and 7.2 micron features\\n')\n workFile.write(str(fitAli[0]) + '\\n')\n workFile.write('Errors on aliphatic fitting parameters\\n')\n workFile.write(str(fitAli[2]) + '\\n')\n workFile.write('Fit model - aliphatic features\\n')\n workFile.write(str(waveLim) + '\\n' + str(fitAli[1]) + '\\n\\n')\n 
workFile.write('Integrated fluxes of aliphatic features\\n')\n workFile.write('6.9 micron feature ' + str(area69) + '\\n')\n workFile.write('7.2 micron feature ' + str(area72) + '\\n')\n workFile.write('S/N of 6.9: ' + str(SNR69) + '\\n')\n workFile.write('S/N of 7.2: ' + str(SNR72) + '\\n\\n')\n\n workFile.write('Section 2: 7.7 micron complex\\n')\n workFile.write('Gaussian fitting parameters for 7.7 micron complex\\n')\n workFile.write(str(fit77[0]) + '\\n')\n workFile.write('Errors on fitting parameters\\n')\n workFile.write(str(fit77[2]) + '\\n')\n workFile.write('Fit model - 7.7 micron complex\\n')\n workFile.write(str(waveLim77[feature]) + '\\n' + str(fit77[1]) + '\\n')\n workFile.write('Integrated fluxes\\n')\n workFile.write('Total integrated flux of complex: ' + str(area77) + '\\n')\n # workFile.write('Blue flux: ' + str(area77blue) + '\\n')\n # workFile.write('Red flux: ' + str(area77red) + '\\n')\n # workFile.write('SNR77: ' + str(SNR77) + '\\n')\n # workFile.write('Central wavelength of 7.7 complex:' + str(lambdaC) +\n # '\\n\\n')\n\n workFile.write('Section 3: 11.2 micron feature\\n')\n workFile.write('Integrated flux of 11.2 micron feature:\\n')\n workFile.write(str(area11) + '\\n')\n\n workFile.close()\n print('Saved: ', txt_filename)\n\n return\n"
] | [
[
"numpy.nanmax",
"numpy.polyfit",
"numpy.sqrt",
"numpy.nanmin",
"numpy.nanargmin",
"numpy.mean",
"numpy.exp",
"numpy.where",
"matplotlib.pyplot.gca",
"numpy.arange",
"matplotlib.pyplot.subplot",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"numpy.append",
"scipy.integrate.simps",
"numpy.savetxt",
"numpy.array",
"numpy.sum",
"numpy.abs",
"matplotlib.pyplot.subplots",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
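As a quick orientation for readers of the spectral-fitting payload above, here is a minimal sketch of the integration step performed inside measure_112: select the 11.1-11.85 micron window and integrate the flux with Simpson's rule. The wavelength grid and Gaussian "feature" below are invented toy values, not data from the archived repository; only numpy and scipy (whose simps call appears in the row's API list) are assumed.

    import numpy as np
    from scipy.integrate import simps

    # Toy spectrum (invented values): a Gaussian feature centred near 11.25 um
    # on a grid covering the 10.8-12.0 um window that measure_112 selects.
    wave = np.linspace(10.8, 12.0, 240)
    flux = 1e-16 * np.exp(-0.5 * ((wave - 11.25) / 0.10) ** 2)

    # Select the 11.1-11.85 um feature window and integrate with Simpson's
    # rule, mirroring the area11 computation in the archived function.
    feature = np.where((wave >= 11.1) & (wave <= 11.85))
    area11 = simps(flux[feature], wave[feature])
    print('11.2 um integrated flux:', area11)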
entslscheia/allennlp | [
"eeba62e34c8e211ed5963f830528c957f178607b"
] | [
"allennlp/data/fields/knowledge_graph_field.py"
] | [
"\"\"\"\n``KnowledgeGraphField`` is a ``Field`` which stores a knowledge graph representation.\n\"\"\"\nfrom typing import Callable, Dict, List, Set\nfrom collections import defaultdict\n\nimport editdistance\nfrom overrides import overrides\nimport torch\n\nfrom allennlp.common import util\nfrom allennlp.common.checks import ConfigurationError\nfrom allennlp.data.fields.field import Field\nfrom allennlp.data.token_indexers.token_indexer import TokenIndexer, TokenType\nfrom allennlp.data.tokenizers.word_splitter import SpacyWordSplitter\nfrom allennlp.data.tokenizers.token import Token\nfrom allennlp.data.tokenizers import Tokenizer, WordTokenizer\nfrom allennlp.data.vocabulary import Vocabulary\nfrom allennlp.nn import util as nn_util\nfrom allennlp.semparse.contexts.knowledge_graph import KnowledgeGraph\n\nTokenList = List[TokenType]\n\n\nclass KnowledgeGraphField(Field[Dict[str, torch.Tensor]]):\n \"\"\"\n A ``KnowledgeGraphField`` represents a ``KnowledgeGraph`` as a ``Field`` that can be used in a\n ``Model``. For each entity in the graph, we output two things: a text representation of the\n entity, handled identically to a ``TextField``, and a list of linking features for each token\n in some input utterance.\n\n The output of this field is a dictionary::\n\n {\n \"text\": Dict[str, torch.Tensor], # each tensor has shape (batch_size, num_entities, num_entity_tokens)\n \"linking\": torch.Tensor # shape (batch_size, num_entities, num_utterance_tokens, num_features)\n }\n\n The ``text`` component of this dictionary is suitable to be passed into a\n ``TextFieldEmbedder`` (which handles the additional ``num_entities`` dimension without any\n issues). The ``linking`` component of the dictionary can be used however you want to decide\n which tokens in the utterance correspond to which entities in the knowledge graph.\n\n In order to create the ``text`` component, we use the same dictionary of ``TokenIndexers``\n that's used in a ``TextField`` (as we're just representing the text corresponding to each\n entity). For the ``linking`` component, we use a set of hard-coded feature extractors that\n operate between the text corresponding to each entity and each token in the utterance.\n\n Parameters\n ----------\n knowledge_graph : ``KnowledgeGraph``\n The knowledge graph that this field stores.\n utterance_tokens : ``List[Token]``\n The tokens in some utterance that is paired with the ``KnowledgeGraph``. We compute a set\n of features for linking tokens in the utterance to entities in the graph.\n tokenizer : ``Tokenizer``, optional (default=``WordTokenizer()``)\n We'll use this ``Tokenizer`` to tokenize the text representation of each entity.\n token_indexers : ``Dict[str, TokenIndexer]``\n Token indexers that convert entities into arrays, similar to how text tokens are treated in\n a ``TextField``. These might operate on the name of the entity itself, its type, its\n neighbors in the graph, etc.\n feature_extractors : ``List[str]``, optional\n Names of feature extractors to use for computing linking features. These must be\n attributes of this object, without the first underscore. The feature extraction functions\n are listed as the last methods in this class. For example, to use\n :func:`_exact_token_match`, you would pass the string ``exact_token_match``. We will add\n an underscore and look for a function matching that name. 
If this list is omitted, we will\n use all available feature functions.\n entity_tokens : ``List[List[Token]]``, optional\n If you have pre-computed the tokenization of the table text, you can pass it in here. The\n must be a list of the tokens in the entity text, for each entity in the knowledge graph, in\n the same order in which the knowledge graph returns entities.\n linking_features : ``List[List[List[float]]]``, optional\n If you have pre-computed the linking features between the utterance and the table text, you\n can pass it in here.\n include_in_vocab : ``bool``, optional (default=True)\n If this is ``False``, we will skip the ``count_vocab_items`` logic, leaving out all table\n entity text from the vocabulary computation. You might want to do this if you have a lot\n of rare entities in your tables, and you see the same table in multiple training instances,\n so your vocabulary counts get skewed and include too many rare entities.\n max_table_tokens : ``int``, optional\n If given, we will only keep this number of total table tokens. This bounds the memory\n usage of the table representations, truncating cells with really long text. We specify a\n total number of tokens, not a max cell text length, because the number of table entities\n varies.\n \"\"\"\n\n def __init__(\n self,\n knowledge_graph: KnowledgeGraph,\n utterance_tokens: List[Token],\n token_indexers: Dict[str, TokenIndexer],\n tokenizer: Tokenizer = None,\n feature_extractors: List[str] = None,\n entity_tokens: List[List[Token]] = None,\n linking_features: List[List[List[float]]] = None,\n include_in_vocab: bool = True,\n max_table_tokens: int = None,\n ) -> None:\n\n self.knowledge_graph = knowledge_graph\n self._tokenizer = tokenizer or WordTokenizer(word_splitter=SpacyWordSplitter(pos_tags=True))\n if not entity_tokens:\n entity_texts = [\n knowledge_graph.entity_text[entity].lower() for entity in knowledge_graph.entities\n ]\n # TODO(mattg): Because we do tagging on each of these entities in addition to just\n # tokenizations, this is quite slow, and about half of our data processing time just\n # goes to this (~15 minutes when there are 7k instances). The reason we do tagging is\n # so that we can add lemma features. 
If we can remove the need for lemma / other\n # hand-written features, like with a CNN, we can cut down our data processing time by a\n # factor of 2.\n self.entity_texts = self._tokenizer.batch_tokenize(entity_texts)\n else:\n self.entity_texts = entity_tokens\n self.utterance_tokens = utterance_tokens\n self._token_indexers: Dict[str, TokenIndexer] = token_indexers\n self._include_in_vocab = include_in_vocab\n self._indexed_entity_texts: Dict[str, TokenList] = None\n self._max_table_tokens = max_table_tokens\n\n feature_extractors = (\n feature_extractors\n if feature_extractors is not None\n else [\n \"number_token_match\",\n \"exact_token_match\",\n \"contains_exact_token_match\",\n \"lemma_match\",\n \"contains_lemma_match\",\n \"edit_distance\",\n \"related_column\",\n \"related_column_lemma\",\n \"span_overlap_fraction\",\n \"span_lemma_overlap_fraction\",\n ]\n )\n self._feature_extractors: List[\n Callable[[str, List[Token], Token, int, List[Token]], float]\n ] = []\n for feature_extractor_name in feature_extractors:\n extractor = getattr(self, \"_\" + feature_extractor_name, None)\n if not extractor:\n raise ConfigurationError(\n f\"Invalid feature extractor name: {feature_extractor_name}\"\n )\n self._feature_extractors.append(extractor)\n\n if not linking_features:\n # For quicker lookups in our feature functions, we'll additionally store some\n # dictionaries that map entity strings to useful information about the entity.\n self._entity_text_map: Dict[str, List[Token]] = {}\n for entity, entity_text in zip(knowledge_graph.entities, self.entity_texts):\n self._entity_text_map[entity] = entity_text\n\n self._entity_text_exact_text: Dict[str, Set[str]] = {}\n for entity, entity_text in zip(knowledge_graph.entities, self.entity_texts):\n self._entity_text_exact_text[entity] = set(e.text for e in entity_text)\n\n self._entity_text_lemmas: Dict[str, Set[str]] = {}\n for entity, entity_text in zip(knowledge_graph.entities, self.entity_texts):\n self._entity_text_lemmas[entity] = set(e.lemma_ for e in entity_text)\n self.linking_features = self._compute_linking_features()\n else:\n self.linking_features = linking_features\n\n @overrides\n def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):\n if self._include_in_vocab:\n for indexer in self._token_indexers.values():\n for entity_text in self.entity_texts:\n for token in entity_text:\n indexer.count_vocab_items(token, counter)\n\n @overrides\n def index(self, vocab: Vocabulary):\n self._indexed_entity_texts = {}\n for indexer_name, indexer in self._token_indexers.items():\n indexer_arrays: Dict[str, List] = defaultdict(list)\n\n for entity_text in self.entity_texts:\n for index_name, indexed in indexer.tokens_to_indices(\n entity_text, vocab, indexer_name\n ).items():\n indexer_arrays[index_name].append(indexed)\n\n self._indexed_entity_texts.update(indexer_arrays)\n\n @overrides\n def get_padding_lengths(self) -> Dict[str, int]:\n num_entities = len(self.entity_texts)\n num_entity_tokens = max(len(entity_text) for entity_text in self.entity_texts)\n\n if self._max_table_tokens:\n # This truncates the number of entity tokens used, enabling larger tables (either in\n # the number of entities in the table, or the number of tokens per entity) to fit in\n # memory, particularly when using ELMo.\n if num_entities * num_entity_tokens > self._max_table_tokens:\n num_entity_tokens = int(self._max_table_tokens / num_entities)\n\n padding_lengths = {\n \"num_entities\": num_entities,\n \"num_utterance_tokens\": 
len(self.utterance_tokens),\n }\n padding_lengths[\"num_entity_tokens\"] = num_entity_tokens\n lengths = []\n assert self._indexed_entity_texts is not None, (\n \"This field is not indexed yet. Call \"\n \".index(vocab) before determining padding \"\n \"lengths.\"\n )\n for indexer_name, indexer in self._token_indexers.items():\n indexer_lengths = {}\n\n # This is a list of dicts, one for each token in the field.\n entity_lengths = [\n indexer.get_padding_lengths(token)\n for entity_text in self._indexed_entity_texts[indexer_name]\n for token in entity_text\n ]\n # Iterate over the keys in the first element of the list. This is fine as for a given\n # indexer, all entities will return the same keys, so we can just use the first one.\n for key in entity_lengths[0].keys():\n indexer_lengths[key] = max(x.get(key, 0) for x in entity_lengths)\n lengths.append(indexer_lengths)\n\n # Get all the keys which have been used for padding.\n padding_keys = {key for d in lengths for key in d.keys()}\n for padding_key in padding_keys:\n padding_lengths[padding_key] = max(x.get(padding_key, 0) for x in lengths)\n return padding_lengths\n\n @overrides\n def as_tensor(self, padding_lengths: Dict[str, int]) -> Dict[str, torch.Tensor]:\n tensors = {}\n desired_num_entities = padding_lengths[\"num_entities\"]\n desired_num_entity_tokens = padding_lengths[\"num_entity_tokens\"]\n desired_num_utterance_tokens = padding_lengths[\"num_utterance_tokens\"]\n for indexer_name, indexer in self._token_indexers.items():\n padded_entities = util.pad_sequence_to_length(\n self._indexed_entity_texts[indexer_name],\n desired_num_entities,\n default_value=lambda: [],\n )\n padded_tensors = []\n for padded_entity in padded_entities:\n padded_tensor = indexer.as_padded_tensor(\n {\"key\": padded_entity}, {\"key\": desired_num_entity_tokens}, padding_lengths\n )[\"key\"]\n padded_tensors.append(padded_tensor)\n tensor = torch.stack(padded_tensors)\n tensors[indexer_name] = tensor\n padded_linking_features = util.pad_sequence_to_length(\n self.linking_features, desired_num_entities, default_value=lambda: []\n )\n padded_linking_arrays = []\n\n def default_feature_value():\n return [0.0] * len(self._feature_extractors)\n\n for linking_features in padded_linking_features:\n padded_features = util.pad_sequence_to_length(\n linking_features, desired_num_utterance_tokens, default_value=default_feature_value\n )\n padded_linking_arrays.append(padded_features)\n linking_features_tensor = torch.FloatTensor(padded_linking_arrays)\n return {\"text\": tensors, \"linking\": linking_features_tensor}\n\n def _compute_linking_features(self) -> List[List[List[float]]]:\n linking_features = []\n for entity, entity_text in zip(self.knowledge_graph.entities, self.entity_texts):\n entity_features = []\n for token_index, token in enumerate(self.utterance_tokens):\n token_features = []\n for feature_extractor in self._feature_extractors:\n token_features.append(\n feature_extractor(\n entity, entity_text, token, token_index, self.utterance_tokens\n )\n )\n entity_features.append(token_features)\n linking_features.append(entity_features)\n return linking_features\n\n @overrides\n def empty_field(self) -> \"KnowledgeGraphField\":\n return KnowledgeGraphField(KnowledgeGraph(set(), {}), [], self._token_indexers)\n\n @overrides\n def batch_tensors(self, tensor_list: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:\n\n batched_text = nn_util.batch_tensor_dicts(\n tensor[\"text\"] for tensor in tensor_list # type: ignore\n )\n batched_linking = 
torch.stack([tensor[\"linking\"] for tensor in tensor_list])\n return {\"text\": batched_text, \"linking\": batched_linking}\n\n # Below here we have feature extractor functions. To keep a consistent API for easy logic\n # above, some of these functions have unused arguments.\n\n # These feature extractors are generally pretty specific to the logical form language and\n # problem setting in WikiTableQuestions. This whole notion of feature extraction should\n # eventually be made more general (or just removed, if we can replace it with CNN features...).\n # For the feature functions used in the original parser written in PNP, see here:\n # https://github.com/allenai/pnp/blob/wikitables2/src/main/scala/org/allenai/wikitables/SemanticParserFeatureGenerator.scala\n\n # One notable difference between how the features work here and how they worked in PNP is that\n # we're using the table text when computing string matches, while PNP used the _entity name_.\n # It turns out that the entity name is derived from the table text, so this should be roughly\n # equivalent, except in the case of some numbers. If there are cells with different text that\n # normalize to the same name, you could get `_2` or similar appended to the name, so the way we\n # do it here should just be better. But it's a possible minor source of variation from the\n # original parser.\n\n # Another difference between these features and the PNP features is that the span overlap used\n # a weighting scheme to downweight matches on frequent words (like \"the\"), and the lemma\n # overlap feature value was calculated a little differently. I'm guessing that doesn't make a\n # huge difference...\n\n def _number_token_match(\n self,\n entity: str,\n entity_text: List[Token],\n token: Token,\n token_index: int,\n tokens: List[Token],\n ) -> float:\n # PNP had a \"spanFeatures\" function that said whether an entity was a-priori known to link\n # to a token or set of tokens in the question. This was only used for numbers, and it's\n # not totally clear to me how this number feature overlapped with the token match features\n # in the original implementation (I think in most cases it was the same, except for things\n # like \"four million\", because the token match is derived from the entity name, which would\n # be 4000000, and wouldn't match \"four million\").\n #\n # Our implementation basically just adds a duplicate token match feature that's specific to\n # numbers. It'll break in some rare cases (e.g., \"Which four had four million ...\"), but\n # those shouldn't be a big deal.\n if \":\" in entity:\n # This check works because numbers are the only entities that don't contain \":\". 
All\n # others in both WikiTables languages do (e.g.: fb:row.row.column_name,\n # date_column:year, string:usl_a_league etc.).\n return 0.0\n return self._contains_exact_token_match(entity, entity_text, token, token_index, tokens)\n\n def _exact_token_match(\n self,\n entity: str,\n entity_text: List[Token],\n token: Token,\n token_index: int,\n tokens: List[Token],\n ) -> float:\n if len(entity_text) != 1:\n return 0.0\n return self._contains_exact_token_match(entity, entity_text, token, token_index, tokens)\n\n def _contains_exact_token_match(\n self,\n entity: str,\n entity_text: List[Token],\n token: Token,\n token_index: int,\n tokens: List[Token],\n ) -> float:\n if token.text in self._entity_text_exact_text[entity]:\n return 1.0\n return 0.0\n\n def _lemma_match(\n self,\n entity: str,\n entity_text: List[Token],\n token: Token,\n token_index: int,\n tokens: List[Token],\n ) -> float:\n if len(entity_text) != 1:\n return 0.0\n return self._contains_lemma_match(entity, entity_text, token, token_index, tokens)\n\n def _contains_lemma_match(\n self,\n entity: str,\n entity_text: List[Token],\n token: Token,\n token_index: int,\n tokens: List[Token],\n ) -> float:\n if token.text in self._entity_text_exact_text[entity]:\n return 1.0\n if token.lemma_ in self._entity_text_lemmas[entity]:\n return 1.0\n return 0.0\n\n def _edit_distance(\n self,\n entity: str,\n entity_text: List[Token],\n token: Token,\n token_index: int,\n tokens: List[Token],\n ) -> float:\n edit_distance = float(editdistance.eval(\" \".join(e.text for e in entity_text), token.text))\n return 1.0 - edit_distance / len(token.text)\n\n def _related_column(\n self,\n entity: str,\n entity_text: List[Token],\n token: Token,\n token_index: int,\n tokens: List[Token],\n ) -> float:\n # Check if the entity is a column name in one of the two WikiTables languages.\n if not entity.startswith(\"fb:row.row\") and \"_column:\" not in entity:\n return 0.0\n for neighbor in self.knowledge_graph.neighbors[entity]:\n if token.text in self._entity_text_exact_text[neighbor]:\n return 1.0\n return 0.0\n\n def _related_column_lemma(\n self,\n entity: str,\n entity_text: List[Token],\n token: Token,\n token_index: int,\n tokens: List[Token],\n ) -> float:\n # Check if the entity is a column name in one of the two WikiTables languages.\n if not entity.startswith(\"fb:row.row\") and \"_column:\" not in entity:\n return 0.0\n for neighbor in self.knowledge_graph.neighbors[entity]:\n if token.text in self._entity_text_exact_text[neighbor]:\n return 1.0\n if token.lemma_ in self._entity_text_lemmas[neighbor]:\n return 1.0\n return 0.0\n\n def _span_overlap_fraction(\n self,\n entity: str,\n entity_text: List[Token],\n token: Token,\n token_index: int,\n tokens: List[Token],\n ) -> float:\n entity_words = set(entity_token.text for entity_token in entity_text)\n if not entity_words:\n # Some tables have empty cells.\n return 0\n seen_entity_words = set()\n token_index_left = token_index\n while token_index < len(tokens) and tokens[token_index].text in entity_words:\n seen_entity_words.add(tokens[token_index].text)\n token_index += 1\n while token_index_left >= 0 and tokens[token_index_left].text in entity_words:\n seen_entity_words.add(tokens[token_index_left].text)\n token_index_left -= 1\n return len(seen_entity_words) / len(entity_words)\n\n def _span_lemma_overlap_fraction(\n self,\n entity: str,\n entity_text: List[Token],\n token: Token,\n token_index: int,\n tokens: List[Token],\n ) -> float:\n entity_lemmas = set(entity_token.lemma_ for 
entity_token in entity_text)\n if not entity_lemmas:\n # Some tables have empty cells.\n return 0\n seen_entity_lemmas = set()\n token_index_left = token_index\n while token_index < len(tokens) and tokens[token_index].lemma_ in entity_lemmas:\n seen_entity_lemmas.add(tokens[token_index].lemma_)\n token_index += 1\n while token_index_left >= 0 and tokens[token_index_left].lemma_ in entity_lemmas:\n seen_entity_lemmas.add(tokens[token_index_left].lemma_)\n token_index_left -= 1\n return len(seen_entity_lemmas) / len(entity_lemmas)\n"
] | [
[
"torch.stack",
"torch.FloatTensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
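The KnowledgeGraphField payload above defines several linking features; the smallest self-contained one is _edit_distance, which scores a token against an entity's text as 1 minus the edit distance divided by the token length. The strings below are invented toy values, not taken from WikiTableQuestions; only the editdistance package (the same eval call used in the archived file) is assumed.

    import editdistance

    # Toy example of the _edit_distance linking feature:
    # score = 1 - edit_distance(entity text, utterance token) / len(token).
    entity_text = "year"
    token = "years"
    score = 1.0 - editdistance.eval(entity_text, token) / len(token)
    print(score)  # 1 - 1/5 = 0.8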
vishalbelsare/py-bbn | [
"fe6848b4e0fe78d78af13cd06c0d29980ecd5d7f"
] | [
"pybbn/graph/factory.py"
] | [
"import itertools\nimport json\nfrom itertools import product\n\nimport networkx as nx\nimport pandas as pd\nfrom networkx.algorithms.dag import topological_sort\n\nfrom pybbn.graph.dag import Bbn\nfrom pybbn.graph.edge import Edge, EdgeType\nfrom pybbn.graph.node import BbnNode\nfrom pybbn.graph.variable import Variable\n\n\nclass Factory(object):\n \"\"\"\n Factory to convert other API BBNs into py-bbn.\n \"\"\"\n\n @staticmethod\n def from_libpgm_discrete_json(j):\n \"\"\"\n Converts a libpgm discrete network as specified by a JSON string into a py-bbn one.\n Look at https://pythonhosted.org/libpgm/unittestdict.html.\n\n :param j: String representing JSON.\n :return: py-bbn BBN.\n \"\"\"\n return Factory.from_libpgm_discrete_dictionary(json.loads(j))\n\n @staticmethod\n def from_libpgm_discrete_dictionary(d):\n \"\"\"\n Converts a libpgm discrete network as specified by a dictionary into a py-bbn one.\n Look at https://pythonhosted.org/libpgm/unittestdict.html.\n\n :param d: A dictionary representing a libpgm discrete network.\n :return: py-bbn BBN.\n \"\"\"\n\n class LibpgmBBN(object):\n def __init__(self, V, E, Vdata):\n self.V = V\n self.E = E\n self.Vdata = Vdata\n\n bn = LibpgmBBN(d['V'], d['E'], d['Vdata'])\n return Factory.from_libpgm_discrete_object(bn)\n\n @staticmethod\n def from_libpgm_discrete_object(bn):\n \"\"\"\n Converts a libpgm discrete network object into a py-bbn one.\n\n :param bn: libpgm discrete BBN.\n :return: py-bbn BBN.\n \"\"\"\n\n def get_nodes(bn, domain_spaces=True):\n def get_parent_domains(name, bn):\n parents = bn.Vdata[name]['parents']\n domains = []\n\n if parents is None or len(parents) == 0:\n return domains\n\n for parent in parents:\n domain = bn.Vdata[parent]['vals'][:]\n domains.append(domain)\n return domains\n\n def cross_product(domains):\n products = []\n\n if domains is None or len(domains) == 0:\n return products\n\n for e in itertools.product(*domains):\n products.append(e)\n return products\n\n def stringify_cross_product(pa_domains, domain_spaces=True):\n joiner_delim = ', ' if domain_spaces is True else ','\n s = []\n for pa_domain in pa_domains:\n r = joiner_delim.join([\"'{}'\".format(v) for v in pa_domain])\n r = '[{}]'.format(r)\n s.append(r)\n return s\n\n def get_cond_probs(name, bn, domain_spaces=True):\n probs = []\n pa_domains = stringify_cross_product(cross_product(get_parent_domains(name, bn)), domain_spaces)\n if len(pa_domains) == 0:\n probs = bn.Vdata[name]['cprob'][:]\n else:\n for pa_domain in pa_domains:\n cprob = bn.Vdata[name]['cprob'][pa_domain]\n probs.extend(cprob)\n\n return probs\n\n nodes = {}\n for name in bn.V:\n domain = bn.Vdata[name]['vals'][:]\n order = bn.Vdata[name]['ord']\n probs = get_cond_probs(name, bn, domain_spaces)\n node = BbnNode(Variable(order, name, domain), probs)\n nodes[name] = node\n return nodes\n\n def get_edges(bn, nodes):\n edges = []\n for k, v in bn.Vdata.items():\n ch = nodes[k]\n if v['parents'] is not None and len(v['parents']) > 0:\n parents = [nodes[pa] for pa in v['parents']]\n for pa in parents:\n edge = Edge(pa, ch, EdgeType.DIRECTED)\n edges.append(edge)\n return edges\n\n nodes = get_nodes(bn)\n edges = get_edges(bn, nodes)\n\n bbn = Bbn()\n\n for node in sorted(nodes.values(), key=lambda n: n.id):\n bbn.add_node(node)\n\n for e in edges:\n bbn.add_edge(e)\n\n return bbn\n\n @staticmethod\n def from_data(structure, df):\n \"\"\"\n Creates a BBN.\n\n :param structure: A dictionary where keys are names of children and values are list of parent names.\n :param df: A 
dataframe.\n :return: BBN.\n \"\"\"\n\n def get_profile(df):\n profile = {}\n for c in df.columns:\n values = sorted(list(df[c].value_counts().index))\n profile[c] = values\n return profile\n\n def get_n2i(parents):\n g = nx.DiGraph()\n for k in parents:\n g.add_node(k)\n for ch, pas in parents.items():\n for pa in pas:\n g.add_edge(pa, ch)\n nodes = list(topological_sort(g))\n return {n: i for i, n in enumerate(nodes)}\n\n def get_cpt(name, parents, n2v, df):\n parents = sorted(parents)\n n2v = {k: sorted(v) for k, v in n2v.items()}\n\n n = df.shape[0]\n\n cpts = []\n if len(parents) == 0:\n for v in n2v[name]:\n c = df[df[name] == v].shape[0]\n p = c / n\n cpts.append(p)\n else:\n domains = [(n, d) for n, d in n2v.items() if n in parents]\n domains = sorted(domains, key=lambda tup: tup[0])\n domain_names = [tup[0] for tup in domains]\n domain_values = [tup[1] for tup in domains]\n domains = list(product(*domain_values))\n\n for values in domains:\n probs = []\n denom_q = ' and '.join([f'{n}==\"{v}\"' for n, v in zip(domain_names, values)])\n for v in n2v[name]:\n numer_q = f'{name}==\"{v}\" and {denom_q}'\n\n numer = df.query(numer_q).shape[0] / n\n denom = df.query(denom_q).shape[0] / n\n prob = numer / denom\n probs.append(prob)\n probs = pd.Series(probs)\n probs = probs / probs.sum()\n probs = list(probs)\n cpts.extend(probs)\n\n return cpts\n\n n2v = get_profile(df)\n n2i = get_n2i(df)\n n2c = {n: get_cpt(n, structure[n], n2v, df) for n in structure}\n\n bbn = Bbn()\n\n nodes = {}\n for name in n2v:\n idx = n2i[name]\n values = n2v[name]\n cpts = n2c[name]\n\n v = Variable(idx, name, values)\n node = BbnNode(v, cpts)\n nodes[name] = node\n bbn.add_node(node)\n\n for ch, parents in structure.items():\n ch_node = nodes[ch]\n for pa in parents:\n pa_node = nodes[pa]\n\n edge = Edge(pa_node, ch_node, EdgeType.DIRECTED)\n bbn.add_edge(edge)\n\n return bbn\n"
] | [
[
"pandas.Series"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
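The py-bbn payload above documents Factory.from_data(structure, df), where structure maps each child variable to its list of parents and df is a pandas DataFrame of categorical values. The sketch below is a hypothetical usage example, not code from the archived repository: the two-column DataFrame and the 'rain'/'sprinkler' names are invented, values are kept as strings because get_cpt builds string-equality query expressions, and py-bbn (with its pandas and networkx dependencies) is assumed to be installed.

    import pandas as pd
    from pybbn.graph.factory import Factory

    # Toy data (invented): a two-node network where 'rain' influences 'sprinkler'.
    df = pd.DataFrame({
        'rain':      ['yes', 'yes', 'no', 'no', 'no', 'no'],
        'sprinkler': ['off', 'off', 'on', 'on', 'off', 'on'],
    })

    # Keys are children, values are lists of parent names (empty list = root node).
    structure = {'rain': [], 'sprinkler': ['rain']}

    bbn = Factory.from_data(structure, df)
    print(bbn)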
harrisonfeng/ray | [
"7b08db9f8cd85d185879e5bef778e8855f2a06cf"
] | [
"rllib/evaluation/sampler.py"
] | [
"from collections import defaultdict, namedtuple\nimport logging\nimport numpy as np\nimport queue\nimport threading\nimport time\n\nfrom ray.util.debug import log_once\nfrom ray.rllib.evaluation.episode import MultiAgentEpisode, _flatten_action\nfrom ray.rllib.evaluation.rollout_metrics import RolloutMetrics\nfrom ray.rllib.evaluation.sample_batch_builder import \\\n MultiAgentSampleBatchBuilder\nfrom ray.rllib.policy.policy import clip_action\nfrom ray.rllib.policy.tf_policy import TFPolicy\nfrom ray.rllib.env.base_env import BaseEnv, ASYNC_RESET_RETURN\nfrom ray.rllib.env.atari_wrappers import get_wrapper_by_cls, MonitorEnv\nfrom ray.rllib.offline import InputReader\nfrom ray.rllib.utils.annotations import override\nfrom ray.rllib.utils.debug import summarize\nfrom ray.rllib.utils.tuple_actions import TupleActions\nfrom ray.rllib.utils.tf_run_builder import TFRunBuilder\n\nlogger = logging.getLogger(__name__)\n\nPolicyEvalData = namedtuple(\"PolicyEvalData\", [\n \"env_id\", \"agent_id\", \"obs\", \"info\", \"rnn_state\", \"prev_action\",\n \"prev_reward\"\n])\n\n\nclass PerfStats:\n \"\"\"Sampler perf stats that will be included in rollout metrics.\"\"\"\n\n def __init__(self):\n self.iters = 0\n self.env_wait_time = 0.0\n self.processing_time = 0.0\n self.inference_time = 0.0\n\n def get(self):\n return {\n \"mean_env_wait_ms\": self.env_wait_time * 1000 / self.iters,\n \"mean_processing_ms\": self.processing_time * 1000 / self.iters,\n \"mean_inference_ms\": self.inference_time * 1000 / self.iters\n }\n\n\nclass SamplerInput(InputReader):\n \"\"\"Reads input experiences from an existing sampler.\"\"\"\n\n @override(InputReader)\n def next(self):\n batches = [self.get_data()]\n batches.extend(self.get_extra_batches())\n if len(batches) > 1:\n return batches[0].concat_samples(batches)\n else:\n return batches[0]\n\n\nclass SyncSampler(SamplerInput):\n def __init__(self,\n env,\n policies,\n policy_mapping_fn,\n preprocessors,\n obs_filters,\n clip_rewards,\n rollout_fragment_length,\n callbacks,\n horizon=None,\n pack=False,\n tf_sess=None,\n clip_actions=True,\n soft_horizon=False,\n no_done_at_end=False):\n self.base_env = BaseEnv.to_base_env(env)\n self.rollout_fragment_length = rollout_fragment_length\n self.horizon = horizon\n self.policies = policies\n self.policy_mapping_fn = policy_mapping_fn\n self.preprocessors = preprocessors\n self.obs_filters = obs_filters\n self.extra_batches = queue.Queue()\n self.perf_stats = PerfStats()\n self.rollout_provider = _env_runner(\n self.base_env, self.extra_batches.put, self.policies,\n self.policy_mapping_fn, self.rollout_fragment_length, self.horizon,\n self.preprocessors, self.obs_filters, clip_rewards, clip_actions,\n pack, callbacks, tf_sess, self.perf_stats, soft_horizon,\n no_done_at_end)\n self.metrics_queue = queue.Queue()\n\n def get_data(self):\n while True:\n item = next(self.rollout_provider)\n if isinstance(item, RolloutMetrics):\n self.metrics_queue.put(item)\n else:\n return item\n\n def get_metrics(self):\n completed = []\n while True:\n try:\n completed.append(self.metrics_queue.get_nowait()._replace(\n perf_stats=self.perf_stats.get()))\n except queue.Empty:\n break\n return completed\n\n def get_extra_batches(self):\n extra = []\n while True:\n try:\n extra.append(self.extra_batches.get_nowait())\n except queue.Empty:\n break\n return extra\n\n\nclass AsyncSampler(threading.Thread, SamplerInput):\n def __init__(self,\n env,\n policies,\n policy_mapping_fn,\n preprocessors,\n obs_filters,\n clip_rewards,\n 
rollout_fragment_length,\n callbacks,\n horizon=None,\n pack=False,\n tf_sess=None,\n clip_actions=True,\n blackhole_outputs=False,\n soft_horizon=False,\n no_done_at_end=False):\n for _, f in obs_filters.items():\n assert getattr(f, \"is_concurrent\", False), \\\n \"Observation Filter must support concurrent updates.\"\n self.base_env = BaseEnv.to_base_env(env)\n threading.Thread.__init__(self)\n self.queue = queue.Queue(5)\n self.extra_batches = queue.Queue()\n self.metrics_queue = queue.Queue()\n self.rollout_fragment_length = rollout_fragment_length\n self.horizon = horizon\n self.policies = policies\n self.policy_mapping_fn = policy_mapping_fn\n self.preprocessors = preprocessors\n self.obs_filters = obs_filters\n self.clip_rewards = clip_rewards\n self.daemon = True\n self.pack = pack\n self.tf_sess = tf_sess\n self.callbacks = callbacks\n self.clip_actions = clip_actions\n self.blackhole_outputs = blackhole_outputs\n self.soft_horizon = soft_horizon\n self.no_done_at_end = no_done_at_end\n self.perf_stats = PerfStats()\n self.shutdown = False\n\n def run(self):\n try:\n self._run()\n except BaseException as e:\n self.queue.put(e)\n raise e\n\n def _run(self):\n if self.blackhole_outputs:\n queue_putter = (lambda x: None)\n extra_batches_putter = (lambda x: None)\n else:\n queue_putter = self.queue.put\n extra_batches_putter = (\n lambda x: self.extra_batches.put(x, timeout=600.0))\n rollout_provider = _env_runner(\n self.base_env, extra_batches_putter, self.policies,\n self.policy_mapping_fn, self.rollout_fragment_length, self.horizon,\n self.preprocessors, self.obs_filters, self.clip_rewards,\n self.clip_actions, self.pack, self.callbacks, self.tf_sess,\n self.perf_stats, self.soft_horizon, self.no_done_at_end)\n while not self.shutdown:\n # The timeout variable exists because apparently, if one worker\n # dies, the other workers won't die with it, unless the timeout is\n # set to some large number. This is an empirical observation.\n item = next(rollout_provider)\n if isinstance(item, RolloutMetrics):\n self.metrics_queue.put(item)\n else:\n queue_putter(item)\n\n def get_data(self):\n if not self.is_alive():\n raise RuntimeError(\"Sampling thread has died\")\n rollout = self.queue.get(timeout=600.0)\n\n # Propagate errors\n if isinstance(rollout, BaseException):\n raise rollout\n\n return rollout\n\n def get_metrics(self):\n completed = []\n while True:\n try:\n completed.append(self.metrics_queue.get_nowait()._replace(\n perf_stats=self.perf_stats.get()))\n except queue.Empty:\n break\n return completed\n\n def get_extra_batches(self):\n extra = []\n while True:\n try:\n extra.append(self.extra_batches.get_nowait())\n except queue.Empty:\n break\n return extra\n\n\ndef _env_runner(base_env, extra_batch_callback, policies, policy_mapping_fn,\n rollout_fragment_length, horizon, preprocessors, obs_filters,\n clip_rewards, clip_actions, pack, callbacks, tf_sess,\n perf_stats, soft_horizon, no_done_at_end):\n \"\"\"This implements the common experience collection logic.\n\n Args:\n base_env (BaseEnv): env implementing BaseEnv.\n extra_batch_callback (fn): function to send extra batch data to.\n policies (dict): Map of policy ids to Policy instances.\n policy_mapping_fn (func): Function that maps agent ids to policy ids.\n This is called when an agent first enters the environment. The\n agent is then \"bound\" to the returned policy for the episode.\n rollout_fragment_length (int): Number of episode steps before\n `SampleBatch` is yielded. 
Set to infinity to yield complete\n episodes.\n horizon (int): Horizon of the episode.\n preprocessors (dict): Map of policy id to preprocessor for the\n observations prior to filtering.\n obs_filters (dict): Map of policy id to filter used to process\n observations for the policy.\n clip_rewards (bool): Whether to clip rewards before postprocessing.\n pack (bool): Whether to pack multiple episodes into each batch. This\n guarantees batches will be exactly `rollout_fragment_length` in\n size.\n clip_actions (bool): Whether to clip actions to the space range.\n callbacks (dict): User callbacks to run on episode events.\n tf_sess (Session|None): Optional tensorflow session to use for batching\n TF policy evaluations.\n perf_stats (PerfStats): Record perf stats into this object.\n soft_horizon (bool): Calculate rewards but don't reset the\n environment when the horizon is hit.\n no_done_at_end (bool): Ignore the done=True at the end of the episode\n and instead record done=False.\n\n Yields:\n rollout (SampleBatch): Object containing state, action, reward,\n terminal condition, and other fields as dictated by `policy`.\n \"\"\"\n\n # Try to get Env's max_episode_steps prop. If it doesn't exist, catch\n # error and continue.\n max_episode_steps = None\n try:\n max_episode_steps = base_env.get_unwrapped()[0].spec.max_episode_steps\n except Exception:\n pass\n\n # Trainer has a given `horizon` setting.\n if horizon:\n # `horizon` is larger than env's limit -> Error and explain how\n # to increase Env's own episode limit.\n if max_episode_steps and horizon > max_episode_steps:\n raise ValueError(\n \"Your `horizon` setting ({}) is larger than the Env's own \"\n \"timestep limit ({})! Try to increase the Env's limit via \"\n \"setting its `spec.max_episode_steps` property.\".format(\n horizon, max_episode_steps))\n # Otherwise, set Trainer's horizon to env's max-steps.\n elif max_episode_steps:\n horizon = max_episode_steps\n logger.debug(\n \"No episode horizon specified, setting it to Env's limit ({}).\".\n format(max_episode_steps))\n else:\n horizon = float(\"inf\")\n logger.debug(\"No episode horizon specified, assuming inf.\")\n\n # Pool of batch builders, which can be shared across episodes to pack\n # trajectory data.\n batch_builder_pool = []\n\n def get_batch_builder():\n if batch_builder_pool:\n return batch_builder_pool.pop()\n else:\n return MultiAgentSampleBatchBuilder(\n policies, clip_rewards, callbacks.get(\"on_postprocess_traj\"))\n\n def new_episode():\n episode = MultiAgentEpisode(policies, policy_mapping_fn,\n get_batch_builder, extra_batch_callback)\n # Call each policy's Exploration.on_episode_start method.\n for p in policies.values():\n p.exploration.on_episode_start(\n policy=p,\n environment=base_env,\n episode=episode,\n tf_sess=getattr(p, \"_sess\", None))\n # Call custom on_episode_start callback.\n if callbacks.get(\"on_episode_start\"):\n callbacks[\"on_episode_start\"]({\n \"env\": base_env,\n \"policy\": policies,\n \"episode\": episode,\n })\n return episode\n\n active_episodes = defaultdict(new_episode)\n\n while True:\n perf_stats.iters += 1\n t0 = time.time()\n # Get observations from all ready agents\n unfiltered_obs, rewards, dones, infos, off_policy_actions = \\\n base_env.poll()\n perf_stats.env_wait_time += time.time() - t0\n\n if log_once(\"env_returns\"):\n logger.info(\"Raw obs from env: {}\".format(\n summarize(unfiltered_obs)))\n logger.info(\"Info return from env: {}\".format(summarize(infos)))\n\n # Process observations and prepare for policy 
evaluation\n t1 = time.time()\n active_envs, to_eval, outputs = _process_observations(\n base_env, policies, batch_builder_pool, active_episodes,\n unfiltered_obs, rewards, dones, infos, off_policy_actions, horizon,\n preprocessors, obs_filters, rollout_fragment_length, pack,\n callbacks, soft_horizon, no_done_at_end)\n perf_stats.processing_time += time.time() - t1\n for o in outputs:\n yield o\n\n # Do batched policy eval\n t2 = time.time()\n eval_results = _do_policy_eval(tf_sess, to_eval, policies,\n active_episodes)\n perf_stats.inference_time += time.time() - t2\n\n # Process results and update episode state\n t3 = time.time()\n actions_to_send = _process_policy_eval_results(\n to_eval, eval_results, active_episodes, active_envs,\n off_policy_actions, policies, clip_actions)\n perf_stats.processing_time += time.time() - t3\n\n # Return computed actions to ready envs. We also send to envs that have\n # taken off-policy actions; those envs are free to ignore the action.\n t4 = time.time()\n base_env.send_actions(actions_to_send)\n perf_stats.env_wait_time += time.time() - t4\n\n\ndef _process_observations(base_env, policies, batch_builder_pool,\n active_episodes, unfiltered_obs, rewards, dones,\n infos, off_policy_actions, horizon, preprocessors,\n obs_filters, rollout_fragment_length, pack,\n callbacks, soft_horizon, no_done_at_end):\n \"\"\"Record new data from the environment and prepare for policy evaluation.\n\n Returns:\n active_envs: set of non-terminated env ids\n to_eval: map of policy_id to list of agent PolicyEvalData\n outputs: list of metrics and samples to return from the sampler\n \"\"\"\n\n active_envs = set()\n to_eval = defaultdict(list)\n outputs = []\n large_batch_threshold = max(1000, rollout_fragment_length * 10) if \\\n rollout_fragment_length != float(\"inf\") else 5000\n\n # For each environment\n for env_id, agent_obs in unfiltered_obs.items():\n new_episode = env_id not in active_episodes\n episode = active_episodes[env_id]\n if not new_episode:\n episode.length += 1\n episode.batch_builder.count += 1\n episode._add_agent_rewards(rewards[env_id])\n\n if (episode.batch_builder.total() > large_batch_threshold\n and log_once(\"large_batch_warning\")):\n logger.warning(\n \"More than {} observations for {} env steps \".format(\n episode.batch_builder.total(),\n episode.batch_builder.count) + \"are buffered in \"\n \"the sampler. If this is more than you expected, check that \"\n \"that you set a horizon on your environment correctly and that\"\n \" it terminates at some point. \"\n \"Note: In multi-agent environments, `rollout_fragment_length` \"\n \"sets the batch size based on environment steps, not the \"\n \"steps of \"\n \"individual agents, which can result in unexpectedly large \"\n \"batches. Also, you may be in evaluation waiting for your Env \"\n \"to terminate (batch_mode=`complete_episodes`). 
Make sure it \"\n \"does at some point.\")\n\n # Check episode termination conditions\n if dones[env_id][\"__all__\"] or episode.length >= horizon:\n hit_horizon = (episode.length >= horizon\n and not dones[env_id][\"__all__\"])\n all_done = True\n atari_metrics = _fetch_atari_metrics(base_env)\n if atari_metrics is not None:\n for m in atari_metrics:\n outputs.append(\n m._replace(custom_metrics=episode.custom_metrics))\n else:\n outputs.append(\n RolloutMetrics(episode.length, episode.total_reward,\n dict(episode.agent_rewards),\n episode.custom_metrics, {},\n episode.hist_data))\n else:\n hit_horizon = False\n all_done = False\n active_envs.add(env_id)\n\n # For each agent in the environment.\n for agent_id, raw_obs in agent_obs.items():\n policy_id = episode.policy_for(agent_id)\n prep_obs = _get_or_raise(preprocessors,\n policy_id).transform(raw_obs)\n if log_once(\"prep_obs\"):\n logger.info(\"Preprocessed obs: {}\".format(summarize(prep_obs)))\n\n filtered_obs = _get_or_raise(obs_filters, policy_id)(prep_obs)\n if log_once(\"filtered_obs\"):\n logger.info(\"Filtered obs: {}\".format(summarize(filtered_obs)))\n\n agent_done = bool(all_done or dones[env_id].get(agent_id))\n if not agent_done:\n to_eval[policy_id].append(\n PolicyEvalData(env_id, agent_id, filtered_obs,\n infos[env_id].get(agent_id, {}),\n episode.rnn_state_for(agent_id),\n episode.last_action_for(agent_id),\n rewards[env_id][agent_id] or 0.0))\n\n last_observation = episode.last_observation_for(agent_id)\n episode._set_last_observation(agent_id, filtered_obs)\n episode._set_last_raw_obs(agent_id, raw_obs)\n episode._set_last_info(agent_id, infos[env_id].get(agent_id, {}))\n\n # Record transition info if applicable\n if (last_observation is not None and infos[env_id].get(\n agent_id, {}).get(\"training_enabled\", True)):\n episode.batch_builder.add_values(\n agent_id,\n policy_id,\n t=episode.length - 1,\n eps_id=episode.episode_id,\n agent_index=episode._agent_index(agent_id),\n obs=last_observation,\n actions=episode.last_action_for(agent_id),\n rewards=rewards[env_id][agent_id],\n prev_actions=episode.prev_action_for(agent_id),\n prev_rewards=episode.prev_reward_for(agent_id),\n dones=(False if (no_done_at_end\n or (hit_horizon and soft_horizon)) else\n agent_done),\n infos=infos[env_id].get(agent_id, {}),\n new_obs=filtered_obs,\n **episode.last_pi_info_for(agent_id))\n\n # Invoke the step callback after the step is logged to the episode\n if callbacks.get(\"on_episode_step\"):\n callbacks[\"on_episode_step\"]({\"env\": base_env, \"episode\": episode})\n\n # Cut the batch if we're not packing multiple episodes into one,\n # or if we've exceeded the requested batch size.\n if episode.batch_builder.has_pending_agent_data():\n if dones[env_id][\"__all__\"] and not no_done_at_end:\n episode.batch_builder.check_missing_dones()\n if (all_done and not pack) or \\\n episode.batch_builder.count >= rollout_fragment_length:\n outputs.append(episode.batch_builder.build_and_reset(episode))\n elif all_done:\n # Make sure postprocessor stays within one episode\n episode.batch_builder.postprocess_batch_so_far(episode)\n\n if all_done:\n # Handle episode termination\n batch_builder_pool.append(episode.batch_builder)\n # Call each policy's Exploration.on_episode_end method.\n for p in policies.values():\n p.exploration.on_episode_end(\n policy=p,\n environment=base_env,\n episode=episode,\n tf_sess=getattr(p, \"_sess\", None))\n # Call custom on_episode_end callback.\n if callbacks.get(\"on_episode_end\"):\n 
callbacks[\"on_episode_end\"]({\n \"env\": base_env,\n \"policy\": policies,\n \"episode\": episode\n })\n if hit_horizon and soft_horizon:\n episode.soft_reset()\n resetted_obs = agent_obs\n else:\n del active_episodes[env_id]\n resetted_obs = base_env.try_reset(env_id)\n if resetted_obs is None:\n # Reset not supported, drop this env from the ready list\n if horizon != float(\"inf\"):\n raise ValueError(\n \"Setting episode horizon requires reset() support \"\n \"from the environment.\")\n elif resetted_obs != ASYNC_RESET_RETURN:\n # Creates a new episode if this is not async return\n # If reset is async, we will get its result in some future poll\n episode = active_episodes[env_id]\n for agent_id, raw_obs in resetted_obs.items():\n policy_id = episode.policy_for(agent_id)\n policy = _get_or_raise(policies, policy_id)\n prep_obs = _get_or_raise(preprocessors,\n policy_id).transform(raw_obs)\n filtered_obs = _get_or_raise(obs_filters,\n policy_id)(prep_obs)\n episode._set_last_observation(agent_id, filtered_obs)\n to_eval[policy_id].append(\n PolicyEvalData(\n env_id, agent_id, filtered_obs,\n episode.last_info_for(agent_id) or {},\n episode.rnn_state_for(agent_id),\n np.zeros_like(\n _flatten_action(policy.action_space.sample())),\n 0.0))\n\n return active_envs, to_eval, outputs\n\n\ndef _do_policy_eval(tf_sess, to_eval, policies, active_episodes):\n \"\"\"Call compute actions on observation batches to get next actions.\n\n Returns:\n eval_results: dict of policy to compute_action() outputs.\n \"\"\"\n\n eval_results = {}\n\n if tf_sess:\n builder = TFRunBuilder(tf_sess, \"policy_eval\")\n pending_fetches = {}\n else:\n builder = None\n\n if log_once(\"compute_actions_input\"):\n logger.info(\"Inputs to compute_actions():\\n\\n{}\\n\".format(\n summarize(to_eval)))\n\n for policy_id, eval_data in to_eval.items():\n rnn_in = [t.rnn_state for t in eval_data]\n policy = _get_or_raise(policies, policy_id)\n if builder and (policy.compute_actions.__code__ is\n TFPolicy.compute_actions.__code__):\n\n obs_batch = [t.obs for t in eval_data]\n state_batches = _to_column_format(rnn_in)\n\n # TODO(ekl): how can we make info batch available to TF code?\n pending_fetches[policy_id] = policy._build_compute_actions(\n builder,\n obs_batch=obs_batch,\n state_batches=state_batches,\n prev_action_batch=[t.prev_action for t in eval_data],\n prev_reward_batch=[t.prev_reward for t in eval_data],\n timestep=policy.global_timestep)\n else:\n # TODO(sven): Does this work for LSTM torch?\n rnn_in_cols = [\n np.stack([row[i] for row in rnn_in])\n for i in range(len(rnn_in[0]))\n ]\n eval_results[policy_id] = policy.compute_actions(\n [t.obs for t in eval_data],\n state_batches=rnn_in_cols,\n prev_action_batch=[t.prev_action for t in eval_data],\n prev_reward_batch=[t.prev_reward for t in eval_data],\n info_batch=[t.info for t in eval_data],\n episodes=[active_episodes[t.env_id] for t in eval_data],\n timestep=policy.global_timestep)\n if builder:\n for pid, v in pending_fetches.items():\n eval_results[pid] = builder.get(v)\n\n if log_once(\"compute_actions_result\"):\n logger.info(\"Outputs of compute_actions():\\n\\n{}\\n\".format(\n summarize(eval_results)))\n\n return eval_results\n\n\ndef _process_policy_eval_results(to_eval, eval_results, active_episodes,\n active_envs, off_policy_actions, policies,\n clip_actions):\n \"\"\"Process the output of policy neural network evaluation.\n\n Records policy evaluation results into the given episode objects and\n returns replies to send back to agents in the env.\n\n 
Returns:\n actions_to_send: nested dict of env id -> agent id -> agent replies.\n \"\"\"\n\n actions_to_send = defaultdict(dict)\n for env_id in active_envs:\n actions_to_send[env_id] = {} # at minimum send empty dict\n\n for policy_id, eval_data in to_eval.items():\n rnn_in_cols = _to_column_format([t.rnn_state for t in eval_data])\n\n actions = eval_results[policy_id][0]\n rnn_out_cols = eval_results[policy_id][1]\n pi_info_cols = eval_results[policy_id][2]\n\n if len(rnn_in_cols) != len(rnn_out_cols):\n raise ValueError(\"Length of RNN in did not match RNN out, got: \"\n \"{} vs {}\".format(rnn_in_cols, rnn_out_cols))\n # Add RNN state info\n for f_i, column in enumerate(rnn_in_cols):\n pi_info_cols[\"state_in_{}\".format(f_i)] = column\n for f_i, column in enumerate(rnn_out_cols):\n pi_info_cols[\"state_out_{}\".format(f_i)] = column\n # Save output rows\n actions = _unbatch_tuple_actions(actions)\n policy = _get_or_raise(policies, policy_id)\n for i, action in enumerate(actions):\n env_id = eval_data[i].env_id\n agent_id = eval_data[i].agent_id\n if clip_actions:\n actions_to_send[env_id][agent_id] = clip_action(\n action, policy.action_space)\n else:\n actions_to_send[env_id][agent_id] = action\n episode = active_episodes[env_id]\n episode._set_rnn_state(agent_id, [c[i] for c in rnn_out_cols])\n episode._set_last_pi_info(\n agent_id, {k: v[i]\n for k, v in pi_info_cols.items()})\n if env_id in off_policy_actions and \\\n agent_id in off_policy_actions[env_id]:\n episode._set_last_action(agent_id,\n off_policy_actions[env_id][agent_id])\n else:\n episode._set_last_action(agent_id, action)\n\n return actions_to_send\n\n\ndef _fetch_atari_metrics(base_env):\n \"\"\"Atari games have multiple logical episodes, one per life.\n\n However for metrics reporting we count full episodes all lives included.\n \"\"\"\n unwrapped = base_env.get_unwrapped()\n if not unwrapped:\n return None\n atari_out = []\n for u in unwrapped:\n monitor = get_wrapper_by_cls(u, MonitorEnv)\n if not monitor:\n return None\n for eps_rew, eps_len in monitor.next_episode_results():\n atari_out.append(RolloutMetrics(eps_len, eps_rew))\n return atari_out\n\n\ndef _unbatch_tuple_actions(action_batch):\n # convert list of batches -> batch of lists\n if isinstance(action_batch, TupleActions):\n out = []\n for j in range(len(action_batch.batches[0])):\n out.append([\n action_batch.batches[i][j]\n for i in range(len(action_batch.batches))\n ])\n return out\n return action_batch\n\n\ndef _to_column_format(rnn_state_rows):\n num_cols = len(rnn_state_rows[0])\n return [[row[i] for row in rnn_state_rows] for i in range(num_cols)]\n\n\ndef _get_or_raise(mapping, policy_id):\n \"\"\"Returns a Policy object under key `policy_id` in `mapping`.\n\n Throws an error if `policy_id` cannot be found.\n\n Returns:\n Policy: The found Policy object.\n \"\"\"\n if policy_id not in mapping:\n raise ValueError(\n \"Could not find policy for agent: agent policy id `{}` not \"\n \"in policy map keys {}.\".format(policy_id, mapping.keys()))\n return mapping[policy_id]\n"
] | [
[
"numpy.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
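The sampler payload above repeatedly converts per-agent RNN state from row format to column format via _to_column_format before batched policy evaluation. The pure-Python toy below, with invented state values, only illustrates that transposition; it does not exercise RLlib itself.

    # Toy values illustrating the _to_column_format helper: per-agent rows of
    # RNN state become one list per state slot, ready for batched evaluation.
    rnn_state_rows = [
        [[0.1, 0.2], [0.0]],   # agent/env 1: state_0, state_1
        [[0.3, 0.4], [1.0]],   # agent/env 2: state_0, state_1
    ]
    num_cols = len(rnn_state_rows[0])
    rnn_in_cols = [[row[i] for row in rnn_state_rows] for i in range(num_cols)]
    print(rnn_in_cols)  # [[[0.1, 0.2], [0.3, 0.4]], [[0.0], [1.0]]]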
zinechant/BERT-pytorch | [
"7c8bc555f29ff7ba336b38f2eddd072d7910e2bd"
] | [
"trainer/pretrain.py"
] | [
"import torch\nimport torch.nn as nn\nfrom torch.optim import Adam\nfrom torch.utils.data import DataLoader\n\nfrom ..model import BERTLM, BERT\nfrom .optim_schedule import ScheduledOptim\n\nimport tqdm\nimport numpy\n\ndef hook(arr, l, iNo):\n def trace(module, input, output):\n if iNo:\n cid = input[0].get_device()\n arr[l][cid].append(input[0].cpu().detach().numpy())\n print(input[0].shape)\n else:\n cid = output[0].get_device()\n arr[l][cid].append(output[0].cpu().detach().numpy())\n print(output[0].shape)\n return trace\n\nclass BERTTrainer:\n \"\"\"\n BERTTrainer make the pretrained BERT model with two LM training method.\n\n 1. Masked Language Model : 3.3.1 Task #1: Masked LM\n 2. Next Sentence prediction : 3.3.2 Task #2: Next Sentence Prediction\n\n please check the details on README.md with simple example.\n\n \"\"\"\n\n def __init__(self, bert: BERT, vocab_size: int,\n train_dataloader: DataLoader, test_dataloader: DataLoader = None,\n lr: float = 1e-4, betas=(0.9, 0.999), weight_decay: float = 0.01, warmup_steps=10000,\n with_cuda: bool = True, cuda_devices=None, log_freq: int = 10):\n \"\"\"\n :param bert: BERT model which you want to train\n :param vocab_size: total word vocab size\n :param train_dataloader: train dataset data loader\n :param test_dataloader: test dataset data loader [can be None]\n :param lr: learning rate of optimizer\n :param betas: Adam optimizer betas\n :param weight_decay: Adam optimizer weight decay param\n :param with_cuda: traning with cuda\n :param log_freq: logging frequency of the batch iteration\n \"\"\"\n\n # Setup cuda device for BERT training, argument -c, --cuda should be true\n cuda_condition = torch.cuda.is_available() and with_cuda\n self.device = torch.device(\"cuda:0\" if cuda_condition else \"cpu\")\n\n # This BERT model will be saved every epoch\n self.bert = bert\n # Initialize the BERT Language Model, with BERT model\n self.model = BERTLM(bert, vocab_size).to(self.device)\n\n # Distributed GPU training if CUDA can detect more than 1 GPU\n if with_cuda and torch.cuda.device_count() > 1:\n print(\"Using %d GPUS for BERT\" % torch.cuda.device_count())\n self.model = nn.DataParallel(self.model, device_ids=cuda_devices)\n\n # Setting the train and test data loader\n self.train_data = train_dataloader\n self.test_data = test_dataloader\n\n # Setting the Adam optimizer with hyper-param\n self.optim = Adam(self.model.parameters(), lr=lr, betas=betas, weight_decay=weight_decay)\n self.optim_schedule = ScheduledOptim(self.optim, self.bert.hidden, n_warmup_steps=warmup_steps)\n\n # Using Negative Log Likelihood Loss function for predicting the masked_token\n self.criterion = nn.NLLLoss(ignore_index=0)\n\n self.log_freq = log_freq\n\n print(\"Total Parameters:\", sum([p.nelement() for p in self.model.parameters()]))\n\n def train(self, epoch, output_path):\n self.iteration(epoch, self.train_data, output_path)\n\n def test(self, epoch):\n self.iteration(epoch, self.test_data, None, train=False)\n\n def iteration(self, epoch, data_loader, output_path, train=True):\n \"\"\"\n loop over the data_loader for training or testing\n if on train status, backward operation is activated\n and also auto save the model every peoch\n\n :param epoch: current epoch index\n :param data_loader: torch.utils.data.DataLoader for iteration\n :param train: boolean value of is train or test\n :return: None\n \"\"\"\n str_code = \"train\" if train else \"test\"\n\n # Setting the tqdm progress bar\n data_iter = tqdm.tqdm(enumerate(data_loader),\n desc=\"EP_%s:%d\" % 
(str_code, epoch),\n total=len(data_loader),\n bar_format=\"{l_bar}{r_bar}\")\n\n avg_loss = 0.0\n total_correct = 0\n total_element = 0\n\n if output_path:\n handles = []\n ls = range(len(self.bert.transformer_blocks))\n cs = range(torch.cuda.device_count())\n arrs = [[[[] for c in cs] for l in ls],\n [[[] for c in cs] for l in ls]]\n for l, layer in enumerate(self.bert.transformer_blocks):\n handles.append(layer.register_forward_hook(hook(arrs[0], l, True)))\n handles.append(layer.register_full_backward_hook(hook(arrs[1], l, True)))\n # handles.append(layer.register_forward_hook(hook(arrs[0], False)))\n\n for i, data in data_iter:\n if output_path and (i == 10):\n for handle in handles:\n handle.remove()\n arr = numpy.array(arrs)\n print(\"[TRACE]: \" + str(arr.shape))\n with open(output_path + (\"_ep%d.trace\" % epoch), \"wb\") as no:\n numpy.save(no, arr)\n\n # 0. batch_data will be sent into the device(GPU or cpu)\n data = {key: value.to(self.device) for key, value in data.items()}\n\n # 1. forward the next_sentence_prediction and masked_lm model\n next_sent_output, mask_lm_output = self.model.forward(data[\"bert_input\"], data[\"segment_label\"])\n\n # 2-1. NLL(negative log likelihood) loss of is_next classification result\n next_loss = self.criterion(next_sent_output, data[\"is_next\"])\n\n # 2-2. NLLLoss of predicting masked token word\n mask_loss = self.criterion(mask_lm_output.transpose(1, 2), data[\"bert_label\"])\n\n # 2-3. Adding next_loss and mask_loss : 3.4 Pre-training Procedure\n loss = next_loss + mask_loss\n\n # 3. backward and optimization only in train\n if train:\n self.optim_schedule.zero_grad()\n loss.backward()\n self.optim_schedule.step_and_update_lr()\n\n # next sentence prediction accuracy\n correct = next_sent_output.argmax(dim=-1).eq(data[\"is_next\"]).sum().item()\n avg_loss += loss.item()\n total_correct += correct\n total_element += data[\"is_next\"].nelement()\n\n post_fix = {\n \"epoch\": epoch,\n \"iter\": i,\n \"avg_loss\": avg_loss / (i + 1),\n \"avg_acc\": total_correct / total_element * 100,\n \"loss\": loss.item()\n }\n\n if i % self.log_freq == 0:\n data_iter.write(str(post_fix))\n\n print(\"EP%d_%s, avg_loss=\" % (epoch, str_code), avg_loss / len(data_iter), \"total_acc=\",\n total_correct * 100.0 / total_element)\n\n def save(self, epoch, file_path=\"output/bert_trained.model\"):\n \"\"\"\n Saving the current BERT model on file_path\n\n :param epoch: current epoch number\n :param file_path: model output path which gonna be file_path+\"ep%d\" % epoch\n :return: final_output_path\n \"\"\"\n output_path = file_path + \".ep%d\" % epoch\n torch.save(self.bert.state_dict(), output_path)\n print(\"EP:%d Model Saved on:\" % epoch, output_path)\n return output_path\n"
] | [
[
"torch.nn.NLLLoss",
"numpy.save",
"torch.nn.DataParallel",
"torch.cuda.is_available",
"torch.device",
"torch.cuda.device_count",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zxsted/PythonRobotics | [
"ed73b26db7eca9712dca696a9054f6ea2fcf26e9"
] | [
"PathPlanning/PotentialFieldPlanning/potential_field_planning.py"
] | [
"\"\"\"\n\nPotential Field based path planner\n\nauthor: Atsushi Sakai (@Atsushi_twi)\n\nRef:\nhttps://www.cs.cmu.edu/~motionplanning/lecture/Chap4-Potential-Field_howie.pdf\n\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Parameters\nKP = 5.0 # attractive potential gain\nETA = 100.0 # repulsive potential gain\nAREA_WIDTH = 30.0 # potential area width [m]\n\nshow_animation = True\n\n\ndef calc_potential_field(gx, gy, ox, oy, reso, rr):\n minx = min(ox) - AREA_WIDTH / 2.0\n miny = min(oy) - AREA_WIDTH / 2.0\n maxx = max(ox) + AREA_WIDTH / 2.0\n maxy = max(oy) + AREA_WIDTH / 2.0\n xw = round((maxx - minx) / reso)\n yw = round((maxy - miny) / reso)\n\n # calc each potential\n pmap = [[0.0 for i in range(yw)] for i in range(xw)]\n\n for ix in range(xw):\n x = ix * reso + minx\n\n for iy in range(yw):\n y = iy * reso + miny\n ug = calc_attractive_potential(x, y, gx, gy)\n uo = calc_repulsive_potential(x, y, ox, oy, rr)\n uf = ug + uo\n pmap[ix][iy] = uf\n\n return pmap, minx, miny\n\n\ndef calc_attractive_potential(x, y, gx, gy):\n return 0.5 * KP * np.hypot(x - gx, y - gy)\n\n\ndef calc_repulsive_potential(x, y, ox, oy, rr):\n # search nearest obstacle\n minid = -1\n dmin = float(\"inf\")\n for i in range(len(ox)):\n d = np.hypot(x - ox[i], y - oy[i])\n if dmin >= d:\n dmin = d\n minid = i\n\n # calc repulsive potential\n dq = np.hypot(x - ox[minid], y - oy[minid])\n\n if dq <= rr:\n if dq <= 0.1:\n dq = 0.1\n\n return 0.5 * ETA * (1.0 / dq - 1.0 / rr) ** 2\n else:\n return 0.0\n\n\ndef get_motion_model():\n # dx, dy\n motion = [[1, 0],\n [0, 1],\n [-1, 0],\n [0, -1],\n [-1, -1],\n [-1, 1],\n [1, -1],\n [1, 1]]\n\n return motion\n\n\ndef potential_field_planning(sx, sy, gx, gy, ox, oy, reso, rr):\n\n # calc potential field\n pmap, minx, miny = calc_potential_field(gx, gy, ox, oy, reso, rr)\n\n # search path\n d = np.hypot(sx - gx, sy - gy)\n ix = round((sx - minx) / reso)\n iy = round((sy - miny) / reso)\n gix = round((gx - minx) / reso)\n giy = round((gy - miny) / reso)\n\n if show_animation:\n draw_heatmap(pmap)\n plt.plot(ix, iy, \"*k\")\n plt.plot(gix, giy, \"*m\")\n\n rx, ry = [sx], [sy]\n motion = get_motion_model()\n while d >= reso:\n minp = float(\"inf\")\n minix, miniy = -1, -1\n for i in range(len(motion)):\n inx = ix + motion[i][0]\n iny = iy + motion[i][1]\n if inx >= len(pmap) or iny >= len(pmap[0]):\n p = float(\"inf\") # outside area\n else:\n p = pmap[inx][iny]\n if minp > p:\n minp = p\n minix = inx\n miniy = iny\n ix = minix\n iy = miniy\n xp = ix * reso + minx\n yp = iy * reso + miny\n d = np.hypot(gx - xp, gy - yp)\n rx.append(xp)\n ry.append(yp)\n\n if show_animation:\n plt.plot(ix, iy, \".r\")\n plt.pause(0.01)\n\n print(\"Goal!!\")\n\n return rx, ry\n\n\ndef draw_heatmap(data):\n data = np.array(data).T\n plt.pcolor(data, vmax=100.0, cmap=plt.cm.Blues)\n\n\ndef main():\n print(\"potential_field_planning start\")\n\n sx = 0.0 # start x position [m]\n sy = 10.0 # start y positon [m]\n gx = 30.0 # goal x position [m]\n gy = 30.0 # goal y position [m]\n grid_size = 0.5 # potential grid size [m]\n robot_radius = 5.0 # robot radius [m]\n\n ox = [15.0, 5.0, 20.0, 25.0] # obstacle x position list [m]\n oy = [25.0, 15.0, 26.0, 25.0] # obstacle y position list [m]\n\n if show_animation:\n plt.grid(True)\n plt.axis(\"equal\")\n\n # path generation\n rx, ry = potential_field_planning(\n sx, sy, gx, gy, ox, oy, grid_size, robot_radius)\n\n if show_animation:\n plt.show()\n\n\nif __name__ == '__main__':\n print(__file__ + \" start!!\")\n main()\n print(__file__ + 
\" Done!!\")\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.pcolor",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.axis",
"numpy.array",
"matplotlib.pyplot.pause",
"numpy.hypot"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Westlanderz/AI-Plat1 | [
"1187c22819e5135e8e8189c99b86a93a0d66b8d8",
"1187c22819e5135e8e8189c99b86a93a0d66b8d8",
"1187c22819e5135e8e8189c99b86a93a0d66b8d8",
"1187c22819e5135e8e8189c99b86a93a0d66b8d8",
"1187c22819e5135e8e8189c99b86a93a0d66b8d8",
"1187c22819e5135e8e8189c99b86a93a0d66b8d8",
"1187c22819e5135e8e8189c99b86a93a0d66b8d8",
"1187c22819e5135e8e8189c99b86a93a0d66b8d8",
"1187c22819e5135e8e8189c99b86a93a0d66b8d8",
"1187c22819e5135e8e8189c99b86a93a0d66b8d8",
"1187c22819e5135e8e8189c99b86a93a0d66b8d8",
"1187c22819e5135e8e8189c99b86a93a0d66b8d8",
"1187c22819e5135e8e8189c99b86a93a0d66b8d8",
"1187c22819e5135e8e8189c99b86a93a0d66b8d8",
"1187c22819e5135e8e8189c99b86a93a0d66b8d8",
"1187c22819e5135e8e8189c99b86a93a0d66b8d8",
"1187c22819e5135e8e8189c99b86a93a0d66b8d8",
"1187c22819e5135e8e8189c99b86a93a0d66b8d8",
"1187c22819e5135e8e8189c99b86a93a0d66b8d8",
"1187c22819e5135e8e8189c99b86a93a0d66b8d8",
"1187c22819e5135e8e8189c99b86a93a0d66b8d8",
"1187c22819e5135e8e8189c99b86a93a0d66b8d8",
"1187c22819e5135e8e8189c99b86a93a0d66b8d8",
"1187c22819e5135e8e8189c99b86a93a0d66b8d8"
] | [
"venv/Lib/site-packages/caffe2/quantization/server/dequantize_dnnlowp_op_test.py",
"venv/Lib/site-packages/caffe2/python/operator_test/prepend_dim_test.py",
"venv/Lib/site-packages/caffe2/quantization/server/gather_dnnlowp_op_test.py",
"venv/Lib/site-packages/torch/fx/node.py",
"venv/Lib/site-packages/caffe2/contrib/playground/resnetdemo/IN1k_resnet.py",
"venv/Lib/site-packages/caffe2/python/models/seq2seq/train.py",
"venv/Lib/site-packages/caffe2/quantization/server/int8_gen_quant_params_min_max_test.py",
"venv/Lib/site-packages/h5py/tests/test_dimension_scales.py",
"venv/Lib/site-packages/caffe2/contrib/nccl/nccl_ops_test.py",
"venv/Lib/site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py",
"venv/Lib/site-packages/torch/distributions/gumbel.py",
"venv/Lib/site-packages/torch/distributed/pipeline/sync/microbatch.py",
"venv/Lib/site-packages/numpy/distutils/exec_command.py",
"venv/Lib/site-packages/caffe2/python/operator_test/rmac_regions_op_test.py",
"venv/Lib/site-packages/numpy/core/tests/test_datetime.py",
"venv/Lib/site-packages/torch/distributions/geometric.py",
"venv/Lib/site-packages/torch/ao/quantization/quantization_mappings.py",
"venv/Lib/site-packages/numpy/lib/polynomial.py",
"venv/Lib/site-packages/caffe2/python/operator_test/sparse_itemwise_dropout_with_replacement_op_test.py",
"venv/Lib/site-packages/torch/fx/passes/param_fetch.py",
"venv/Lib/site-packages/caffe2/python/ideep/transpose_op_test.py",
"venv/Lib/site-packages/caffe2/python/operator_test/find_op_test.py",
"venv/Lib/site-packages/caffe2/python/operator_test/unsafe_coalesce_test.py",
"venv/Lib/site-packages/caffe2/python/operator_test/ctc_beam_search_decoder_op_test.py"
] | [
"\r\n\r\nimport collections\r\n\r\nimport caffe2.python.hypothesis_test_util as hu\r\nimport hypothesis.strategies as st\r\nimport numpy as np\r\nfrom caffe2.python import core, dyndep, workspace\r\nfrom caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close\r\nfrom hypothesis import given\r\n\r\n\r\ndyndep.InitOpsLibrary(\"//caffe2/caffe2/quantization/server:dnnlowp_ops\")\r\nworkspace.GlobalInit([\"caffe2\", \"--caffe2_omp_num_threads=11\"])\r\n\r\n\r\nclass DNNLowPDequantizeOpTest(hu.HypothesisTestCase):\r\n @given(size=st.integers(1024, 2048), is_empty=st.booleans(), **hu.gcs_cpu_only)\r\n def test_dnnlowp_dequantize(self, size, is_empty, gc, dc):\r\n if is_empty:\r\n size = 0\r\n min_ = -10.0\r\n max_ = 20.0\r\n X = (np.random.rand(size) * (max_ - min_) + min_).astype(np.float32)\r\n\r\n Output = collections.namedtuple(\"Output\", [\"Y\", \"op_type\", \"engine\"])\r\n outputs = []\r\n\r\n op_type_list = [\"Dequantize\", \"Int8Dequantize\"]\r\n engine = \"DNNLOWP\"\r\n\r\n outputs.append(Output(X, op_type=\"\", engine=\"\"))\r\n\r\n for op_type in op_type_list:\r\n net = core.Net(\"test_net\")\r\n\r\n quantize = core.CreateOperator(\r\n \"Quantize\", [\"X\"], [\"X_q\"], engine=engine, device_option=gc\r\n )\r\n net.Proto().op.extend([quantize])\r\n\r\n dequantize = core.CreateOperator(\r\n op_type, [\"X_q\"], [\"Y\"], engine=engine, device_option=gc\r\n )\r\n net.Proto().op.extend([dequantize])\r\n\r\n self.ws.create_blob(\"X\").feed(X, device_option=gc)\r\n self.ws.run(net)\r\n outputs.append(\r\n Output(Y=self.ws.blobs[\"Y\"].fetch(), op_type=op_type, engine=engine)\r\n )\r\n\r\n check_quantized_results_close(outputs)\r\n",
"\r\n\r\n\r\n\r\nimport numpy as np\r\n\r\nfrom caffe2.python import core, workspace\r\nfrom caffe2.python.test_util import TestCase\r\nfrom caffe2.proto import caffe2_pb2\r\n\r\n\r\nclass TestPrependDim(TestCase):\r\n def _test_fwd_bwd(self):\r\n old_shape = (128, 2, 4)\r\n new_shape = (8, 16, 2, 4)\r\n X = np.random.rand(*old_shape).astype(np.float32)\r\n Y = np.random.rand(*new_shape).astype(np.float32)\r\n\r\n net = core.Net('net')\r\n\r\n net.GivenTensorFill([], 'X', shape=old_shape, values=X.flatten())\r\n net.GivenTensorFill([], 'Y', shape=new_shape, values=Y.flatten())\r\n\r\n net.PrependDim(['X'], ['X_out'], dim_size=8)\r\n net.DotProduct(['X_out', 'Y'], 'Z')\r\n net.AddGradientOperators(['Z'])\r\n\r\n workspace.RunNetOnce(net)\r\n\r\n X_out = workspace.FetchBlob('X_out')\r\n X_grad = workspace.FetchBlob('X_grad')\r\n Y_grad = workspace.FetchBlob('Y_grad')\r\n\r\n # Check the shape of the gradient\r\n np.testing.assert_array_equal(X_out.shape, Y.shape)\r\n np.testing.assert_array_equal(X_grad.shape, X.shape)\r\n np.testing.assert_array_equal(Y_grad.shape, Y.shape)\r\n\r\n def test_prepend_dim(self):\r\n devices = [core.DeviceOption(caffe2_pb2.CPU, 0)]\r\n if workspace.NumGpuDevices() > 0:\r\n devices.append(core.DeviceOption(workspace.GpuDeviceType, 0))\r\n\r\n for device_opt in devices:\r\n with core.DeviceScope(device_opt):\r\n self._test_fwd_bwd()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import unittest\r\n unittest.main()\r\n",
"\r\n\r\nimport collections\r\n\r\nimport caffe2.python.hypothesis_test_util as hu\r\nimport hypothesis.strategies as st\r\nimport numpy as np\r\nfrom caffe2.python import core, dyndep, workspace\r\nfrom caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close\r\nfrom hypothesis import given\r\n\r\n\r\ndyndep.InitOpsLibrary(\"//caffe2/caffe2/quantization/server:dnnlowp_ops\")\r\nworkspace.GlobalInit([\"caffe2\", \"--caffe2_omp_num_threads=11\"])\r\n\r\n\r\nclass DNNLowPGatherOpTest(hu.HypothesisTestCase):\r\n @given(\r\n dim1=st.integers(256, 512),\r\n dim2=st.integers(32, 256),\r\n is_empty=st.booleans(),\r\n in_quantized=st.booleans(),\r\n out_quantized=st.booleans(),\r\n **hu.gcs_cpu_only\r\n )\r\n def test_dnnlowp_gather(self, dim1, dim2, is_empty, in_quantized, out_quantized, gc, dc):\r\n if is_empty:\r\n dim2 = 0\r\n # FIXME : DNNLOWP Gather doesn't support quantized input and\r\n # dequantized output\r\n if in_quantized:\r\n out_quantized = True\r\n\r\n data = (np.random.rand(dim1) * 2 - 1).astype(np.float32)\r\n index = np.floor(np.random.rand(dim2) * dim1).astype(np.int32)\r\n\r\n Output = collections.namedtuple(\"Output\", [\"out\", \"op_type\", \"engine\"])\r\n outputs = []\r\n\r\n op_engine_list = [\r\n (\"Gather\", \"\"),\r\n (\"Gather\", \"DNNLOWP\"),\r\n (\"Int8Gather\", \"DNNLOWP\"),\r\n ]\r\n\r\n for op_type, engine in op_engine_list:\r\n net = core.Net(\"test_net\")\r\n\r\n do_quantize = \"DNNLOWP\" in engine and in_quantized\r\n do_dequantize = \"DNNLOWP\" in engine and out_quantized\r\n\r\n if do_quantize:\r\n quantize_data = core.CreateOperator(\r\n \"Quantize\", [\"data\"], [\"data_q\"], engine=engine, device_option=gc\r\n )\r\n net.Proto().op.extend([quantize_data])\r\n\r\n gather = core.CreateOperator(\r\n op_type,\r\n [\"data_q\" if do_quantize else \"data\", \"index\"],\r\n [\"out_q\" if do_dequantize else \"out\"],\r\n dequantize_output=not do_dequantize,\r\n engine=engine,\r\n device_option=gc,\r\n )\r\n net.Proto().op.extend([gather])\r\n\r\n if do_dequantize:\r\n dequantize = core.CreateOperator(\r\n \"Dequantize\", [\"out_q\"], [\"out\"], engine=engine, device_option=gc\r\n )\r\n net.Proto().op.extend([dequantize])\r\n\r\n self.ws.create_blob(\"data\").feed(data, device_option=gc)\r\n self.ws.create_blob(\"index\").feed(index, device_option=gc)\r\n self.ws.run(net)\r\n outputs.append(\r\n Output(out=self.ws.blobs[\"out\"].fetch(), op_type=op_type, engine=engine)\r\n )\r\n\r\n check_quantized_results_close(outputs, ref=data)\r\n",
"# Nodes represent a definition of a value in our graph of operators.\r\nfrom typing import TYPE_CHECKING, Union, Callable, Any, Tuple, List, Optional, Dict, Set\r\nfrom ._compatibility import compatibility\r\nfrom .immutable_collections import immutable_dict, immutable_list\r\nimport torch\r\nimport builtins\r\nimport types\r\nfrom torch.fx.operator_schemas import normalize_function, normalize_module, ArgsKwargsPair\r\n\r\nif TYPE_CHECKING:\r\n from .graph import Graph\r\n\r\nBaseArgumentTypes = Union[str, int, float, bool, torch.dtype, torch.Tensor, torch.device, torch.memory_format]\r\nbase_types = BaseArgumentTypes.__args__ # type: ignore[attr-defined]\r\n\r\nTarget = Union[Callable[..., Any], str]\r\n\r\nArgument = Optional[Union[\r\n Tuple[Any, ...], # actually Argument, but mypy can't represent recursive types\r\n List[Any], # actually Argument\r\n Dict[str, Any], # actually Argument\r\n slice, # Slice[Argument, Argument, Argument], but slice is not a templated type in typing\r\n 'Node',\r\n BaseArgumentTypes\r\n]]\r\n\r\n_side_effectful_functions: Set[Callable] = {\r\n torch._assert, torch.ops.profiler._record_function_enter,\r\n torch.ops.profiler._record_function_exit}\r\n\r\n# this is fixed on master, WAR for 1.5\r\ndef _find_module_of_method(orig_method: Callable[..., Any]) -> str:\r\n name = orig_method.__name__\r\n module = orig_method.__module__\r\n if module is not None:\r\n return module\r\n for guess in [torch, torch.nn.functional]:\r\n if getattr(guess, name, None) is orig_method:\r\n return guess.__name__\r\n raise RuntimeError(f'cannot find module for {orig_method}')\r\n\r\n# Borrowed from CPython typing module\r\n# https://github.com/python/cpython/blob/f90dc36c15d7fee0efaf6d39e97be0bdf2683e93/Lib/typing.py#L156\r\ndef _type_repr(obj):\r\n \"\"\"Return the repr() of an object, special-casing types (internal helper).\r\n If obj is a type, we return a shorter version than the default\r\n type.__repr__, based on the module and qualified name, which is\r\n typically enough to uniquely identify a type. For everything\r\n else, we fall back on repr(obj).\r\n \"\"\"\r\n # HACK: In Python 3.6, type aliases from ``typing`` are instances of ``type``, but in\r\n # later Python versions, type aliases are not instances of ``type``!! 
We want\r\n # all type aliases to fall through to ``repr``, so if we have a type that is\r\n # in the module typing, don't go down this path.\r\n if isinstance(obj, type) and obj.__module__ != 'typing':\r\n if obj.__module__ == 'builtins':\r\n return obj.__qualname__\r\n return f'{obj.__module__}.{obj.__qualname__}'\r\n if obj is ...:\r\n return('...')\r\n if isinstance(obj, types.FunctionType):\r\n return obj.__name__\r\n return repr(obj)\r\n\r\ndef _get_qualified_name(func: Callable[..., Any]) -> str:\r\n # things like getattr just appear in builtins\r\n if getattr(builtins, func.__name__, None) is func:\r\n return func.__name__\r\n name = func.__name__\r\n module = _find_module_of_method(func)\r\n module = module.replace('torch._ops', 'torch.ops') # WAR for bug in how torch.ops assigns module\r\n return f'{module}.{name}'\r\n\r\ndef _format_arg(arg) -> str:\r\n if isinstance(arg, list):\r\n items = ', '.join(_format_arg(a) for a in arg)\r\n return f'[{items}]'\r\n elif isinstance(arg, tuple):\r\n items = ', '.join(_format_arg(a) for a in arg)\r\n maybe_comma = ',' if len(arg) == 1 else ''\r\n return f'({items}{maybe_comma})'\r\n elif isinstance(arg, dict):\r\n items_str = ', '.join(f'{k}: {_format_arg(v)}' for k, v in arg.items())\r\n return f'{{{items_str}}}'\r\n\r\n if isinstance(arg, Node):\r\n return '%' + str(arg)\r\n else:\r\n return str(arg)\r\n\r\n@compatibility(is_backward_compatible=True)\r\nclass Node:\r\n \"\"\"\r\n ``Node`` is the data structure that represents individual operations within\r\n a ``Graph``. For the most part, Nodes represent callsites to various entities,\r\n such as operators, methods, and Modules (some exceptions include nodes that\r\n specify function inputs and outputs). Each ``Node`` has a function specified\r\n by its ``op`` property. The ``Node`` semantics for each value of ``op`` are as follows:\r\n\r\n - ``placeholder`` represents a function input. The ``name`` attribute specifies the name this value will take on.\r\n ``target`` is similarly the name of the argument. ``args`` holds either: 1) nothing, or 2) a single argument\r\n denoting the default parameter of the function input. ``kwargs`` is don't-care. Placeholders correspond to\r\n the function parameters (e.g. ``x``) in the graph printout.\r\n - ``get_attr`` retrieves a parameter from the module hierarchy. ``name`` is similarly the name the result of the\r\n fetch is assigned to. ``target`` is the fully-qualified name of the parameter's position in the module hierarchy.\r\n ``args`` and ``kwargs`` are don't-care\r\n - ``call_function`` applies a free function to some values. ``name`` is similarly the name of the value to assign\r\n to. ``target`` is the function to be applied. ``args`` and ``kwargs`` represent the arguments to the function,\r\n following the Python calling convention\r\n - ``call_module`` applies a module in the module hierarchy's ``forward()`` method to given arguments. ``name`` is\r\n as previous. ``target`` is the fully-qualified name of the module in the module hierarchy to call.\r\n ``args`` and ``kwargs`` represent the arguments to invoke the module on, *including the self argument*.\r\n - ``call_method`` calls a method on a value. ``name`` is as similar. ``target`` is the string name of the method\r\n to apply to the ``self`` argument. ``args`` and ``kwargs`` represent the arguments to invoke the module on,\r\n *including the self argument*\r\n - ``output`` contains the output of the traced function in its ``args[0]`` attribute. 
This corresponds to the \"return\" statement\r\n in the Graph printout.\r\n \"\"\"\r\n\r\n @compatibility(is_backward_compatible=True)\r\n def __init__(self, graph: 'Graph', name: str, op: str, target: 'Target',\r\n args: Tuple['Argument', ...], kwargs: Dict[str, 'Argument'],\r\n return_type : Optional[Any] = None) -> None:\r\n \"\"\"\r\n Instantiate an instance of ``Node``. Note: most often, you want to use the\r\n Graph APIs, i.e. ``Graph.call_module``, ``Graph.call_method``, etc. rather\r\n than instantiating a ``Node`` directly.\r\n\r\n Args:\r\n graph (Graph): The ``Graph`` to which this ``Node`` should belong.\r\n\r\n name (str): The name to which the output of this ``Node`` should be assigned\r\n\r\n op (str): The opcode for this ``Node``. Can be one of 'placeholder',\r\n 'call_method', 'call_module', 'call_function', 'get_attr',\r\n 'output'\r\n\r\n target ('Target'): The target this op should call. See the broader\r\n ``Node`` docstring for more details.\r\n\r\n args (Tuple['Argument']): The args to be passed to ``target``\r\n\r\n kwargs (Dict[str, 'Argument']): The kwargs to be passed to ``target``\r\n\r\n return_type (Optional[Any]): The python type expression representing the\r\n type of the output of this node. This field can be used for\r\n annotation of values in the generated code or for other types\r\n of analyses.\r\n \"\"\"\r\n self.graph = graph\r\n self.name = name # unique name of value being created\r\n assert op in ['placeholder', 'call_method', 'call_module', 'call_function', 'get_attr', 'output', 'root']\r\n self.op = op # the kind of operation = placeholder|call_method|call_module|call_function|get_attr\r\n if op == 'call_function':\r\n if not callable(target):\r\n raise ValueError(f'Node [graph = {graph}, name = \\'{name}\\'] target {target} has type {torch.typename(target)} '\r\n 'but a Callable is expected')\r\n else:\r\n if not isinstance(target, str):\r\n raise ValueError(f'Node [graph = {graph}, name = \\'{name}\\'] target {target} has type {torch.typename(target)} '\r\n 'but a str is expected')\r\n self.target = target # for method/module/function, the name of the method/module/function/attr\r\n # being invoked, e.g add, layer1, or torch.add\r\n\r\n # All `Node`-valued inputs. Key is the Node, value is don't-care.\r\n # The public API for this is `all_input_nodes`, this private attribute\r\n # should not be accessed directly.\r\n self._input_nodes : Dict[Node, None] = {}\r\n self.__update_args_kwargs(map_arg(args, lambda x: x), map_arg(kwargs, lambda x: x)) # type: ignore[arg-type]\r\n\r\n # All of the nodes that use the value produced by this Node\r\n # Note one user may correspond to several uses, e.g. the node fo ``x + x``\r\n # would appear once here, but represents two uses.\r\n #\r\n # Is a dict to act as an \"ordered set\". Keys are significant, value dont-care\r\n self.users : Dict['Node', None] = {}\r\n # Type expression representing the output value of this node.\r\n # This should contain the same class of Type objects that would appear\r\n # as type annotations for function inputs/outputs.\r\n #\r\n # For placeholder nodes, this value will be used to type-annotate the\r\n # generated function parameters.\r\n # For the return node, this value will be used to type-annotate the\r\n # generated function return type. (Note this is a special case. ``return``\r\n # does not produce a value, it's more of a notation. 
Thus, this value\r\n # describes the type of args[0] in the ``return`` node.\r\n self.type : Optional[Any] = return_type\r\n self._prev = self\r\n self._next = self\r\n self._erased = False\r\n\r\n # If set, use this fn to print this node\r\n self._repr_fn : Optional[Callable[[Node], str]] = None\r\n self._stack_trace : Optional[str] = None\r\n\r\n # Dictionary to store metadata passes need to do their\r\n # transformations. This metadata is preserved across node copies\r\n self.meta : Dict[str, Any] = {}\r\n\r\n @property\r\n def next(self) -> 'Node':\r\n \"\"\"\r\n Returns the next ``Node`` in the linked list of Nodes.\r\n\r\n Returns:\r\n\r\n The next ``Node`` in the linked list of Nodes.\r\n \"\"\"\r\n return self._next\r\n\r\n @property\r\n def prev(self) -> 'Node':\r\n \"\"\"\r\n Returns the previous ``Node`` in the linked list of Nodes.\r\n\r\n Returns:\r\n\r\n The previous ``Node`` in the linked list of Nodes.\r\n \"\"\"\r\n return self._prev\r\n\r\n @compatibility(is_backward_compatible=True)\r\n def prepend(self, x: 'Node') -> None:\r\n \"\"\"\r\n Insert x before this node in the list of nodes in the graph. Example::\r\n\r\n Before: p -> self\r\n bx -> x -> ax\r\n After: p -> x -> self\r\n bx -> ax\r\n\r\n Args:\r\n x (Node): The node to put before this node. Must be a member of the same graph.\r\n \"\"\"\r\n assert self.graph == x.graph, \"Attempting to move a Node into a different Graph\"\r\n x._remove_from_list()\r\n p = self._prev\r\n p._next, x._prev = x, p\r\n x._next, self._prev = self, x\r\n\r\n @compatibility(is_backward_compatible=True)\r\n def append(self, x: 'Node') -> None:\r\n \"\"\"\r\n Insert x after this node in the list of nodes in the graph.\r\n Equvalent to ``self.next.prepend(x)``\r\n\r\n Args:\r\n x (Node): The node to put after this node. Must be a member of the same graph.\r\n \"\"\"\r\n self._next.prepend(x)\r\n\r\n def _remove_from_list(self):\r\n p, n = self._prev, self._next\r\n p._next, n._prev = n, p\r\n\r\n @property\r\n def args(self) -> Tuple[Argument, ...]:\r\n \"\"\"\r\n The tuple of arguments to this ``Node``. The interpretation of arguments\r\n depends on the node's opcode. See the :class:`Node` docstring for more\r\n information.\r\n\r\n Assignment to this property is allowed. All accounting of uses and users\r\n is updated automatically on assignment.\r\n \"\"\"\r\n return self._args\r\n\r\n @args.setter\r\n def args(self, a : Tuple[Argument, ...]):\r\n \"\"\"\r\n Set the tuple of arguments to this Node. The interpretation of arguments\r\n depends on the node's opcode. See the ``fx.Graph`` docstring for more\r\n information.\r\n \"\"\"\r\n # DO NOT CALL `__update_args_kwargs` directly. The correct way to\r\n # set `args` is via direct assignment, i.e. `node.args = new_args`\r\n self.__update_args_kwargs(map_arg(a, lambda x: x), self._kwargs) # type: ignore[arg-type]\r\n\r\n @property\r\n def kwargs(self) -> Dict[str, Argument]:\r\n \"\"\"\r\n The dict of keyword arguments to this ``Node``. The interpretation of arguments\r\n depends on the node's opcode. See the :class:`Node` docstring for more\r\n information.\r\n\r\n Assignment to this property is allowed. All accounting of uses and users\r\n is updated automatically on assignment.\r\n \"\"\"\r\n return self._kwargs\r\n\r\n @kwargs.setter\r\n def kwargs(self, k : Dict[str, Argument]):\r\n \"\"\"\r\n Set the dict of kwargs to this Node. The interpretation of arguments\r\n depends on the node's opcode. 
See the ``fx.Graph`` docstring for more\r\n information.\r\n \"\"\"\r\n # DO NOT CALL `__update_args_kwargs` directly. The correct way to\r\n # set `args` is via direct assignment, i.e. `node.kwargs = new_kwargs`\r\n self.__update_args_kwargs(self._args, map_arg(k, lambda x: x)) # type: ignore[arg-type]\r\n\r\n @property\r\n def all_input_nodes(self) -> List['Node']:\r\n \"\"\"\r\n Return all Nodes that are inputs to this Node. This is equivalent to\r\n iterating over ``args`` and ``kwargs`` and only collecting the values that\r\n are Nodes.\r\n\r\n Returns:\r\n\r\n List of ``Nodes`` that appear in the ``args`` and ``kwargs`` of this\r\n ``Node``, in that order.\r\n \"\"\"\r\n return list(self._input_nodes.keys())\r\n\r\n @compatibility(is_backward_compatible=True)\r\n def update_arg(self, idx : int, arg : Argument) -> None:\r\n \"\"\"\r\n Update an existing positional argument to contain the new value\r\n ``arg``. After calling, ``self.args[idx] == arg``.\r\n\r\n Args:\r\n\r\n idx (int): The index into ``self.args`` of the element to update\r\n arg (Argument): The new argument value to write into ``args``\r\n \"\"\"\r\n args = list(self.args)\r\n args[idx] = arg\r\n self.args = tuple(args)\r\n\r\n @compatibility(is_backward_compatible=True)\r\n def update_kwarg(self, key : str, arg : Argument) -> None:\r\n \"\"\"\r\n Update an existing keyword argument to contain the new value\r\n ``arg``. After calling, ``self.kwargs[key] == arg``.\r\n\r\n Args:\r\n\r\n key (str): The key in ``self.kwargs`` of the element to update\r\n arg (Argument): The new argument value to write into ``kwargs``\r\n \"\"\"\r\n kwargs = dict(self.kwargs)\r\n kwargs[key] = arg\r\n self.kwargs = kwargs\r\n\r\n @property\r\n def stack_trace(self) -> Optional[str]:\r\n \"\"\"\r\n Return the Python stack trace that was recorded during tracing, if any.\r\n This property is usually populated by `Tracer.create_proxy`. To record\r\n stack traces during tracing for debug purposes, set\r\n `record_stack_traces = True` on the `Tracer` instance.\r\n \"\"\"\r\n return self._stack_trace\r\n\r\n @stack_trace.setter\r\n def stack_trace(self, trace : Optional[str]):\r\n self._stack_trace = trace\r\n\r\n def __update_args_kwargs(self, new_args : Tuple['Argument', ...], new_kwargs : Dict[str, 'Argument']):\r\n \"\"\"\r\n This API is internal. Do *not* call it directly.\r\n \"\"\"\r\n self._args = new_args\r\n self._kwargs = new_kwargs\r\n\r\n for old_use in self._input_nodes.keys():\r\n old_use.users.pop(self)\r\n\r\n self._input_nodes = {}\r\n map_arg(self._args, lambda n: self._input_nodes.setdefault(n))\r\n map_arg(self._kwargs, lambda n: self._input_nodes.setdefault(n))\r\n\r\n for new_use in self._input_nodes.keys():\r\n new_use.users.setdefault(self)\r\n\r\n def __repr__(self) -> str:\r\n if self._repr_fn:\r\n return self._repr_fn(self)\r\n return self.name\r\n\r\n def _pretty_print_target(self, target):\r\n \"\"\"\r\n Make target printouts more user-friendly.\r\n 1) builtins will be printed as `builtins.xyz`\r\n 2) operators will be printed as `operator.xyz`\r\n 3) other callables will be printed with qualfied name, e.g. torch.add\r\n \"\"\"\r\n if isinstance(target, str):\r\n return target\r\n if hasattr(target, '__module__'):\r\n if not hasattr(target, '__name__'):\r\n # Just to be defensive, if we don't have `__name__`, get the\r\n # qualname. Not sure if this happens for any members of `operator`\r\n # or `builtins`. 
This fallback path is not as good, since e.g.\r\n # things in `operator` have `_operator` as their __module__.\r\n return _get_qualified_name(target)\r\n if target.__module__ == 'builtins':\r\n return f'builtins.{target.__name__}'\r\n elif target.__module__ == '_operator':\r\n return f'operator.{target.__name__}'\r\n return _get_qualified_name(target)\r\n\r\n @compatibility(is_backward_compatible=True)\r\n def format_node(self,\r\n placeholder_names: List[str] = None,\r\n maybe_return_typename: List[str] = None) -> Optional[str]:\r\n \"\"\"\r\n Return a descriptive string representation of ``self``.\r\n\r\n This method can be used with no arguments as a debugging\r\n utility.\r\n\r\n This function is also used internally in the ``__str__`` method\r\n of ``Graph``. Together, the strings in ``placeholder_names``\r\n and ``maybe_return_typename`` make up the signature of the\r\n autogenerated ``forward`` function in this Graph's surrounding\r\n GraphModule. ``placeholder_names`` and ``maybe_return_typename``\r\n should not be used otherwise.\r\n\r\n Args:\r\n placeholder_names: A list that will store formatted strings\r\n representing the placeholders in the generated\r\n ``forward`` function. Internal use only.\r\n maybe_return_typename: A single-element list that will store\r\n a formatted string representing the output of the\r\n generated ``forward`` function. Internal use only.\r\n\r\n Returns:\r\n str: If 1) we're using ``format_node`` as an internal helper\r\n in the ``__str__`` method of ``Graph``, and 2) ``self``\r\n is a placeholder Node, return ``None``. Otherwise,\r\n return a descriptive string representation of the\r\n current Node.\r\n \"\"\"\r\n if self.op == 'placeholder':\r\n assert isinstance(self.target, str)\r\n arg_str = self.target\r\n arg_str += arg_str + f': {_type_repr(self.type)}' if self.type else ''\r\n if placeholder_names:\r\n placeholder_names.append(arg_str)\r\n return None\r\n maybe_typename = f'{_type_repr(self.type)} ' if self.type else ''\r\n default_val = '(default=' + str(self.args[0]) + ')' if self.args else ''\r\n return f'%{self.name} : {maybe_typename}[#users={len(self.users)}] = {self.op}[target={self.target}]{default_val}'\r\n elif self.op == 'get_attr':\r\n maybe_typename = f'{_type_repr(self.type)} ' if self.type is not None else ''\r\n return f'%{self.name} : {maybe_typename}[#users={len(self.users)}] = ' \\\r\n f'{self.op}[target={self._pretty_print_target(self.target)}]'\r\n elif self.op == 'output':\r\n if self.type and maybe_return_typename:\r\n maybe_return_typename[0] = f' -> {_type_repr(self.type)}'\r\n return f'return {self.args[0]}'\r\n else:\r\n maybe_typename = f'{_type_repr(self.type)} ' if self.type is not None else ''\r\n return f'%{self.name} : {maybe_typename}[#users={len(self.users)}] = ' \\\r\n f'{self.op}[target={self._pretty_print_target(self.target)}](' \\\r\n f'args = {_format_arg(self.args)}, kwargs = {_format_arg(self.kwargs)})'\r\n\r\n @compatibility(is_backward_compatible=True)\r\n def replace_all_uses_with(self, replace_with : 'Node') -> List['Node']:\r\n \"\"\"\r\n Replace all uses of ``self`` in the Graph with the Node ``replace_with``.\r\n\r\n Args:\r\n\r\n replace_with (Node): The node to replace all uses of ``self`` with.\r\n\r\n Returns:\r\n\r\n The list of Nodes on which this change was made.\r\n \"\"\"\r\n to_process = list(self.users)\r\n for use_node in to_process:\r\n def maybe_replace_node(n : Node) -> Node:\r\n if n == self:\r\n return replace_with\r\n else:\r\n return n\r\n\r\n new_args = 
map_arg(use_node.args, maybe_replace_node)\r\n new_kwargs = map_arg(use_node.kwargs, maybe_replace_node)\r\n assert isinstance(new_args, tuple)\r\n assert isinstance(new_kwargs, dict)\r\n use_node.__update_args_kwargs(new_args, new_kwargs)\r\n\r\n assert len(self.users) == 0\r\n return to_process\r\n\r\n @compatibility(is_backward_compatible=False)\r\n def is_impure(self):\r\n \"\"\"\r\n Returns whether this op is impure, i.e. if its op is a placeholder or\r\n output, or if a call_function or call_module which is impure.\r\n\r\n Returns:\r\n\r\n bool: If the op is impure or not.\r\n \"\"\"\r\n if self.op in {\"placeholder\", \"output\"}:\r\n return True\r\n\r\n # Check if an impure function.\r\n if self.op == \"call_function\":\r\n return self.target in _side_effectful_functions\r\n\r\n # Check if an impure module.\r\n if self.op == \"call_module\":\r\n assert (\r\n self.graph.owning_module is not None\r\n ), \"self.graph.owning_module not set for purity check\"\r\n target_mod = self.graph.owning_module.get_submodule(self.target)\r\n assert (\r\n target_mod is not None\r\n ), f\"Did not find expected submodule target {self.target}\"\r\n return getattr(target_mod, \"_is_impure\", False)\r\n\r\n return False\r\n\r\n @compatibility(is_backward_compatible=False)\r\n def normalized_arguments(\r\n self, root : torch.nn.Module, arg_types : Optional[Tuple[Any]] = None,\r\n kwarg_types : Optional[Dict[str, Any]] = None,\r\n normalize_to_only_use_kwargs : bool = False) -> Optional[ArgsKwargsPair]:\r\n \"\"\"\r\n Returns normalized arguments to Python targets. This means that\r\n `args/kwargs` will be matched up to the module/functional's\r\n signature and return exclusively kwargs in positional order\r\n if `normalize_to_only_use_kwargs` is true.\r\n Also populates default values. 
Does not support positional-only\r\n parameters or varargs parameters.\r\n\r\n Supports module calls.\r\n\r\n May require `arg_types` and `kwarg_types` in order to disambiguate overloads.\r\n\r\n Args:\r\n root (torch.nn.Module): Module upon which to resolve module targets.\r\n arg_types (Optional[Tuple[Any]]): Tuple of arg types for the args\r\n kwarg_types (Optional[Dict[str, Any]]): Dict of arg types for the kwargs\r\n normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.\r\n\r\n Returns:\r\n\r\n Returns NamedTuple ArgsKwargsPair, or `None` if not successful.\r\n \"\"\"\r\n if self.op == 'call_function':\r\n assert callable(self.target)\r\n return normalize_function(self.target, self.args, self.kwargs, arg_types, kwarg_types) # type: ignore[arg-type]\r\n elif self.op == 'call_module':\r\n assert isinstance(self.target, str)\r\n return normalize_module(root, self.target, self.args, self.kwargs) # type: ignore[arg-type]\r\n\r\n return None\r\n\r\n @compatibility(is_backward_compatible=True)\r\n def replace_input_with(self, old_input: 'Node', new_input: 'Node'):\r\n \"\"\"\r\n Loop through input nodes of ``self``, and replace all instances of\r\n ``old_input`` with ``new_input``.\r\n\r\n Args:\r\n\r\n old_input (Node): The old input node to be replaced.\r\n new_input (Node): The new input node to replace ``old_input``.\r\n \"\"\"\r\n def maybe_replace_node(n : Node) -> Node:\r\n return new_input if n == old_input else n\r\n\r\n new_args = map_arg(self.args, maybe_replace_node)\r\n new_kwargs = map_arg(self.kwargs, maybe_replace_node)\r\n assert isinstance(new_args, tuple)\r\n assert isinstance(new_kwargs, dict)\r\n self.__update_args_kwargs(new_args, new_kwargs)\r\n\r\n\r\n@compatibility(is_backward_compatible=True)\r\ndef map_arg(a: Argument, fn: Callable[[Node], Argument]) -> Argument:\r\n \"\"\"\r\n Apply fn to each Node appearing arg. arg may be a list, tuple, slice, or dict with string keys.\r\n \"\"\"\r\n assert callable(fn), \"torch.fx.map_arg(a, fn): fn must be a callable\"\r\n return map_aggregate(a, lambda x: fn(x) if isinstance(x, Node) else x)\r\n\r\n@compatibility(is_backward_compatible=True)\r\ndef map_aggregate(a: Argument, fn: Callable[[Argument], Argument]) -> Argument:\r\n \"\"\"\r\n Apply fn to each Node appearing arg. arg may be a list, tuple, slice, or dict with string keys.\r\n \"\"\"\r\n if isinstance(a, tuple):\r\n return tuple(map_aggregate(elem, fn) for elem in a)\r\n elif isinstance(a, list):\r\n return immutable_list(map_aggregate(elem, fn) for elem in a)\r\n elif isinstance(a, dict):\r\n return immutable_dict((k, map_aggregate(v, fn)) for k, v in a.items())\r\n elif isinstance(a, slice):\r\n return slice(map_aggregate(a.start, fn), map_aggregate(a.stop, fn), map_aggregate(a.step, fn))\r\n else:\r\n return fn(a)\r\n",
"\r\n\r\n\r\n\r\n\r\nimport numpy as np\r\n\r\nfrom caffe2.python import workspace, cnn, core\r\nfrom caffe2.python import timeout_guard\r\nfrom caffe2.proto import caffe2_pb2\r\n\r\n\r\ndef init_model(self):\r\n train_model = cnn.CNNModelHelper(\r\n order=\"NCHW\",\r\n name=\"resnet\",\r\n use_cudnn=True,\r\n cudnn_exhaustive_search=False\r\n )\r\n self.train_model = train_model\r\n\r\n test_model = cnn.CNNModelHelper(\r\n order=\"NCHW\",\r\n name=\"resnet_test\",\r\n use_cudnn=True,\r\n cudnn_exhaustive_search=False,\r\n init_params=False,\r\n )\r\n self.test_model = test_model\r\n\r\n self.log.info(\"Model creation completed\")\r\n\r\n\r\ndef fun_per_epoch_b4RunNet(self, epoch):\r\n pass\r\n\r\n\r\ndef fun_per_iter_b4RunNet(self, epoch, epoch_iter):\r\n\r\n learning_rate = 0.05\r\n for idx in range(self.opts['distributed']['first_xpu_id'],\r\n self.opts['distributed']['first_xpu_id'] +\r\n self.opts['distributed']['num_xpus']):\r\n caffe2_pb2_device = caffe2_pb2.CUDA if \\\r\n self.opts['distributed']['device'] == 'gpu' else \\\r\n caffe2_pb2.CPU\r\n with core.DeviceScope(core.DeviceOption(caffe2_pb2_device, idx)):\r\n workspace.FeedBlob(\r\n '{}_{}/lr'.format(self.opts['distributed']['device'], idx),\r\n np.array(learning_rate, dtype=np.float32)\r\n )\r\n\r\n\r\ndef run_training_net(self):\r\n timeout = 2000.0\r\n with timeout_guard.CompleteInTimeOrDie(timeout):\r\n workspace.RunNet(self.train_model.net.Proto().name)\r\n",
"## @package train\r\n# Module caffe2.python.models.seq2seq.train\r\n\r\n\r\n\r\n\r\n\r\nimport argparse\r\nimport collections\r\nimport logging\r\nimport math\r\nimport numpy as np\r\nimport random\r\nimport time\r\nimport sys\r\nimport os\r\n\r\nimport caffe2.proto.caffe2_pb2 as caffe2_pb2\r\nfrom caffe2.python import core, workspace, data_parallel_model\r\nimport caffe2.python.models.seq2seq.seq2seq_util as seq2seq_util\r\nfrom caffe2.python.models.seq2seq.seq2seq_model_helper import Seq2SeqModelHelper\r\n\r\n\r\nlogger = logging.getLogger(__name__)\r\nlogger.setLevel(logging.INFO)\r\nlogger.addHandler(logging.StreamHandler(sys.stderr))\r\n\r\nBatch = collections.namedtuple('Batch', [\r\n 'encoder_inputs',\r\n 'encoder_lengths',\r\n 'decoder_inputs',\r\n 'decoder_lengths',\r\n 'targets',\r\n 'target_weights',\r\n])\r\n\r\n\r\ndef prepare_batch(batch):\r\n encoder_lengths = [len(entry[0]) for entry in batch]\r\n max_encoder_length = max(encoder_lengths)\r\n decoder_lengths = []\r\n max_decoder_length = max([len(entry[1]) for entry in batch])\r\n\r\n batch_encoder_inputs = []\r\n batch_decoder_inputs = []\r\n batch_targets = []\r\n batch_target_weights = []\r\n\r\n for source_seq, target_seq in batch:\r\n encoder_pads = (\r\n [seq2seq_util.PAD_ID] * (max_encoder_length - len(source_seq))\r\n )\r\n batch_encoder_inputs.append(\r\n list(reversed(source_seq)) + encoder_pads\r\n )\r\n\r\n decoder_pads = (\r\n [seq2seq_util.PAD_ID] * (max_decoder_length - len(target_seq))\r\n )\r\n target_seq_with_go_token = [seq2seq_util.GO_ID] + target_seq\r\n decoder_lengths.append(len(target_seq_with_go_token))\r\n batch_decoder_inputs.append(target_seq_with_go_token + decoder_pads)\r\n\r\n target_seq_with_eos = target_seq + [seq2seq_util.EOS_ID]\r\n targets = target_seq_with_eos + decoder_pads\r\n batch_targets.append(targets)\r\n\r\n if len(source_seq) + len(target_seq) == 0:\r\n target_weights = [0] * len(targets)\r\n else:\r\n target_weights = [\r\n 1 if target != seq2seq_util.PAD_ID else 0\r\n for target in targets\r\n ]\r\n batch_target_weights.append(target_weights)\r\n\r\n return Batch(\r\n encoder_inputs=np.array(\r\n batch_encoder_inputs,\r\n dtype=np.int32,\r\n ).transpose(),\r\n encoder_lengths=np.array(encoder_lengths, dtype=np.int32),\r\n decoder_inputs=np.array(\r\n batch_decoder_inputs,\r\n dtype=np.int32,\r\n ).transpose(),\r\n decoder_lengths=np.array(decoder_lengths, dtype=np.int32),\r\n targets=np.array(\r\n batch_targets,\r\n dtype=np.int32,\r\n ).transpose(),\r\n target_weights=np.array(\r\n batch_target_weights,\r\n dtype=np.float32,\r\n ).transpose(),\r\n )\r\n\r\n\r\nclass Seq2SeqModelCaffe2(object):\r\n\r\n def _build_model(\r\n self,\r\n init_params,\r\n ):\r\n model = Seq2SeqModelHelper(init_params=init_params)\r\n self._build_shared(model)\r\n self._build_embeddings(model)\r\n\r\n forward_model = Seq2SeqModelHelper(init_params=init_params)\r\n self._build_shared(forward_model)\r\n self._build_embeddings(forward_model)\r\n\r\n if self.num_gpus == 0:\r\n loss_blobs = self.model_build_fun(model)\r\n model.AddGradientOperators(loss_blobs)\r\n self.norm_clipped_grad_update(\r\n model,\r\n scope='norm_clipped_grad_update'\r\n )\r\n self.forward_model_build_fun(forward_model)\r\n\r\n else:\r\n assert (self.batch_size % self.num_gpus) == 0\r\n\r\n data_parallel_model.Parallelize_GPU(\r\n forward_model,\r\n input_builder_fun=lambda m: None,\r\n forward_pass_builder_fun=self.forward_model_build_fun,\r\n param_update_builder_fun=None,\r\n devices=list(range(self.num_gpus)),\r\n )\r\n\r\n 
def clipped_grad_update_bound(model):\r\n self.norm_clipped_grad_update(\r\n model,\r\n scope='norm_clipped_grad_update',\r\n )\r\n\r\n data_parallel_model.Parallelize_GPU(\r\n model,\r\n input_builder_fun=lambda m: None,\r\n forward_pass_builder_fun=self.model_build_fun,\r\n param_update_builder_fun=clipped_grad_update_bound,\r\n devices=list(range(self.num_gpus)),\r\n )\r\n self.norm_clipped_sparse_grad_update(\r\n model,\r\n scope='norm_clipped_sparse_grad_update',\r\n )\r\n self.model = model\r\n self.forward_net = forward_model.net\r\n\r\n def _build_shared(self, model):\r\n optimizer_params = self.model_params['optimizer_params']\r\n with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):\r\n self.learning_rate = model.AddParam(\r\n name='learning_rate',\r\n init_value=float(optimizer_params['learning_rate']),\r\n trainable=False,\r\n )\r\n self.global_step = model.AddParam(\r\n name='global_step',\r\n init_value=0,\r\n trainable=False,\r\n )\r\n self.start_time = model.AddParam(\r\n name='start_time',\r\n init_value=time.time(),\r\n trainable=False,\r\n )\r\n\r\n def _build_embeddings(self, model):\r\n with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):\r\n sqrt3 = math.sqrt(3)\r\n self.encoder_embeddings = model.param_init_net.UniformFill(\r\n [],\r\n 'encoder_embeddings',\r\n shape=[\r\n self.source_vocab_size,\r\n self.model_params['encoder_embedding_size'],\r\n ],\r\n min=-sqrt3,\r\n max=sqrt3,\r\n )\r\n model.params.append(self.encoder_embeddings)\r\n self.decoder_embeddings = model.param_init_net.UniformFill(\r\n [],\r\n 'decoder_embeddings',\r\n shape=[\r\n self.target_vocab_size,\r\n self.model_params['decoder_embedding_size'],\r\n ],\r\n min=-sqrt3,\r\n max=sqrt3,\r\n )\r\n model.params.append(self.decoder_embeddings)\r\n\r\n def model_build_fun(self, model, forward_only=False, loss_scale=None):\r\n encoder_inputs = model.net.AddExternalInput(\r\n workspace.GetNameScope() + 'encoder_inputs',\r\n )\r\n encoder_lengths = model.net.AddExternalInput(\r\n workspace.GetNameScope() + 'encoder_lengths',\r\n )\r\n decoder_inputs = model.net.AddExternalInput(\r\n workspace.GetNameScope() + 'decoder_inputs',\r\n )\r\n decoder_lengths = model.net.AddExternalInput(\r\n workspace.GetNameScope() + 'decoder_lengths',\r\n )\r\n targets = model.net.AddExternalInput(\r\n workspace.GetNameScope() + 'targets',\r\n )\r\n target_weights = model.net.AddExternalInput(\r\n workspace.GetNameScope() + 'target_weights',\r\n )\r\n attention_type = self.model_params['attention']\r\n assert attention_type in ['none', 'regular', 'dot']\r\n\r\n (\r\n encoder_outputs,\r\n weighted_encoder_outputs,\r\n final_encoder_hidden_states,\r\n final_encoder_cell_states,\r\n encoder_units_per_layer,\r\n ) = seq2seq_util.build_embedding_encoder(\r\n model=model,\r\n encoder_params=self.encoder_params,\r\n num_decoder_layers=len(self.model_params['decoder_layer_configs']),\r\n inputs=encoder_inputs,\r\n input_lengths=encoder_lengths,\r\n vocab_size=self.source_vocab_size,\r\n embeddings=self.encoder_embeddings,\r\n embedding_size=self.model_params['encoder_embedding_size'],\r\n use_attention=(attention_type != 'none'),\r\n num_gpus=self.num_gpus,\r\n )\r\n\r\n (\r\n decoder_outputs,\r\n decoder_output_size,\r\n ) = seq2seq_util.build_embedding_decoder(\r\n model,\r\n decoder_layer_configs=self.model_params['decoder_layer_configs'],\r\n inputs=decoder_inputs,\r\n input_lengths=decoder_lengths,\r\n encoder_lengths=encoder_lengths,\r\n encoder_outputs=encoder_outputs,\r\n 
weighted_encoder_outputs=weighted_encoder_outputs,\r\n final_encoder_hidden_states=final_encoder_hidden_states,\r\n final_encoder_cell_states=final_encoder_cell_states,\r\n encoder_units_per_layer=encoder_units_per_layer,\r\n vocab_size=self.target_vocab_size,\r\n embeddings=self.decoder_embeddings,\r\n embedding_size=self.model_params['decoder_embedding_size'],\r\n attention_type=attention_type,\r\n forward_only=False,\r\n num_gpus=self.num_gpus,\r\n )\r\n\r\n output_logits = seq2seq_util.output_projection(\r\n model=model,\r\n decoder_outputs=decoder_outputs,\r\n decoder_output_size=decoder_output_size,\r\n target_vocab_size=self.target_vocab_size,\r\n decoder_softmax_size=self.model_params['decoder_softmax_size'],\r\n )\r\n targets, _ = model.net.Reshape(\r\n [targets],\r\n ['targets', 'targets_old_shape'],\r\n shape=[-1],\r\n )\r\n target_weights, _ = model.net.Reshape(\r\n [target_weights],\r\n ['target_weights', 'target_weights_old_shape'],\r\n shape=[-1],\r\n )\r\n _, loss_per_word = model.net.SoftmaxWithLoss(\r\n [output_logits, targets, target_weights],\r\n ['OutputProbs_INVALID', 'loss_per_word'],\r\n only_loss=True,\r\n )\r\n\r\n num_words = model.net.SumElements(\r\n [target_weights],\r\n 'num_words',\r\n )\r\n total_loss_scalar = model.net.Mul(\r\n [loss_per_word, num_words],\r\n 'total_loss_scalar',\r\n )\r\n total_loss_scalar_weighted = model.net.Scale(\r\n [total_loss_scalar],\r\n 'total_loss_scalar_weighted',\r\n scale=1.0 / self.batch_size,\r\n )\r\n return [total_loss_scalar_weighted]\r\n\r\n def forward_model_build_fun(self, model, loss_scale=None):\r\n return self.model_build_fun(\r\n model=model,\r\n forward_only=True,\r\n loss_scale=loss_scale\r\n )\r\n\r\n def _calc_norm_ratio(self, model, params, scope, ONE):\r\n with core.NameScope(scope):\r\n grad_squared_sums = []\r\n for i, param in enumerate(params):\r\n logger.info(param)\r\n grad = (\r\n model.param_to_grad[param]\r\n if not isinstance(\r\n model.param_to_grad[param],\r\n core.GradientSlice,\r\n ) else model.param_to_grad[param].values\r\n )\r\n grad_squared = model.net.Sqr(\r\n [grad],\r\n 'grad_{}_squared'.format(i),\r\n )\r\n grad_squared_sum = model.net.SumElements(\r\n grad_squared,\r\n 'grad_{}_squared_sum'.format(i),\r\n )\r\n grad_squared_sums.append(grad_squared_sum)\r\n\r\n grad_squared_full_sum = model.net.Sum(\r\n grad_squared_sums,\r\n 'grad_squared_full_sum',\r\n )\r\n global_norm = model.net.Pow(\r\n grad_squared_full_sum,\r\n 'global_norm',\r\n exponent=0.5,\r\n )\r\n clip_norm = model.param_init_net.ConstantFill(\r\n [],\r\n 'clip_norm',\r\n shape=[],\r\n value=float(self.model_params['max_gradient_norm']),\r\n )\r\n max_norm = model.net.Max(\r\n [global_norm, clip_norm],\r\n 'max_norm',\r\n )\r\n norm_ratio = model.net.Div(\r\n [clip_norm, max_norm],\r\n 'norm_ratio',\r\n )\r\n return norm_ratio\r\n\r\n def _apply_norm_ratio(\r\n self, norm_ratio, model, params, learning_rate, scope, ONE\r\n ):\r\n for param in params:\r\n param_grad = model.param_to_grad[param]\r\n nlr = model.net.Negative(\r\n [learning_rate],\r\n 'negative_learning_rate',\r\n )\r\n with core.NameScope(scope):\r\n update_coeff = model.net.Mul(\r\n [nlr, norm_ratio],\r\n 'update_coeff',\r\n broadcast=1,\r\n )\r\n if isinstance(param_grad, core.GradientSlice):\r\n param_grad_values = param_grad.values\r\n\r\n model.net.ScatterWeightedSum(\r\n [\r\n param,\r\n ONE,\r\n param_grad.indices,\r\n param_grad_values,\r\n update_coeff,\r\n ],\r\n param,\r\n )\r\n else:\r\n model.net.WeightedSum(\r\n [\r\n param,\r\n ONE,\r\n 
param_grad,\r\n update_coeff,\r\n ],\r\n param,\r\n )\r\n\r\n def norm_clipped_grad_update(self, model, scope):\r\n\r\n if self.num_gpus == 0:\r\n learning_rate = self.learning_rate\r\n else:\r\n learning_rate = model.CopyCPUToGPU(self.learning_rate, 'LR')\r\n\r\n params = []\r\n for param in model.GetParams(top_scope=True):\r\n if param in model.param_to_grad:\r\n if not isinstance(\r\n model.param_to_grad[param],\r\n core.GradientSlice,\r\n ):\r\n params.append(param)\r\n\r\n ONE = model.param_init_net.ConstantFill(\r\n [],\r\n 'ONE',\r\n shape=[1],\r\n value=1.0,\r\n )\r\n logger.info('Dense trainable variables: ')\r\n norm_ratio = self._calc_norm_ratio(model, params, scope, ONE)\r\n self._apply_norm_ratio(\r\n norm_ratio, model, params, learning_rate, scope, ONE\r\n )\r\n\r\n def norm_clipped_sparse_grad_update(self, model, scope):\r\n learning_rate = self.learning_rate\r\n\r\n params = []\r\n for param in model.GetParams(top_scope=True):\r\n if param in model.param_to_grad:\r\n if isinstance(\r\n model.param_to_grad[param],\r\n core.GradientSlice,\r\n ):\r\n params.append(param)\r\n\r\n ONE = model.param_init_net.ConstantFill(\r\n [],\r\n 'ONE',\r\n shape=[1],\r\n value=1.0,\r\n )\r\n logger.info('Sparse trainable variables: ')\r\n norm_ratio = self._calc_norm_ratio(model, params, scope, ONE)\r\n self._apply_norm_ratio(\r\n norm_ratio, model, params, learning_rate, scope, ONE\r\n )\r\n\r\n def total_loss_scalar(self):\r\n if self.num_gpus == 0:\r\n return workspace.FetchBlob('total_loss_scalar')\r\n else:\r\n total_loss = 0\r\n for i in range(self.num_gpus):\r\n name = 'gpu_{}/total_loss_scalar'.format(i)\r\n gpu_loss = workspace.FetchBlob(name)\r\n total_loss += gpu_loss\r\n return total_loss\r\n\r\n def _init_model(self):\r\n workspace.RunNetOnce(self.model.param_init_net)\r\n\r\n def create_net(net):\r\n workspace.CreateNet(\r\n net,\r\n input_blobs=[str(i) for i in net.external_inputs],\r\n )\r\n\r\n create_net(self.model.net)\r\n create_net(self.forward_net)\r\n\r\n def __init__(\r\n self,\r\n model_params,\r\n source_vocab_size,\r\n target_vocab_size,\r\n num_gpus=1,\r\n num_cpus=1,\r\n ):\r\n self.model_params = model_params\r\n self.encoder_type = 'rnn'\r\n self.encoder_params = model_params['encoder_type']\r\n self.source_vocab_size = source_vocab_size\r\n self.target_vocab_size = target_vocab_size\r\n self.num_gpus = num_gpus\r\n self.num_cpus = num_cpus\r\n self.batch_size = model_params['batch_size']\r\n\r\n workspace.GlobalInit([\r\n 'caffe2',\r\n # NOTE: modify log level for debugging purposes\r\n '--caffe2_log_level=0',\r\n # NOTE: modify log level for debugging purposes\r\n '--v=0',\r\n # Fail gracefully if one of the threads fails\r\n '--caffe2_handle_executor_threads_exceptions=1',\r\n '--caffe2_mkl_num_threads=' + str(self.num_cpus),\r\n ])\r\n\r\n def __enter__(self):\r\n return self\r\n\r\n def __exit__(self, exc_type, exc_value, traceback):\r\n workspace.ResetWorkspace()\r\n\r\n def initialize_from_scratch(self):\r\n logger.info('Initializing Seq2SeqModelCaffe2 from scratch: Start')\r\n self._build_model(init_params=True)\r\n self._init_model()\r\n logger.info('Initializing Seq2SeqModelCaffe2 from scratch: Finish')\r\n\r\n def get_current_step(self):\r\n return workspace.FetchBlob(self.global_step)[0]\r\n\r\n def inc_current_step(self):\r\n workspace.FeedBlob(\r\n self.global_step,\r\n np.array([self.get_current_step() + 1]),\r\n )\r\n\r\n def step(\r\n self,\r\n batch,\r\n forward_only\r\n ):\r\n if self.num_gpus < 1:\r\n batch_obj = prepare_batch(batch)\r\n for 
batch_obj_name, batch_obj_value in zip(\r\n Batch._fields,\r\n batch_obj,\r\n ):\r\n workspace.FeedBlob(batch_obj_name, batch_obj_value)\r\n else:\r\n for i in range(self.num_gpus):\r\n gpu_batch = batch[i::self.num_gpus]\r\n batch_obj = prepare_batch(gpu_batch)\r\n for batch_obj_name, batch_obj_value in zip(\r\n Batch._fields,\r\n batch_obj,\r\n ):\r\n name = 'gpu_{}/{}'.format(i, batch_obj_name)\r\n if batch_obj_name in ['encoder_inputs', 'decoder_inputs']:\r\n dev = core.DeviceOption(caffe2_pb2.CPU)\r\n else:\r\n dev = core.DeviceOption(workspace.GpuDeviceType, i)\r\n workspace.FeedBlob(name, batch_obj_value, device_option=dev)\r\n\r\n if forward_only:\r\n workspace.RunNet(self.forward_net)\r\n else:\r\n workspace.RunNet(self.model.net)\r\n self.inc_current_step()\r\n\r\n return self.total_loss_scalar()\r\n\r\n def save(self, checkpoint_path_prefix, current_step):\r\n checkpoint_path = '{0}-{1}'.format(\r\n checkpoint_path_prefix,\r\n current_step,\r\n )\r\n\r\n assert workspace.RunOperatorOnce(core.CreateOperator(\r\n 'Save',\r\n self.model.GetAllParams(),\r\n [],\r\n absolute_path=True,\r\n db=checkpoint_path,\r\n db_type='minidb',\r\n ))\r\n\r\n checkpoint_config_path = os.path.join(\r\n os.path.dirname(checkpoint_path_prefix),\r\n 'checkpoint',\r\n )\r\n with open(checkpoint_config_path, 'w') as checkpoint_config_file:\r\n checkpoint_config_file.write(\r\n 'model_checkpoint_path: \"' + checkpoint_path + '\"\\n'\r\n 'all_model_checkpoint_paths: \"' + checkpoint_path + '\"\\n'\r\n )\r\n logger.info('Saved checkpoint file to ' + checkpoint_path)\r\n\r\n return checkpoint_path\r\n\r\ndef gen_batches(source_corpus, target_corpus, source_vocab, target_vocab,\r\n batch_size, max_length):\r\n with open(source_corpus) as source, open(target_corpus) as target:\r\n parallel_sentences = []\r\n for source_sentence, target_sentence in zip(source, target):\r\n numerized_source_sentence = seq2seq_util.get_numberized_sentence(\r\n source_sentence,\r\n source_vocab,\r\n )\r\n numerized_target_sentence = seq2seq_util.get_numberized_sentence(\r\n target_sentence,\r\n target_vocab,\r\n )\r\n if (\r\n len(numerized_source_sentence) > 0 and\r\n len(numerized_target_sentence) > 0 and\r\n (\r\n max_length is None or (\r\n len(numerized_source_sentence) <= max_length and\r\n len(numerized_target_sentence) <= max_length\r\n )\r\n )\r\n ):\r\n parallel_sentences.append((\r\n numerized_source_sentence,\r\n numerized_target_sentence,\r\n ))\r\n parallel_sentences.sort(key=lambda s_t: (len(s_t[0]), len(s_t[1])))\r\n\r\n batches, batch = [], []\r\n for sentence_pair in parallel_sentences:\r\n batch.append(sentence_pair)\r\n if len(batch) >= batch_size:\r\n batches.append(batch)\r\n batch = []\r\n if len(batch) > 0:\r\n while len(batch) < batch_size:\r\n batch.append(batch[-1])\r\n assert len(batch) == batch_size\r\n batches.append(batch)\r\n random.shuffle(batches)\r\n return batches\r\n\r\n\r\ndef run_seq2seq_model(args, model_params=None):\r\n source_vocab = seq2seq_util.gen_vocab(\r\n args.source_corpus,\r\n args.unk_threshold,\r\n )\r\n target_vocab = seq2seq_util.gen_vocab(\r\n args.target_corpus,\r\n args.unk_threshold,\r\n )\r\n logger.info('Source vocab size {}'.format(len(source_vocab)))\r\n logger.info('Target vocab size {}'.format(len(target_vocab)))\r\n\r\n batches = gen_batches(args.source_corpus, args.target_corpus, source_vocab,\r\n target_vocab, model_params['batch_size'],\r\n args.max_length)\r\n logger.info('Number of training batches {}'.format(len(batches)))\r\n\r\n batches_eval = 
gen_batches(args.source_corpus_eval, args.target_corpus_eval,\r\n source_vocab, target_vocab,\r\n model_params['batch_size'], args.max_length)\r\n logger.info('Number of eval batches {}'.format(len(batches_eval)))\r\n\r\n with Seq2SeqModelCaffe2(\r\n model_params=model_params,\r\n source_vocab_size=len(source_vocab),\r\n target_vocab_size=len(target_vocab),\r\n num_gpus=args.num_gpus,\r\n num_cpus=20,\r\n ) as model_obj:\r\n model_obj.initialize_from_scratch()\r\n for i in range(args.epochs):\r\n logger.info('Epoch {}'.format(i))\r\n total_loss = 0\r\n for batch in batches:\r\n total_loss += model_obj.step(\r\n batch=batch,\r\n forward_only=False,\r\n )\r\n logger.info('\\ttraining loss {}'.format(total_loss))\r\n total_loss = 0\r\n for batch in batches_eval:\r\n total_loss += model_obj.step(\r\n batch=batch,\r\n forward_only=True,\r\n )\r\n logger.info('\\teval loss {}'.format(total_loss))\r\n if args.checkpoint is not None:\r\n model_obj.save(args.checkpoint, i)\r\n\r\n\r\ndef main():\r\n random.seed(31415)\r\n parser = argparse.ArgumentParser(\r\n description='Caffe2: Seq2Seq Training'\r\n )\r\n parser.add_argument('--source-corpus', type=str, default=None,\r\n help='Path to source corpus in a text file format. Each '\r\n 'line in the file should contain a single sentence',\r\n required=True)\r\n parser.add_argument('--target-corpus', type=str, default=None,\r\n help='Path to target corpus in a text file format',\r\n required=True)\r\n parser.add_argument('--max-length', type=int, default=None,\r\n help='Maximal lengths of train and eval sentences')\r\n parser.add_argument('--unk-threshold', type=int, default=50,\r\n help='Threshold frequency under which token becomes '\r\n 'labeled unknown token')\r\n\r\n parser.add_argument('--batch-size', type=int, default=32,\r\n help='Training batch size')\r\n parser.add_argument('--epochs', type=int, default=10,\r\n help='Number of iterations over training data')\r\n parser.add_argument('--learning-rate', type=float, default=0.5,\r\n help='Learning rate')\r\n parser.add_argument('--max-gradient-norm', type=float, default=1.0,\r\n help='Max global norm of gradients at the end of each '\r\n 'backward pass. 
We do clipping to match the number.')\r\n parser.add_argument('--num-gpus', type=int, default=0,\r\n help='Number of GPUs for data parallel model')\r\n\r\n parser.add_argument('--use-bidirectional-encoder', action='store_true',\r\n help='Set flag to use bidirectional recurrent network '\r\n 'for first layer of encoder')\r\n parser.add_argument('--use-attention', action='store_true',\r\n help='Set flag to use seq2seq with attention model')\r\n parser.add_argument('--source-corpus-eval', type=str, default=None,\r\n help='Path to source corpus for evaluation in a text '\r\n 'file format', required=True)\r\n parser.add_argument('--target-corpus-eval', type=str, default=None,\r\n help='Path to target corpus for evaluation in a text '\r\n 'file format', required=True)\r\n parser.add_argument('--encoder-cell-num-units', type=int, default=512,\r\n help='Number of cell units per encoder layer')\r\n parser.add_argument('--encoder-num-layers', type=int, default=2,\r\n help='Number encoder layers')\r\n parser.add_argument('--decoder-cell-num-units', type=int, default=512,\r\n help='Number of cell units in the decoder layer')\r\n parser.add_argument('--decoder-num-layers', type=int, default=2,\r\n help='Number decoder layers')\r\n parser.add_argument('--encoder-embedding-size', type=int, default=256,\r\n help='Size of embedding in the encoder layer')\r\n parser.add_argument('--decoder-embedding-size', type=int, default=512,\r\n help='Size of embedding in the decoder layer')\r\n parser.add_argument('--decoder-softmax-size', type=int, default=None,\r\n help='Size of softmax layer in the decoder')\r\n\r\n parser.add_argument('--checkpoint', type=str, default=None,\r\n help='Path to checkpoint')\r\n\r\n args = parser.parse_args()\r\n\r\n encoder_layer_configs = [\r\n dict(\r\n num_units=args.encoder_cell_num_units,\r\n ),\r\n ] * args.encoder_num_layers\r\n\r\n if args.use_bidirectional_encoder:\r\n assert args.encoder_cell_num_units % 2 == 0\r\n encoder_layer_configs[0]['num_units'] /= 2\r\n\r\n decoder_layer_configs = [\r\n dict(\r\n num_units=args.decoder_cell_num_units,\r\n ),\r\n ] * args.decoder_num_layers\r\n\r\n run_seq2seq_model(args, model_params=dict(\r\n attention=('regular' if args.use_attention else 'none'),\r\n decoder_layer_configs=decoder_layer_configs,\r\n encoder_type=dict(\r\n encoder_layer_configs=encoder_layer_configs,\r\n use_bidirectional_encoder=args.use_bidirectional_encoder,\r\n ),\r\n batch_size=args.batch_size,\r\n optimizer_params=dict(\r\n learning_rate=args.learning_rate,\r\n ),\r\n encoder_embedding_size=args.encoder_embedding_size,\r\n decoder_embedding_size=args.decoder_embedding_size,\r\n decoder_softmax_size=args.decoder_softmax_size,\r\n max_gradient_norm=args.max_gradient_norm,\r\n ))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n",
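The entry above ends with the argparse-driven training loop of the Caffe2 seq2seq example. As a hedged, stand-alone sketch (plain Python on toy sentence pairs invented for illustration; not the Caffe2 module itself), the snippet below mirrors the batching strategy of gen_batches: sort parallel sentences by length, fill fixed-size batches, pad the last batch by repeating its final pair, then shuffle the batches.

# Hedged sketch of the gen_batches batching strategy shown above.
# The "numberized" toy sentence pairs are invented for the example.
import random

def toy_gen_batches(parallel_sentences, batch_size):
    # Sort by (source length, target length) so each batch groups sentences
    # of similar length and padding overhead stays low.
    pairs = sorted(parallel_sentences, key=lambda s_t: (len(s_t[0]), len(s_t[1])))
    batches, batch = [], []
    for pair in pairs:
        batch.append(pair)
        if len(batch) >= batch_size:
            batches.append(batch)
            batch = []
    if batch:
        # Repeat the last pair until the final batch is full, mirroring the
        # padding step in gen_batches.
        while len(batch) < batch_size:
            batch.append(batch[-1])
        batches.append(batch)
    random.shuffle(batches)
    return batches

if __name__ == "__main__":
    toy_pairs = [([1, 2, 3], [4, 5]), ([1], [2]), ([1, 2], [3, 4, 5]), ([6], [7, 8])]
    for b in toy_gen_batches(toy_pairs, batch_size=2):
        print(b)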
"# Copyright (c) 2016-present, Facebook, Inc.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n##############################################################################\r\n\r\n\r\n\r\nimport caffe2.python.hypothesis_test_util as hu\r\nimport hypothesis.strategies as st\r\nimport numpy as np\r\nfrom caffe2.python import core, workspace\r\nfrom caffe2.quantization.server import dnnlowp_pybind11\r\nfrom hypothesis import given, settings\r\n\r\n\r\nclass TestInt8GenQuantParamsMinMaxOperator(hu.HypothesisTestCase):\r\n @settings(max_examples=20, deadline=None)\r\n @given(\r\n n=st.integers(10, 10),\r\n m=st.integers(10, 10),\r\n preserve_sparsity=st.booleans(),\r\n rnd_seed=st.integers(1, 5),\r\n **hu.gcs_cpu_only\r\n )\r\n def test_int8_gen_quant_params_min_max_op(\r\n self, n, m, preserve_sparsity, rnd_seed, gc, dc\r\n ):\r\n X_min = 0 if preserve_sparsity else -77\r\n X_max = X_min + 255\r\n np.random.seed(rnd_seed)\r\n X = np.round(np.random.rand(n, m) * (X_max - X_min) + X_min).astype(\r\n np.float32\r\n )\r\n # Calculate X_qparam\r\n hist, bin_edges = np.histogram(X.flatten(), bins=2048)\r\n X_qparam = dnnlowp_pybind11.ChooseStaticQuantizationParams(\r\n np.min(X), np.max(X), hist, preserve_sparsity, 8, \"MIN_MAX_QUANTIZATION\"\r\n )\r\n\r\n # Build a net to generate X's qparam using the Int8GenQuantParamsMinMax op\r\n workspace.FeedBlob(\"X\", X, device_option=gc)\r\n workspace.FeedBlob(\"X_min\", np.array([np.min(X)]), device_option=gc)\r\n workspace.FeedBlob(\"X_max\", np.array([np.max(X)]), device_option=gc)\r\n dnnlowp_pybind11.CreateInt8QuantSchemeBlob(\r\n \"quant_scheme\", \"MIN_MAX_QUANTIZATION\", preserve_sparsity\r\n )\r\n assert workspace.HasBlob(\r\n \"quant_scheme\"\r\n ), \"Failed to create the quant_scheme blob in current workspace\"\r\n\r\n gen_quant_params_net = core.Net(\"gen_quant_params_min_max\")\r\n gen_quant_params_op = core.CreateOperator(\r\n \"Int8GenQuantParamsMinMax\",\r\n [\"X_min\", \"X_max\", \"quant_scheme\"],\r\n [\"quant_param\"],\r\n device_option=gc,\r\n )\r\n gen_quant_params_net.Proto().op.extend([gen_quant_params_op])\r\n assert workspace.RunNetOnce(\r\n gen_quant_params_net\r\n ), \"Failed to run the gen_quant_params net\"\r\n scale, zero_point = dnnlowp_pybind11.ObserveInt8QuantParamsBlob(\"quant_param\")\r\n\r\n shapes, types = workspace.InferShapesAndTypes(\r\n [gen_quant_params_net],\r\n blob_dimensions={\"X\": [n, m], \"X_min\": [1], \"X_max\": [1], \"quant_scheme\": [1]},\r\n blob_types={\"X\": core.DataType.FLOAT, \"X_min\": core.DataType.FLOAT, \"X_max\": core.DataType.FLOAT, \"quant_scheme\": core.DataType.STRING}\r\n )\r\n self.assertEqual(shapes[\"quant_param\"], [1])\r\n self.assertEqual(types[\"quant_param\"], core.DataType.FLOAT)\r\n\r\n np.testing.assert_equal(scale, X_qparam.scale)\r\n np.testing.assert_equal(zero_point, X_qparam.zero_point)\r\n",
"# This file is part of h5py, a Python interface to the HDF5 library.\r\n#\r\n# http://www.h5py.org\r\n#\r\n# Copyright 2008-2013 Andrew Collette and contributors\r\n#\r\n# License: Standard 3-clause BSD; see \"license.txt\" for full license terms\r\n# and contributor agreement.\r\n\r\nimport sys\r\n\r\nimport numpy as np\r\n\r\nfrom .common import ut, TestCase\r\nfrom h5py import File, Group, Dataset\r\nimport h5py\r\n\r\n\r\nclass BaseDataset(TestCase):\r\n\r\n \"\"\"\r\n data is a 3-dimensional dataset with dimensions [z, y, x]\r\n\r\n The z dimension is labeled. It does not have any attached scales.\r\n The y dimension is not labeled. It has one attached scale.\r\n The x dimension is labeled. It has two attached scales.\r\n\r\n data2 is a 3-dimensional dataset with no associated dimension scales.\r\n \"\"\"\r\n\r\n def setUp(self):\r\n self.f = File(self.mktemp(), 'w')\r\n self.f['data'] = np.ones((4, 3, 2), 'f')\r\n self.f['data2'] = np.ones((4, 3, 2), 'f')\r\n self.f['x1'] = np.ones((2), 'f')\r\n h5py.h5ds.set_scale(self.f['x1'].id)\r\n h5py.h5ds.attach_scale(self.f['data'].id, self.f['x1'].id, 2)\r\n self.f['x2'] = np.ones((2), 'f')\r\n h5py.h5ds.set_scale(self.f['x2'].id, b'x2 name')\r\n h5py.h5ds.attach_scale(self.f['data'].id, self.f['x2'].id, 2)\r\n self.f['y1'] = np.ones((3), 'f')\r\n h5py.h5ds.set_scale(self.f['y1'].id, b'y1 name')\r\n h5py.h5ds.attach_scale(self.f['data'].id, self.f['y1'].id, 1)\r\n self.f['z1'] = np.ones((4), 'f')\r\n\r\n h5py.h5ds.set_label(self.f['data'].id, 0, b'z')\r\n h5py.h5ds.set_label(self.f['data'].id, 2, b'x')\r\n\r\n def tearDown(self):\r\n if self.f:\r\n self.f.close()\r\n\r\n\r\nclass TestH5DSBindings(BaseDataset):\r\n\r\n \"\"\"\r\n Feature: Datasets can be created from existing data\r\n \"\"\"\r\n\r\n def test_create_dimensionscale(self):\r\n \"\"\" Create a dimension scale from existing dataset \"\"\"\r\n self.assertTrue(h5py.h5ds.is_scale(self.f['x1'].id))\r\n self.assertEqual(h5py.h5ds.get_scale_name(self.f['x1'].id), b'')\r\n self.assertEqual(self.f['x1'].attrs['CLASS'], b\"DIMENSION_SCALE\")\r\n self.assertEqual(h5py.h5ds.get_scale_name(self.f['x2'].id), b'x2 name')\r\n\r\n def test_attach_dimensionscale(self):\r\n self.assertTrue(\r\n h5py.h5ds.is_attached(self.f['data'].id, self.f['x1'].id, 2)\r\n )\r\n self.assertFalse(\r\n h5py.h5ds.is_attached(self.f['data'].id, self.f['x1'].id, 1))\r\n self.assertEqual(h5py.h5ds.get_num_scales(self.f['data'].id, 0), 0)\r\n self.assertEqual(h5py.h5ds.get_num_scales(self.f['data'].id, 1), 1)\r\n self.assertEqual(h5py.h5ds.get_num_scales(self.f['data'].id, 2), 2)\r\n\r\n def test_detach_dimensionscale(self):\r\n self.assertTrue(\r\n h5py.h5ds.is_attached(self.f['data'].id, self.f['x1'].id, 2)\r\n )\r\n h5py.h5ds.detach_scale(self.f['data'].id, self.f['x1'].id, 2)\r\n self.assertFalse(\r\n h5py.h5ds.is_attached(self.f['data'].id, self.f['x1'].id, 2)\r\n )\r\n self.assertEqual(h5py.h5ds.get_num_scales(self.f['data'].id, 2), 1)\r\n\r\n # TODO: update condition once the bug is fixed upstream\r\n @ut.skipUnless(\r\n h5py.version.hdf5_version_tuple > (2, 0, 0),\r\n \"Reading non-existent label segfaults\"\r\n )\r\n def test_label_dimensionscale(self):\r\n self.assertEqual(h5py.h5ds.get_label(self.f['data'].id, 0), b'z')\r\n self.assertEqual(h5py.h5ds.get_label(self.f['data'].id, 1), b'')\r\n self.assertEqual(h5py.h5ds.get_label(self.f['data'].id, 2), b'x')\r\n\r\n def test_iter_dimensionscales(self):\r\n def func(dsid):\r\n res = h5py.h5ds.get_scale_name(dsid)\r\n if res == b'x2 name':\r\n return 
dsid\r\n\r\n res = h5py.h5ds.iterate(self.f['data'].id, 2, func, 0)\r\n self.assertEqual(h5py.h5ds.get_scale_name(res), b'x2 name')\r\n\r\n\r\nclass TestDimensionManager(BaseDataset):\r\n\r\n def test_make_scale(self):\r\n # test recreating or renaming an existing scale:\r\n self.f['x1'].make_scale(b'foobar')\r\n self.assertEqual(self.f['data'].dims[2]['foobar'], self.f['x1'])\r\n # test creating entirely new scale:\r\n self.f['data2'].make_scale(b'foobaz')\r\n self.f['data'].dims[2].attach_scale(self.f['data2'])\r\n self.assertEqual(self.f['data'].dims[2]['foobaz'], self.f['data2'])\r\n\r\n def test_get_dimension(self):\r\n with self.assertRaises(IndexError):\r\n self.f['data'].dims[3]\r\n\r\n def test_len(self):\r\n self.assertEqual(len(self.f['data'].dims), 3)\r\n self.assertEqual(len(self.f['data2'].dims), 3)\r\n\r\n def test_iter(self):\r\n dims = self.f['data'].dims\r\n self.assertEqual(\r\n [d for d in dims],\r\n [dims[0], dims[1], dims[2]]\r\n )\r\n\r\n def test_repr(self):\r\n ds = self.f.create_dataset('x', (2,3))\r\n self.assertIsInstance(repr(ds.dims), str)\r\n self.f.close()\r\n self.assertIsInstance(repr(ds.dims), str)\r\n\r\n\r\nclass TestDimensionsHighLevel(BaseDataset):\r\n\r\n def test_len(self):\r\n self.assertEqual(len(self.f['data'].dims[0]), 0)\r\n self.assertEqual(len(self.f['data'].dims[1]), 1)\r\n self.assertEqual(len(self.f['data'].dims[2]), 2)\r\n self.assertEqual(len(self.f['data2'].dims[0]), 0)\r\n self.assertEqual(len(self.f['data2'].dims[1]), 0)\r\n self.assertEqual(len(self.f['data2'].dims[2]), 0)\r\n\r\n def test_get_label(self):\r\n self.assertEqual(self.f['data'].dims[2].label, 'x')\r\n self.assertEqual(self.f['data'].dims[1].label, '')\r\n self.assertEqual(self.f['data'].dims[0].label, 'z')\r\n self.assertEqual(self.f['data2'].dims[2].label, '')\r\n self.assertEqual(self.f['data2'].dims[1].label, '')\r\n self.assertEqual(self.f['data2'].dims[0].label, '')\r\n\r\n def test_set_label(self):\r\n self.f['data'].dims[0].label = 'foo'\r\n self.assertEqual(self.f['data'].dims[2].label, 'x')\r\n self.assertEqual(self.f['data'].dims[1].label, '')\r\n self.assertEqual(self.f['data'].dims[0].label, 'foo')\r\n\r\n def test_detach_scale(self):\r\n self.f['data'].dims[2].detach_scale(self.f['x1'])\r\n self.assertEqual(len(self.f['data'].dims[2]), 1)\r\n self.assertEqual(self.f['data'].dims[2][0], self.f['x2'])\r\n self.f['data'].dims[2].detach_scale(self.f['x2'])\r\n self.assertEqual(len(self.f['data'].dims[2]), 0)\r\n\r\n def test_attach_scale(self):\r\n self.f['x3'] = self.f['x2'][...]\r\n self.f['data'].dims[2].attach_scale(self.f['x3'])\r\n self.assertEqual(len(self.f['data'].dims[2]), 3)\r\n self.assertEqual(self.f['data'].dims[2][2], self.f['x3'])\r\n\r\n def test_get_dimension_scale(self):\r\n self.assertEqual(self.f['data'].dims[2][0], self.f['x1'])\r\n with self.assertRaises(RuntimeError):\r\n self.f['data2'].dims[2][0], self.f['x2']\r\n self.assertEqual(self.f['data'].dims[2][''], self.f['x1'])\r\n self.assertEqual(self.f['data'].dims[2]['x2 name'], self.f['x2'])\r\n\r\n def test_get_items(self):\r\n self.assertEqual(\r\n self.f['data'].dims[2].items(),\r\n [('', self.f['x1']), ('x2 name', self.f['x2'])]\r\n )\r\n\r\n def test_get_keys(self):\r\n self.assertEqual(self.f['data'].dims[2].keys(), ['', 'x2 name'])\r\n\r\n def test_get_values(self):\r\n self.assertEqual(\r\n self.f['data'].dims[2].values(),\r\n [self.f['x1'], self.f['x2']]\r\n )\r\n\r\n def test_iter(self):\r\n self.assertEqual([i for i in self.f['data'].dims[2]], ['', 'x2 name'])\r\n\r\n def 
test_repr(self):\r\n ds = self.f[\"data\"]\r\n self.assertEqual(repr(ds.dims[2])[1:16], '\"x\" dimension 2')\r\n self.f.close()\r\n self.assertIsInstance(repr(ds.dims), str)\r\n\r\n def test_attributes(self):\r\n self.f[\"data2\"].attrs[\"DIMENSION_LIST\"] = self.f[\"data\"].attrs[\r\n \"DIMENSION_LIST\"]\r\n self.assertEqual(len(self.f['data2'].dims[0]), 0)\r\n self.assertEqual(len(self.f['data2'].dims[1]), 1)\r\n self.assertEqual(len(self.f['data2'].dims[2]), 2)\r\n",
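The h5py tests above exercise dimension scales through both the low-level h5ds bindings and the high-level Dataset.dims interface. A minimal hedged sketch of the same workflow using only the high-level API (the file name and dataset names are made up for the example):

# Hedged sketch: create a scale, attach it to an axis, and label the axis.
import numpy as np
import h5py

with h5py.File("dims_demo.h5", "w") as f:            # throwaway demo file
    f["data"] = np.ones((4, 3, 2), "f")
    f["x"] = np.arange(2, dtype="f")
    f["x"].make_scale("x coordinate")                 # turn the dataset into a scale
    f["data"].dims[2].attach_scale(f["x"])            # attach it to the last axis
    f["data"].dims[2].label = "x"                     # label the axis itself
    print(f["data"].dims[2].label)                    # -> 'x'
    print(list(f["data"].dims[2].keys()))             # -> ['x coordinate']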
"\r\n\r\n\r\n\r\n\r\nimport unittest\r\nimport hypothesis.strategies as st\r\nfrom hypothesis import given, assume\r\nimport numpy as np\r\nimport time\r\nimport os\r\nfrom caffe2.proto import caffe2_pb2\r\nfrom caffe2.python import core, workspace, muji, dyndep\r\nimport caffe2.python.hypothesis_test_util as hu\r\n\r\nnp.random.seed(1)\r\n\r\ndyndep.InitOpsLibrary('@/caffe2/caffe2/contrib/nccl:nccl_ops')\r\n\r\n\r\ndef gpu_device(i):\r\n device_option = caffe2_pb2.DeviceOption()\r\n device_option.device_type = workspace.GpuDeviceType\r\n device_option.device_id = i\r\n return device_option\r\n\r\n\r\ndef benchmark(ws, net, warmups=5, iters=100):\r\n for _ in range(warmups):\r\n ws.run(net)\r\n plan = core.Plan(\"plan\")\r\n plan.AddStep(core.ExecutionStep(\"test-step\", net, iters))\r\n before = time.time()\r\n ws.run(plan)\r\n after = time.time()\r\n print(\"Timing network, time taken per-iteration: {:.6f}ms\".format((\r\n after - before) / float(iters) * 1000.0))\r\n return after - before\r\n\r\n\r\[email protected](not workspace.has_cuda_support, \"NCCL only on CUDA GPU\")\r\nclass NCCLOpsTest(hu.HypothesisTestCase):\r\n @given(n=st.integers(min_value=2, max_value=workspace.NumGpuDevices()),\r\n m=st.integers(min_value=1, max_value=1000),\r\n in_place=st.booleans())\r\n def test_nccl_allreduce(self, n, m, in_place):\r\n xs = [np.random.randn(m).astype(np.float32) for i in range(n)]\r\n inputs = [str(\"x_{}\".format(i)) for i in range(n)]\r\n prefix = \"\" if in_place else \"o\"\r\n outputs = [str(\"{}x_{}\".format(prefix, i)) for i in range(n)]\r\n op = core.CreateOperator(\"NCCLAllreduce\", inputs, outputs)\r\n input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)}\r\n\r\n def allreduce(*args):\r\n assert len(args) == n\r\n output = np.sum(args, axis=0)\r\n return [output for _ in range(n)]\r\n\r\n outputs = self.assertReferenceChecks(\r\n hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)],\r\n allreduce, input_device_options)\r\n for output in outputs:\r\n np.testing.assert_array_equal(outputs[0], output)\r\n self.assertEqual(outputs[0].tobytes(), output.tobytes())\r\n\r\n @given(n=st.integers(min_value=2, max_value=workspace.NumGpuDevices()),\r\n m=st.integers(min_value=1, max_value=1000),\r\n root=st.integers(min_value=0,\r\n max_value=workspace.NumGpuDevices() - 1))\r\n def test_nccl_broadcast(self, n, m, root):\r\n assume(root < n)\r\n xs = [np.random.randn(m).astype(np.float32) for i in range(n)]\r\n inputs = [str(\"x_{}\".format(i)) for i in range(n)]\r\n op = core.CreateOperator(\"NCCLBroadcast\", inputs, inputs, root=root)\r\n input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)}\r\n\r\n def broadcast(*args):\r\n assert len(args) == n\r\n return [args[root] for _ in range(n)]\r\n\r\n self.assertReferenceChecks(\r\n hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)],\r\n broadcast, input_device_options)\r\n\r\n @given(n=st.integers(min_value=2, max_value=workspace.NumGpuDevices()),\r\n m=st.integers(min_value=1, max_value=1000),\r\n # NCCL Reduce seems to deadlock for non-zero roots.\r\n root=st.integers(min_value=0, max_value=0),\r\n in_place=st.booleans())\r\n def test_nccl_reduce(self, n, m, root, in_place):\r\n assume(in_place is False or root == 0)\r\n xs = [np.random.randn(m).astype(np.float32) for i in range(n)]\r\n inputs = [str(\"x_{}\".format(i)) for i in range(n)]\r\n op = core.CreateOperator(\r\n \"NCCLReduce\", inputs,\r\n inputs[root] if in_place else b\"o\", root=root)\r\n input_device_options = {n: gpu_device(i) for i, n 
in enumerate(inputs)}\r\n\r\n def reduce(*args):\r\n assert len(args) == n\r\n return [np.sum(args, axis=0)]\r\n\r\n self.assertReferenceChecks(\r\n hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)],\r\n reduce, input_device_options)\r\n\r\n @given(n=st.integers(min_value=2, max_value=workspace.NumGpuDevices()),\r\n m=st.integers(min_value=1, max_value=1000))\r\n def test_nccl_allgather(self, n, m):\r\n xs = [np.random.randn(m).astype(np.float32) for i in range(n)]\r\n inputs = [str(\"x_{}\".format(i)) for i in range(n)]\r\n outputs = [str(\"o_{}\".format(i)) for i in range(n)]\r\n op = core.CreateOperator(\"NCCLAllGather\", inputs, outputs)\r\n input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)}\r\n\r\n def allgather(*args):\r\n assert len(args) == n\r\n return [np.stack(args, axis=0) for _ in range(n)]\r\n\r\n outputs = self.assertReferenceChecks(\r\n hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)],\r\n allgather, input_device_options)\r\n for output in outputs:\r\n np.testing.assert_array_equal(outputs[0], output)\r\n self.assertEqual(outputs[0].tobytes(), output.tobytes())\r\n\r\n @given(n=st.integers(min_value=2, max_value=workspace.NumGpuDevices()),\r\n m=st.integers(min_value=1, max_value=1000))\r\n def test_nccl_reduce_scatter(self, n, m):\r\n xs = [np.random.randn(n, m).astype(np.float32) for i in range(n)]\r\n inputs = [str(\"x_{}\".format(i)) for i in range(n)]\r\n outputs = [str(\"o_{}\".format(i)) for i in range(n)]\r\n op = core.CreateOperator(\"NCCLReduceScatter\", inputs, outputs)\r\n input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)}\r\n\r\n def reduce_scatter(*args):\r\n assert len(args) == n\r\n reduced = sum(args)\r\n assert len(reduced.shape) > 1\r\n ref = [reduced[i, :] for i in range(n)]\r\n return ref\r\n\r\n self.assertReferenceChecks(\r\n hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)],\r\n reduce_scatter, input_device_options)\r\n\r\n @given(n=st.integers(min_value=2, max_value=workspace.NumGpuDevices()),\r\n m=st.integers(min_value=100000, max_value=100000),\r\n iters=st.integers(min_value=1, max_value=100),\r\n net_type=st.sampled_from([\"dag\", \"async_dag\", \"simple\"]))\r\n def _test_nccl_sync(self, n, m, iters, net_type):\r\n inputs = [str(\"x_{}\".format(i)) for i in range(n)]\r\n extra_inputs = [str(\"xe_{}\".format(i)) for i in range(n)]\r\n net = core.Net(\"asdf\")\r\n net.Proto().type = net_type\r\n net.Proto().num_workers = n\r\n for i in range(n):\r\n net.ConstantFill([], inputs[i], shape=[m], value=0.0,\r\n device_option=gpu_device(i))\r\n net.ConstantFill([], extra_inputs[i], shape=[m], value=1.0,\r\n device_option=gpu_device(i))\r\n for _ in range(iters):\r\n net.Sum([inputs[i], extra_inputs[i]], [inputs[i]],\r\n device_option=gpu_device(i))\r\n net.NCCLReduce(inputs, [inputs[0]], device_option=gpu_device(0))\r\n self.ws.run(net)\r\n np.testing.assert_array_equal(\r\n self.ws.blobs[inputs[0]].fetch(),\r\n np.full(shape=(m,), fill_value=iters * n, dtype=np.float32))\r\n\r\n @unittest.skipIf(not os.environ.get(\"CAFFE2_BENCHMARK\"), \"Benchmark\")\r\n def test_timings(self):\r\n for n in range(2, workspace.NumGpuDevices()):\r\n for in_place in [False, True]:\r\n xs = [np.random.randn(1e7).astype(np.float32)\r\n for i in range(n)]\r\n inputs = [str(\"x_{}\".format(i)) for i in range(n)]\r\n prefix = \"\" if in_place else \"o\"\r\n outputs = [str(\"{}x_{}\".format(prefix, i)) for i in range(n)]\r\n\r\n net = core.Net(\"test\")\r\n net.NCCLAllreduce(inputs, outputs)\r\n net.RunAllOnGPU()\r\n for i 
in range(n):\r\n self.ws.create_blob(inputs[i]).feed(xs[i], gpu_device(i))\r\n self.ws.run(net)\r\n net_time = benchmark(self.ws, net)\r\n vanilla = core.Net(\"vanilla\")\r\n muji.Allreduce(vanilla, inputs)\r\n vanilla_time = benchmark(self.ws, vanilla)\r\n print(\"Speedup for NCCL: {:.2f}\".format(\r\n vanilla_time / net_time))\r\n",
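The NCCL tests above validate each collective against a CPU reference function. The hedged sketch below restates those reference semantics in plain NumPy on made-up inputs, with no GPUs or NCCL involved, to make the expected outputs of allreduce, allgather, and reduce-scatter explicit.

# Hedged sketch: CPU-only reference semantics for the collectives tested above.
import numpy as np

def allreduce(*args):
    out = np.sum(args, axis=0)        # every rank receives the full sum
    return [out for _ in args]

def allgather(*args):
    out = np.stack(args, axis=0)      # every rank receives all inputs stacked
    return [out for _ in args]

def reduce_scatter(*args):
    reduced = np.sum(args, axis=0)    # sum across ranks ...
    return [reduced[i] for i in range(len(args))]   # ... then shard row i to rank i

xs = [np.random.randn(4, 3).astype(np.float32) for _ in range(4)]
print(allreduce(*xs)[0].shape, allgather(*xs)[0].shape, reduce_scatter(*xs)[0].shape)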
"# Copyright (c) Facebook, Inc. and its affiliates.\r\n# All rights reserved.\r\n#\r\n# This source code is licensed under the BSD-style license found in the\r\n# LICENSE file in the root directory of this source tree.\r\n\r\nimport inspect\r\nimport logging\r\nimport os\r\nimport pickle\r\nimport socket\r\nimport threading\r\nimport time\r\nimport weakref\r\nfrom abc import ABC, abstractmethod\r\nfrom dataclasses import dataclass\r\nfrom datetime import datetime, timedelta\r\nfrom enum import Enum\r\nfrom typing import Any, Callable, Dict, List, Optional, Set, Tuple, cast\r\n\r\nfrom torch.distributed import PrefixStore, Store\r\nfrom torch.distributed.elastic.events import (\r\n NodeState,\r\n construct_and_record_rdzv_event,\r\n)\r\n\r\nfrom .api import (\r\n RendezvousClosedError,\r\n RendezvousError,\r\n RendezvousHandler,\r\n RendezvousParameters,\r\n RendezvousStateError,\r\n RendezvousTimeoutError,\r\n)\r\nfrom .utils import _delay, _PeriodicTimer\r\n\r\nlog = logging.getLogger(__name__)\r\n\r\n\r\ndef get_method_name(depth=2):\r\n if len(inspect.stack()) > depth:\r\n return inspect.stack()[depth].function\r\n return \"no_method_name\"\r\n\r\n\r\nToken = Any\r\n\"\"\"Represents an opaque fencing token used by the rendezvous backend.\"\"\"\r\n\r\n\r\nclass RendezvousBackend(ABC):\r\n \"\"\"Represents a backend that holds the rendezvous state.\"\"\"\r\n\r\n @property\r\n @abstractmethod\r\n def name(self) -> str:\r\n \"\"\"Gets the name of the backend.\"\"\"\r\n\r\n @abstractmethod\r\n def get_state(self) -> Optional[Tuple[bytes, Token]]:\r\n \"\"\"Gets the rendezvous state.\r\n\r\n Returns:\r\n A tuple of the encoded rendezvous state and its fencing token or\r\n ``None`` if no state is found in the backend.\r\n\r\n Raises:\r\n RendezvousConnectionError:\r\n The connection to the backend has failed.\r\n RendezvousStateError:\r\n The rendezvous state is corrupt.\r\n \"\"\"\r\n\r\n @abstractmethod\r\n def set_state(\r\n self, state: bytes, token: Optional[Token] = None\r\n ) -> Optional[Tuple[bytes, Token, bool]]:\r\n \"\"\"Sets the rendezvous state.\r\n\r\n The new rendezvous state is set conditionally:\r\n\r\n - If the specified ``token`` matches the fencing token stored in the\r\n backend, the state will be updated. The new state will be returned\r\n to the caller along with its fencing token.\r\n - If the specified ``token`` does not match the fencing token stored\r\n in the backend, the state won't be updated; instead the existing\r\n state along with its fencing token will be returned to the caller.\r\n - If the specified ``token`` is ``None``, the new state will be set\r\n only if there is no existing state in the backend. 
Either the new\r\n state or the existing state along with its fencing token will be\r\n returned to the caller.\r\n\r\n Args:\r\n state:\r\n The encoded rendezvous state.\r\n token:\r\n An optional fencing token that was retrieved by a previous call\r\n to :py:meth:`get_state` or ``set_state()``.\r\n\r\n Returns:\r\n A tuple of the serialized rendezvous state, its fencing token, and\r\n a boolean value indicating whether our set attempt succeeded.\r\n\r\n Raises:\r\n RendezvousConnectionError:\r\n The connection to the backend has failed.\r\n RendezvousStateError:\r\n The rendezvous state is corrupt.\r\n \"\"\"\r\n\r\n\r\nclass RendezvousTimeout:\r\n \"\"\"Holds the timeout configuration of a rendezvous.\r\n\r\n Args:\r\n join:\r\n The time within which the rendezvous is expected to complete.\r\n last_call:\r\n An additional wait amount before completing the rendezvous once the\r\n rendezvous has the minimum number of required participants.\r\n close:\r\n The time within which the rendezvous is expected to close after a\r\n call to :py:meth:`RendezvousHandler.set_closed` or\r\n :py:meth:`RendezvousHandler.shutdown`.\r\n keep_alive:\r\n The time within which a keep-alive heartbeat is expected to\r\n complete.\r\n \"\"\"\r\n\r\n _ZERO = timedelta(0)\r\n\r\n _DEFAULT_TIMEOUTS = {\r\n \"join\": timedelta(seconds=600),\r\n \"last_call\": timedelta(seconds=30),\r\n \"close\": timedelta(seconds=30),\r\n \"heartbeat\": timedelta(seconds=5),\r\n }\r\n\r\n _join: timedelta\r\n _last_call: timedelta\r\n _close: timedelta\r\n _heartbeat: timedelta\r\n\r\n def __init__(\r\n self,\r\n join: Optional[timedelta] = None,\r\n last_call: Optional[timedelta] = None,\r\n close: Optional[timedelta] = None,\r\n heartbeat: Optional[timedelta] = None,\r\n ) -> None:\r\n self._set_timeouts(join=join, last_call=last_call, close=close, heartbeat=heartbeat)\r\n\r\n @property\r\n def join(self) -> timedelta:\r\n \"\"\"Gets the join timeout.\"\"\"\r\n return self._join\r\n\r\n @property\r\n def last_call(self) -> timedelta:\r\n \"\"\"Gets the last call timeout.\"\"\"\r\n return self._last_call\r\n\r\n @property\r\n def close(self) -> timedelta:\r\n \"\"\"Gets the close timeout.\"\"\"\r\n return self._close\r\n\r\n @property\r\n def heartbeat(self) -> timedelta:\r\n \"\"\"Gets the keep-alive heartbeat timeout.\"\"\"\r\n return self._heartbeat\r\n\r\n def _set_timeouts(self, **timeouts: Optional[timedelta]):\r\n for name, timeout in timeouts.items():\r\n if timeout is None:\r\n timeout = self._DEFAULT_TIMEOUTS[name]\r\n if timeout <= self._ZERO:\r\n raise ValueError(f\"The {name} timeout ({timeout}) must be positive.\")\r\n setattr(self, \"_\" + name, timeout)\r\n\r\n\r\n@dataclass(repr=False, eq=False, frozen=True)\r\nclass RendezvousSettings:\r\n \"\"\"Holds the settings of the rendezvous.\r\n\r\n Attributes:\r\n run_id:\r\n The run id of the rendezvous.\r\n min_nodes:\r\n The minimum number of nodes to admit to the rendezvous.\r\n max_nodes:\r\n The maximum number of nodes to admit to the rendezvous.\r\n timeout:\r\n The timeout configuration of the rendezvous.\r\n keep_alive_interval:\r\n The amount of time a node waits before sending a heartbeat to keep\r\n it alive in the rendezvous.\r\n keep_alive_max_attempt:\r\n The maximum number of failed heartbeat attempts after which a node\r\n is considered dead.\r\n \"\"\"\r\n\r\n run_id: str\r\n min_nodes: int\r\n max_nodes: int\r\n timeout: RendezvousTimeout\r\n keep_alive_interval: timedelta\r\n keep_alive_max_attempt: int\r\n\r\n\r\n@dataclass(eq=True, order=True, 
frozen=True)\r\nclass _NodeDesc:\r\n \"\"\"Describes a node in the rendezvous.\r\n\r\n Attributes:\r\n fqdn:\r\n The FQDN of the node.\r\n pid:\r\n The id of the process in which the rendezvous handler runs.\r\n local_id:\r\n A process-wide unique id.\r\n \"\"\"\r\n\r\n fqdn: str\r\n pid: int\r\n local_id: int\r\n\r\n def __repr__(self) -> str:\r\n return f\"{self.fqdn}_{self.pid}_{self.local_id}\"\r\n\r\n\r\nclass _NodeDescGenerator:\r\n \"\"\"Generates node descriptors.\r\n\r\n A node descriptor is a combination of an FQDN, a process id, and an auto-\r\n incremented integer that uniquely identifies a node in the rendezvous.\r\n \"\"\"\r\n\r\n _lock: threading.Lock\r\n _local_id: int\r\n\r\n def __init__(self) -> None:\r\n self._lock = threading.Lock()\r\n\r\n # An integer that is incremented with each call to generate().\r\n self._local_id = 0\r\n\r\n def generate(self) -> _NodeDesc:\r\n # This method can be called by multiple threads concurrently; therefore,\r\n # we must increment the integer atomically.\r\n with self._lock:\r\n local_id = self._local_id\r\n\r\n self._local_id += 1\r\n\r\n return _NodeDesc(socket.getfqdn(), os.getpid(), local_id)\r\n\r\n\r\nclass _RendezvousState:\r\n \"\"\"Holds the state of a rendezvous.\r\n\r\n Attributes:\r\n round:\r\n The current round of the rendezvous.\r\n complete:\r\n A boolean value indicating whether the current round of the\r\n rendezvous is complete.\r\n deadline:\r\n The time at which the current round of the rendezvous will be\r\n considered complete if it is still waiting for nodes to join.\r\n closed:\r\n A boolean value indicating whether the rendezvous is closed.\r\n participants:\r\n A dictionary of the participants and their corresponding ranks.\r\n wait_list:\r\n A set of nodes that are waiting to participate in the next round of\r\n the rendezvous.\r\n last_heartbeats:\r\n A dictionary containing each node's last heartbeat time.\r\n \"\"\"\r\n\r\n round: int\r\n complete: bool\r\n deadline: Optional[datetime]\r\n closed: bool\r\n participants: Dict[_NodeDesc, int]\r\n wait_list: Set[_NodeDesc]\r\n last_heartbeats: Dict[_NodeDesc, datetime]\r\n\r\n def __init__(self) -> None:\r\n self.round = 0\r\n self.complete = False\r\n self.deadline = None\r\n self.closed = False\r\n self.participants = {}\r\n self.wait_list = set()\r\n self.last_heartbeats = {}\r\n\r\n\r\ndef _remove_participant_epilogue(state: _RendezvousState, settings: RendezvousSettings) -> None:\r\n if state.complete:\r\n # If we do not have any participants left, move to the next round.\r\n if not state.participants:\r\n state.complete = False\r\n\r\n state.round += 1\r\n else:\r\n if len(state.participants) < settings.min_nodes:\r\n state.deadline = None\r\n\r\n\r\nclass _RendezvousStateHolder(ABC):\r\n \"\"\"Holds the shared rendezvous state synced with other nodes.\"\"\"\r\n\r\n @property\r\n @abstractmethod\r\n def state(self) -> _RendezvousState:\r\n \"\"\"Gets the local state.\"\"\"\r\n\r\n @abstractmethod\r\n def sync(self) -> Optional[bool]:\r\n \"\"\"Reads or writes the latest state.\r\n\r\n Returns:\r\n A boolean value indicating whether the local state, in case marked\r\n as dirty, was successfully synced with other nodes.\r\n \"\"\"\r\n\r\n @abstractmethod\r\n def mark_dirty(self) -> None:\r\n \"\"\"Marks the local state as dirty.\"\"\"\r\n\r\n\r\nclass _BackendRendezvousStateHolder(_RendezvousStateHolder):\r\n \"\"\"Holds the rendezvous state synced with other nodes via a backend.\r\n\r\n Args:\r\n backend:\r\n The rendezvous backend to use.\r\n 
settings:\r\n The rendezvous settings.\r\n cache_duration:\r\n The amount of time, in seconds, to cache the last rendezvous state\r\n before requesting it from the backend again.\r\n \"\"\"\r\n\r\n _backend: RendezvousBackend\r\n _state: _RendezvousState\r\n _settings: RendezvousSettings\r\n _cache_duration: int\r\n _token: Token\r\n _dirty: bool\r\n _last_sync_time: float\r\n _dead_nodes: List[_NodeDesc]\r\n\r\n def __init__(\r\n self,\r\n backend: RendezvousBackend,\r\n settings: RendezvousSettings,\r\n cache_duration: int = 1,\r\n ) -> None:\r\n self._backend = backend\r\n self._state = _RendezvousState()\r\n self._settings = settings\r\n self._cache_duration = cache_duration\r\n self._token = None\r\n self._dirty = False\r\n self._last_sync_time = -1\r\n self._dead_nodes = []\r\n\r\n def _record(self, message: str, node_state: NodeState = NodeState.RUNNING):\r\n construct_and_record_rdzv_event(\r\n name=f\"{self.__class__.__name__}.{get_method_name()}\",\r\n run_id=self._settings.run_id,\r\n message=message,\r\n node_state=node_state,\r\n )\r\n\r\n @property\r\n def state(self) -> _RendezvousState:\r\n \"\"\"See base class.\"\"\"\r\n return self._state\r\n\r\n def sync(self) -> Optional[bool]:\r\n \"\"\"See base class.\"\"\"\r\n state_bits: Optional[bytes] = None\r\n\r\n token = None\r\n\r\n has_set: Optional[bool]\r\n\r\n if self._dirty:\r\n has_set = False\r\n\r\n state_bits = pickle.dumps(self._state)\r\n\r\n set_response = self._backend.set_state(state_bits, self._token)\r\n if set_response is not None:\r\n state_bits, token, has_set = set_response\r\n else:\r\n has_set = None\r\n\r\n if self._cache_duration > 0:\r\n # Avoid overloading the backend if we are asked to retrieve the\r\n # state repeatedly. Try to serve the cached state.\r\n if self._last_sync_time >= max(time.monotonic() - self._cache_duration, 0):\r\n return None\r\n\r\n get_response = self._backend.get_state()\r\n if get_response is not None:\r\n state_bits, token = get_response\r\n\r\n if state_bits is not None:\r\n try:\r\n self._state = pickle.loads(state_bits)\r\n except pickle.PickleError as exc:\r\n raise RendezvousStateError(\r\n \"The rendezvous state is corrupt. 
See inner exception for details.\"\r\n ) from exc\r\n else:\r\n self._state = _RendezvousState()\r\n\r\n if has_set and self._dead_nodes and log.isEnabledFor(logging.DEBUG):\r\n node_list = \", \".join(f\"'{dead_node}'\" for dead_node in self._dead_nodes)\r\n\r\n msg = (\r\n f\"As part of the sync operation the node(s) {node_list} have been removed from the \"\r\n f\"rendezvous '{self._settings.run_id}' since they had no heartbeat.\"\r\n )\r\n self._record(message=msg)\r\n log.debug(msg)\r\n\r\n self._token = token\r\n\r\n self._dirty = False\r\n\r\n self._last_sync_time = time.monotonic()\r\n\r\n self._sanitize()\r\n\r\n return has_set\r\n\r\n def _sanitize(self) -> None:\r\n state = self._state\r\n\r\n expire_time = datetime.utcnow() - (\r\n self._settings.keep_alive_interval * self._settings.keep_alive_max_attempt\r\n )\r\n\r\n # Filter out the dead nodes.\r\n self._dead_nodes = [\r\n node\r\n for node, last_heartbeat in state.last_heartbeats.items()\r\n if last_heartbeat < expire_time\r\n ]\r\n\r\n participant_removed = False\r\n\r\n for dead_node in self._dead_nodes:\r\n del state.last_heartbeats[dead_node]\r\n\r\n try:\r\n del state.participants[dead_node]\r\n\r\n participant_removed = True\r\n except KeyError:\r\n pass\r\n\r\n try:\r\n state.wait_list.remove(dead_node)\r\n except KeyError:\r\n pass\r\n\r\n if participant_removed:\r\n # Common epilogue shared with the _remove_from_participants()\r\n # function of _DistributedRendezvousOpExecutor.\r\n _remove_participant_epilogue(state, self._settings)\r\n\r\n def mark_dirty(self) -> None:\r\n \"\"\"See base class.\r\n\r\n If the local rendezvous state is dirty, the next sync call will try to\r\n write the changes back to the backend. However this attempt might fail\r\n if another node, which had the same state, also made changes and wrote\r\n them before us.\r\n \"\"\"\r\n self._dirty = True\r\n\r\n\r\nclass _Action(Enum):\r\n \"\"\"Specifies the possible actions based on the state of the rendezvous.\"\"\"\r\n\r\n KEEP_ALIVE = 1\r\n ADD_TO_PARTICIPANTS = 2\r\n ADD_TO_WAIT_LIST = 3\r\n REMOVE_FROM_PARTICIPANTS = 4\r\n REMOVE_FROM_WAIT_LIST = 5\r\n MARK_RENDEZVOUS_COMPLETE = 6\r\n MARK_RENDEZVOUS_CLOSED = 7\r\n SYNC = 8\r\n ERROR_CLOSED = 9\r\n ERROR_TIMEOUT = 10\r\n FINISH = 11\r\n\r\n\r\nclass _RendezvousContext:\r\n \"\"\"Holds the context of the rendezvous.\r\n\r\n Attributes:\r\n node:\r\n The node descriptor associated with the current rendezvous handler\r\n instance.\r\n state:\r\n The current state of the rendezvous.\r\n settings:\r\n The rendezvous settings.\r\n \"\"\"\r\n\r\n node: _NodeDesc\r\n state: _RendezvousState\r\n settings: RendezvousSettings\r\n\r\n def __init__(\r\n self, node: _NodeDesc, state: _RendezvousState, settings: RendezvousSettings\r\n ) -> None:\r\n self.node = node\r\n self.state = state\r\n self.settings = settings\r\n\r\n\r\nclass _RendezvousOpExecutor(ABC):\r\n \"\"\"Executes rendezvous operations.\"\"\"\r\n\r\n @abstractmethod\r\n def run(\r\n self,\r\n state_handler: Callable[[_RendezvousContext, float], _Action],\r\n deadline: float,\r\n ) -> None:\r\n \"\"\"Executes a rendezvous operation.\r\n\r\n An operation is run inside a state machine and is expected to transition\r\n the rendezvous from one state to another.\r\n\r\n Args:\r\n state_handler:\r\n A callable that is expected to return the next state transition\r\n action based on the current state of the rendezvous.\r\n deadline:\r\n The time, in seconds, at which the operation will be considered\r\n timed-out.\r\n \"\"\"\r\n\r\n\r\nclass 
_DistributedRendezvousOpExecutor(_RendezvousOpExecutor):\r\n \"\"\"Executes rendezvous operations using a shared state.\r\n\r\n Args:\r\n node:\r\n The node descriptor associated with the current rendezvous handler\r\n instance.\r\n state_holder:\r\n The ``RendezvousStateHolder`` to use to sync the rendezvous state\r\n with other nodes.\r\n settings:\r\n The rendezvous settings.\r\n \"\"\"\r\n\r\n _node: _NodeDesc\r\n _state: _RendezvousState\r\n _state_holder: _RendezvousStateHolder\r\n _settings: RendezvousSettings\r\n\r\n def __init__(\r\n self,\r\n node: _NodeDesc,\r\n state_holder: _RendezvousStateHolder,\r\n settings: RendezvousSettings,\r\n ) -> None:\r\n self._node = node\r\n self._state_holder = state_holder\r\n self._settings = settings\r\n\r\n def _record(self, message: str, node_state: NodeState = NodeState.RUNNING) -> None:\r\n construct_and_record_rdzv_event(\r\n name=f\"{self.__class__.__name__}.{get_method_name()}\",\r\n run_id=self._settings.run_id,\r\n message=message,\r\n node_state=node_state,\r\n hostname=self._node.fqdn,\r\n pid=self._node.pid,\r\n local_id=self._node.local_id,\r\n )\r\n\r\n def run(\r\n self,\r\n state_handler: Callable[[_RendezvousContext, float], _Action],\r\n deadline: float,\r\n ) -> None:\r\n \"\"\"See base class.\"\"\"\r\n action = None\r\n\r\n while action != _Action.FINISH:\r\n # Reads or writes the latest rendezvous state shared by all nodes in\r\n # the rendezvous. Note that our local changes might get overridden\r\n # by another node if that node synced its changes before us.\r\n has_set = self._state_holder.sync()\r\n if has_set is not None:\r\n if has_set:\r\n msg = (\r\n f\"The node '{self._node}' has successfully synced its local changes with \"\r\n f\"other nodes in the rendezvous '{self._settings.run_id}'.\"\r\n )\r\n else:\r\n msg = (\r\n f\"The node '{self._node}' has a stale state and failed to sync its local \"\r\n f\"changes with other nodes in the rendezvous '{self._settings.run_id}'.\"\r\n )\r\n\r\n self._record(message=msg)\r\n log.debug(msg)\r\n\r\n self._state = self._state_holder.state\r\n\r\n ctx = _RendezvousContext(self._node, self._state, self._settings)\r\n\r\n # Determine the next action to take based on the current state of\r\n # the rendezvous.\r\n action = state_handler(ctx, deadline)\r\n\r\n if action == _Action.FINISH:\r\n continue\r\n\r\n if action == _Action.ERROR_CLOSED:\r\n raise RendezvousClosedError()\r\n\r\n if action == _Action.ERROR_TIMEOUT:\r\n raise RendezvousTimeoutError()\r\n\r\n if action == _Action.SYNC:\r\n # Delay the execution by one second to avoid overloading the\r\n # backend if we are asked to poll for state changes.\r\n _delay(seconds=1)\r\n else:\r\n if action == _Action.KEEP_ALIVE:\r\n self._keep_alive()\r\n elif action == _Action.ADD_TO_PARTICIPANTS:\r\n self._add_to_participants()\r\n elif action == _Action.ADD_TO_WAIT_LIST:\r\n self._add_to_wait_list()\r\n elif action == _Action.REMOVE_FROM_PARTICIPANTS:\r\n self._remove_from_participants()\r\n elif action == _Action.REMOVE_FROM_WAIT_LIST:\r\n self._remove_from_wait_list()\r\n elif action == _Action.MARK_RENDEZVOUS_COMPLETE:\r\n self._mark_rendezvous_complete()\r\n elif action == _Action.MARK_RENDEZVOUS_CLOSED:\r\n self._mark_rendezvous_closed()\r\n\r\n # Attempt to sync our changes back to other nodes.\r\n self._state_holder.mark_dirty()\r\n\r\n def _keep_alive(self) -> None:\r\n msg = (\r\n f\"The node '{self._node}' updated its keep-alive heartbeat time for the rendezvous \"\r\n f\"'{self._settings.run_id}'. 
Pending sync.\"\r\n )\r\n self._record(message=msg)\r\n log.debug(msg)\r\n\r\n self._state.last_heartbeats[self._node] = datetime.utcnow()\r\n\r\n def _add_to_participants(self) -> None:\r\n msg = (\r\n f\"The node '{self._node}' added itself to the participants of round \"\r\n f\"{self._state.round} of the rendezvous '{self._settings.run_id}'. Pending sync.\"\r\n )\r\n self._record(message=msg)\r\n log.debug(msg)\r\n\r\n state = self._state\r\n\r\n try:\r\n state.wait_list.remove(self._node)\r\n except KeyError:\r\n pass\r\n\r\n # The ranks of the participants will be set once the rendezvous is\r\n # complete.\r\n state.participants[self._node] = 0\r\n\r\n self._keep_alive()\r\n\r\n if len(state.participants) == self._settings.min_nodes:\r\n state.deadline = datetime.utcnow() + self._settings.timeout.last_call\r\n\r\n if len(state.participants) == self._settings.max_nodes:\r\n self._mark_rendezvous_complete()\r\n\r\n def _add_to_wait_list(self) -> None:\r\n msg = (\r\n f\"The node '{self._node}' added itself to the wait list of round \"\r\n f\"{self._state.round + 1} of the rendezvous '{self._settings.run_id}'. Pending sync.\"\r\n )\r\n self._record(message=msg)\r\n log.debug(msg)\r\n\r\n self._state.wait_list.add(self._node)\r\n\r\n self._keep_alive()\r\n\r\n def _remove_from_participants(self) -> None:\r\n msg = (\r\n f\"The node '{self._node}' removed itself from the participants of round \"\r\n f\"{self._state.round} of the rendezvous '{self._settings.run_id}'. Pending sync.\"\r\n )\r\n self._record(message=msg)\r\n log.debug(msg)\r\n\r\n state = self._state\r\n\r\n del state.participants[self._node]\r\n\r\n del state.last_heartbeats[self._node]\r\n\r\n # Common epilogue shared with the sanitizer() function of\r\n # _BackendRendezvousStateHolder.\r\n _remove_participant_epilogue(state, self._settings)\r\n\r\n def _remove_from_wait_list(self) -> None:\r\n msg = (\r\n f\"The node '{self._node}' removed itself from the wait list of round \"\r\n f\"{self._state.round + 1} of the rendezvous '{self._settings.run_id}'. Pending sync.\"\r\n )\r\n self._record(message=msg)\r\n log.debug(msg)\r\n\r\n self._state.wait_list.remove(self._node)\r\n\r\n del self._state.last_heartbeats[self._node]\r\n\r\n def _mark_rendezvous_complete(self) -> None:\r\n msg = (\r\n f\"The node '{self._node}' marked round {self._state.round} of the rendezvous \"\r\n f\"'{self._settings.run_id}' as complete. Pending sync.\"\r\n )\r\n self._record(message=msg, node_state=NodeState.SUCCEEDED)\r\n log.debug(msg)\r\n\r\n state = self._state\r\n\r\n state.complete = True\r\n state.deadline = None\r\n\r\n # Assign the ranks.\r\n for rank, node in enumerate(sorted(state.participants)):\r\n state.participants[node] = rank\r\n\r\n def _mark_rendezvous_closed(self) -> None:\r\n msg = (\r\n f\"The node '{self._node}' marked the rendezvous '{self._settings.run_id}' as closed. 
\"\r\n \"Pending sync.\"\r\n )\r\n self._record(message=msg, node_state=NodeState.SUCCEEDED)\r\n log.debug(msg)\r\n\r\n self._state.closed = True\r\n\r\n\r\ndef _should_keep_alive(ctx: _RendezvousContext) -> bool:\r\n \"\"\"Determines whether a keep-alive heartbeat should be sent.\"\"\"\r\n try:\r\n last_heartbeat = ctx.state.last_heartbeats[ctx.node]\r\n except KeyError:\r\n return False\r\n\r\n return last_heartbeat <= datetime.utcnow() - ctx.settings.keep_alive_interval\r\n\r\n\r\nclass _RendezvousExitOp:\r\n \"\"\"Represents a rendezvous exit operation.\"\"\"\r\n\r\n def __call__(self, ctx: _RendezvousContext, deadline: float) -> _Action:\r\n if ctx.node in ctx.state.participants:\r\n if time.monotonic() > deadline:\r\n return _Action.ERROR_TIMEOUT\r\n return _Action.REMOVE_FROM_PARTICIPANTS\r\n return _Action.FINISH\r\n\r\n\r\nclass _RendezvousJoinOp:\r\n \"\"\"Represents a rendezvous join operation.\"\"\"\r\n\r\n def __call__(self, ctx: _RendezvousContext, deadline: float) -> _Action:\r\n state = ctx.state\r\n\r\n # A closed rendezvous means that it no longer accepts new nodes.\r\n if state.closed:\r\n return _Action.ERROR_CLOSED\r\n\r\n is_participant = ctx.node in state.participants\r\n\r\n # If we are part of the rendezvous and it is already complete there is\r\n # no further action to take.\r\n if state.complete and is_participant:\r\n return _Action.FINISH\r\n\r\n now = time.monotonic()\r\n if now > deadline:\r\n rollback_period = 5 # 5 seconds\r\n\r\n # If we still have time to rollback (a short period on top of the\r\n # operation deadline), try to remove ourself from the rendezvous.\r\n # It is okay if we can't though as our keep-alive will eventually\r\n # expire.\r\n if now <= deadline + rollback_period:\r\n # If we are part of the rendezvous, it means we couldn't find\r\n # enough participants to complete it on time.\r\n if is_participant:\r\n return _Action.REMOVE_FROM_PARTICIPANTS\r\n # If we are in the wait list, it means we couldn't wait till the\r\n # next round of the rendezvous.\r\n if ctx.node in state.wait_list:\r\n return _Action.REMOVE_FROM_WAIT_LIST\r\n return _Action.ERROR_TIMEOUT\r\n\r\n if state.complete:\r\n # If we are here, it means we are not part of the rendezvous. In\r\n # case the rendezvous has capacity for additional participants add\r\n # ourself to the wait list for the next round.\r\n if len(state.participants) < ctx.settings.max_nodes:\r\n if ctx.node not in state.wait_list:\r\n return _Action.ADD_TO_WAIT_LIST\r\n elif is_participant:\r\n # If the rendezvous has enough number of participants including us,\r\n # check whether we have passed the rendezvous deadline. If yes,\r\n # complete it.\r\n if len(state.participants) >= ctx.settings.min_nodes:\r\n if cast(datetime, state.deadline) < datetime.utcnow():\r\n return _Action.MARK_RENDEZVOUS_COMPLETE\r\n else:\r\n # The rendezvous is not complete yet and we are not part of it. 
Try\r\n # to join.\r\n return _Action.ADD_TO_PARTICIPANTS\r\n\r\n if _should_keep_alive(ctx):\r\n return _Action.KEEP_ALIVE\r\n\r\n # At this point either the rendezvous is not complete, but we are part\r\n # of it, which means we have to wait for other participants to join; or\r\n # the rendezvous is complete, but we are not part of it, which means we\r\n # have to wait for the next round.\r\n return _Action.SYNC\r\n\r\n\r\nclass _RendezvousCloseOp:\r\n \"\"\"Represents a rendezvous close operation.\"\"\"\r\n\r\n def __call__(self, ctx: _RendezvousContext, deadline: float) -> _Action:\r\n if ctx.state.closed:\r\n return _Action.FINISH\r\n if time.monotonic() > deadline:\r\n return _Action.ERROR_TIMEOUT\r\n return _Action.MARK_RENDEZVOUS_CLOSED\r\n\r\n\r\nclass _RendezvousKeepAliveOp:\r\n \"\"\"Represents a rendezvous keep-alive update operation.\"\"\"\r\n\r\n def __call__(self, ctx: _RendezvousContext, deadline: float) -> _Action:\r\n if _should_keep_alive(ctx):\r\n if time.monotonic() > deadline:\r\n return _Action.ERROR_TIMEOUT\r\n return _Action.KEEP_ALIVE\r\n return _Action.FINISH\r\n\r\n\r\nclass DynamicRendezvousHandler(RendezvousHandler):\r\n \"\"\"Represents a handler that sets up a rendezvous among a set of nodes.\"\"\"\r\n\r\n # Static\r\n _node_desc_generator = _NodeDescGenerator()\r\n\r\n _this_node: _NodeDesc\r\n _settings: RendezvousSettings\r\n _backend_name: str\r\n _store: Store\r\n _state_holder: _RendezvousStateHolder\r\n _op_executor: _RendezvousOpExecutor\r\n _heartbeat_lock: threading.Lock\r\n _keep_alive_timer: Optional[_PeriodicTimer]\r\n\r\n @classmethod\r\n def from_backend(\r\n cls,\r\n run_id: str,\r\n store: Store,\r\n backend: RendezvousBackend,\r\n min_nodes: int,\r\n max_nodes: int,\r\n timeout: Optional[RendezvousTimeout] = None,\r\n ):\r\n \"\"\"Creates a new :py:class:`DynamicRendezvousHandler`.\r\n\r\n Args:\r\n run_id:\r\n The run id of the rendezvous.\r\n store:\r\n The C10d store to return as part of the rendezvous.\r\n backend:\r\n The backend to use to hold the rendezvous state.\r\n min_nodes:\r\n The minimum number of nodes to admit to the rendezvous.\r\n max_nodes:\r\n The maximum number of nodes to admit to the rendezvous.\r\n timeout:\r\n The timeout configuration of the rendezvous.\r\n \"\"\"\r\n # We associate each handler instance with a unique node descriptor.\r\n node = cls._node_desc_generator.generate()\r\n\r\n settings = RendezvousSettings(\r\n run_id,\r\n min_nodes,\r\n max_nodes,\r\n timeout or RendezvousTimeout(),\r\n keep_alive_interval=timedelta(seconds=5),\r\n keep_alive_max_attempt=3,\r\n )\r\n\r\n state_holder = _BackendRendezvousStateHolder(backend, settings)\r\n\r\n return cls(node, settings, backend.name, store, state_holder)\r\n\r\n def __init__(\r\n self,\r\n node: _NodeDesc,\r\n settings: RendezvousSettings,\r\n backend_name: str,\r\n store: Store,\r\n state_holder: _RendezvousStateHolder,\r\n ) -> None:\r\n if not settings.run_id:\r\n raise ValueError(\"The run id must be a non-empty string.\")\r\n\r\n if settings.min_nodes < 1:\r\n raise ValueError(\r\n f\"The minimum number of nodes ({settings.min_nodes}) must be greater than zero.\"\r\n )\r\n\r\n if settings.max_nodes < settings.min_nodes:\r\n raise ValueError(\r\n f\"The maximum number of nodes ({settings.max_nodes}) must be greater than or equal \"\r\n f\"to the minimum number of nodes ({settings.min_nodes}).\"\r\n )\r\n\r\n self._this_node = node\r\n\r\n self._settings = settings\r\n\r\n self._backend_name = backend_name\r\n\r\n self._store = store\r\n\r\n 
self._state_holder = state_holder\r\n\r\n self._op_executor = _DistributedRendezvousOpExecutor(\r\n self._this_node, self._state_holder, self._settings\r\n )\r\n\r\n self._heartbeat_lock = threading.Lock()\r\n\r\n self._keep_alive_timer = None\r\n\r\n def _record(\r\n self,\r\n message: str,\r\n node_state: NodeState = NodeState.RUNNING,\r\n rank: Optional[int] = None,\r\n ) -> None:\r\n construct_and_record_rdzv_event(\r\n name=f\"{self.__class__.__name__}.{get_method_name()}\",\r\n run_id=self._settings.run_id,\r\n message=message,\r\n node_state=node_state,\r\n hostname=self._this_node.fqdn,\r\n pid=self._this_node.pid,\r\n local_id=self._this_node.local_id,\r\n rank=rank,\r\n )\r\n\r\n @property\r\n def settings(self) -> RendezvousSettings:\r\n \"\"\"Gets the settings of the rendezvous.\"\"\"\r\n return self._settings\r\n\r\n def get_backend(self) -> str:\r\n \"\"\"See base class.\"\"\"\r\n return self._backend_name\r\n\r\n def next_rendezvous(self) -> Tuple[Store, int, int]:\r\n \"\"\"See base class.\"\"\"\r\n msg = (\r\n f\"The node '{self._this_node}' attempts to join the next round of the rendezvous \"\r\n f\"'{self._settings.run_id}'.\"\r\n )\r\n self._record(message=msg)\r\n log.info(msg)\r\n\r\n try:\r\n self._stop_heartbeats()\r\n\r\n # Delay the execution for a small random amount of time if this is our\r\n # first run. This will slightly skew the rendezvous attempts across the\r\n # nodes and reduce the load on the backend.\r\n if self._state_holder.state.round == 0:\r\n _delay(seconds=(0, 0.3))\r\n\r\n exit_op = _RendezvousExitOp()\r\n join_op = _RendezvousJoinOp()\r\n\r\n deadline = self._get_deadline(self._settings.timeout.join)\r\n\r\n self._op_executor.run(exit_op, deadline)\r\n self._op_executor.run(join_op, deadline)\r\n\r\n self._start_heartbeats()\r\n\r\n rank, world_size = self._get_world()\r\n store = self._get_store()\r\n\r\n except Exception as e:\r\n self._record(\r\n message=f\"{type(e).__name__}: {str(e)}\",\r\n node_state=NodeState.FAILED,\r\n )\r\n raise\r\n\r\n msg = (\r\n f\"The node '{self._this_node}' has joined round {self._state_holder.state.round} of \"\r\n f\"the rendezvous '{self._settings.run_id}' as rank {rank} in a world of size \"\r\n f\"{world_size}.\"\r\n )\r\n self._record(message=msg, rank=rank)\r\n log.info(msg)\r\n\r\n return store, rank, world_size\r\n\r\n def is_closed(self) -> bool:\r\n \"\"\"See base class.\"\"\"\r\n try:\r\n with self._heartbeat_lock:\r\n self._state_holder.sync()\r\n\r\n return self._state_holder.state.closed\r\n\r\n except Exception as e:\r\n self._record(\r\n message=f\"{type(e).__name__}: {str(e)}\",\r\n node_state=NodeState.FAILED,\r\n )\r\n raise\r\n\r\n def set_closed(self) -> None:\r\n \"\"\"See base class.\"\"\"\r\n try:\r\n with self._heartbeat_lock:\r\n self._close()\r\n except Exception as e:\r\n self._record(\r\n message=f\"{type(e).__name__}: {str(e)}\",\r\n node_state=NodeState.FAILED,\r\n )\r\n raise\r\n\r\n def num_nodes_waiting(self) -> int:\r\n \"\"\"See base class.\"\"\"\r\n try:\r\n with self._heartbeat_lock:\r\n self._state_holder.sync()\r\n\r\n return len(self._state_holder.state.wait_list)\r\n\r\n except Exception as e:\r\n self._record(\r\n message=f\"{type(e).__name__}: {str(e)}\",\r\n node_state=NodeState.FAILED,\r\n )\r\n raise\r\n\r\n def get_run_id(self) -> str:\r\n \"\"\"See base class.\"\"\"\r\n return self._settings.run_id\r\n\r\n def shutdown(self) -> bool:\r\n \"\"\"See base class.\"\"\"\r\n self._stop_heartbeats()\r\n\r\n try:\r\n self._close()\r\n\r\n return True\r\n except 
RendezvousError as ex:\r\n msg = (\r\n f\"The node '{self._this_node}' has failed to shutdown the rendezvous \"\r\n f\"'{self._settings.run_id}' due to an error of type {type(ex).__name__}.\"\r\n )\r\n self._record(message=msg, node_state=NodeState.FAILED)\r\n log.warning(msg)\r\n\r\n return False\r\n except Exception as e:\r\n self._record(\r\n message=f\"{type(e).__name__}: {str(e)}\",\r\n node_state=NodeState.FAILED,\r\n )\r\n raise\r\n\r\n def _close(self) -> None:\r\n op = _RendezvousCloseOp()\r\n\r\n deadline = self._get_deadline(self._settings.timeout.close)\r\n\r\n self._op_executor.run(op, deadline)\r\n\r\n msg = f\"The node '{self._this_node}' has closed the rendezvous '{self._settings.run_id}'.\"\r\n self._record(message=msg, node_state=NodeState.SUCCEEDED)\r\n log.info(msg)\r\n\r\n @staticmethod\r\n def _keep_alive_weak(weak_self) -> None:\r\n self = weak_self()\r\n if self is not None:\r\n self._keep_alive()\r\n\r\n def _keep_alive(self) -> None:\r\n self._heartbeat_lock.acquire()\r\n\r\n op = _RendezvousKeepAliveOp()\r\n\r\n deadline = self._get_deadline(self._settings.timeout.heartbeat)\r\n\r\n try:\r\n self._op_executor.run(op, deadline)\r\n\r\n msg = (\r\n f\"The node '{self._this_node}' has sent a keep-alive heartbeat to the rendezvous \"\r\n f\"'{self._settings.run_id}'.\"\r\n )\r\n self._record(message=msg)\r\n log.debug(msg)\r\n except RendezvousError as ex:\r\n msg = (\r\n f\"The node '{self._this_node}' has failed to send a keep-alive heartbeat to the \"\r\n f\"rendezvous '{self._settings.run_id}' due to an error of type {type(ex).__name__}.\"\r\n )\r\n self._record(message=msg, node_state=NodeState.FAILED)\r\n log.warning(msg)\r\n finally:\r\n self._heartbeat_lock.release()\r\n\r\n def _start_heartbeats(self) -> None:\r\n self._keep_alive_timer = _PeriodicTimer(\r\n self._settings.keep_alive_interval, self._keep_alive_weak, weakref.ref(self)\r\n )\r\n\r\n self._keep_alive_timer.set_name(f\"RendezvousKeepAliveTimer_{self._this_node.local_id}\")\r\n\r\n self._keep_alive_timer.start()\r\n\r\n def _stop_heartbeats(self) -> None:\r\n if self._keep_alive_timer is None:\r\n return\r\n\r\n self._keep_alive_timer.cancel()\r\n\r\n def _get_world(self) -> Tuple[int, int]:\r\n state = self._state_holder.state\r\n\r\n return state.participants[self._this_node], len(state.participants)\r\n\r\n def _get_store(self) -> Store:\r\n key_prefix = f\"torch.rendezvous.{self._settings.run_id}.{self._state_holder.state.round}\"\r\n\r\n return PrefixStore(key_prefix, self._store)\r\n\r\n def _get_deadline(self, timeout: timedelta) -> float:\r\n return time.monotonic() + timeout.total_seconds()\r\n\r\n\r\ndef _get_timeout(params: RendezvousParameters, key: str) -> Optional[timedelta]:\r\n timeout = params.get_as_int(key + \"_timeout\")\r\n if timeout is None:\r\n return None\r\n return timedelta(seconds=timeout)\r\n\r\n\r\ndef create_handler(\r\n store: Store, backend: RendezvousBackend, params: RendezvousParameters\r\n) -> DynamicRendezvousHandler:\r\n \"\"\"Creates a new :py:class:`DynamicRendezvousHandler` from the specified\r\n parameters.\r\n\r\n Args:\r\n store:\r\n The C10d store to return as part of the rendezvous.\r\n backend:\r\n The backend to use to hold the rendezvous state.\r\n\r\n +-------------------+------------------------------------------------------+\r\n | Parameter | Description |\r\n +===================+======================================================+\r\n | join_timeout | The total time, in seconds, within which the |\r\n | | rendezvous is expected to complete. 
Defaults to 600 |\r\n | | seconds. |\r\n +-------------------+------------------------------------------------------+\r\n | last_call_timeout | An additional wait amount, in seconds, before |\r\n | | completing the rendezvous once the minimum number of |\r\n | | nodes has been reached. Defaults to 30 seconds. |\r\n +-------------------+------------------------------------------------------+\r\n | close_timeout | The time, in seconds, within which the rendezvous is |\r\n | | expected to close after a call to |\r\n | | :py:meth:`RendezvousHandler.set_closed` or |\r\n | | :py:meth:`RendezvousHandler.shutdown`. Defaults to |\r\n | | 30 seconds. |\r\n +-------------------+------------------------------------------------------+\r\n \"\"\"\r\n try:\r\n timeout = RendezvousTimeout(\r\n _get_timeout(params, \"join\"),\r\n _get_timeout(params, \"last_call\"),\r\n _get_timeout(params, \"close\"),\r\n )\r\n\r\n return DynamicRendezvousHandler.from_backend(\r\n params.run_id,\r\n store,\r\n backend,\r\n params.min_nodes,\r\n params.max_nodes,\r\n timeout,\r\n )\r\n except Exception as e:\r\n construct_and_record_rdzv_event(\r\n message=f\"{type(e).__name__}: {str(e)}\",\r\n run_id=params.run_id,\r\n node_state=NodeState.FAILED,\r\n )\r\n raise\r\n",
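The dynamic rendezvous module above defines DynamicRendezvousHandler.from_backend. A hedged single-node sketch of wiring it to a C10d-store-backed backend follows; the import paths, localhost port, and node counts are assumptions chosen for illustration, and with min_nodes == max_nodes == 1 the join completes immediately in one process.

# Hedged sketch (assumed import paths and a throwaway localhost port):
# complete a single-node rendezvous against a C10d store backend.
from datetime import timedelta

from torch.distributed import TCPStore
from torch.distributed.elastic.rendezvous.c10d_rendezvous_backend import C10dRendezvousBackend
from torch.distributed.elastic.rendezvous.dynamic_rendezvous import DynamicRendezvousHandler

store = TCPStore("localhost", 29500, is_master=True, timeout=timedelta(seconds=30))
backend = C10dRendezvousBackend(store, run_id="demo_run")

handler = DynamicRendezvousHandler.from_backend(
    run_id="demo_run",
    store=store,
    backend=backend,
    min_nodes=1,   # single-process demo: the round completes as soon as we join
    max_nodes=1,
)

prefixed_store, rank, world_size = handler.next_rendezvous()
print(rank, world_size)   # -> 0 1
handler.shutdown()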
"from numbers import Number\r\nimport math\r\nimport torch\r\nfrom torch.distributions import constraints\r\nfrom torch.distributions.uniform import Uniform\r\nfrom torch.distributions.transformed_distribution import TransformedDistribution\r\nfrom torch.distributions.transforms import AffineTransform, ExpTransform\r\nfrom torch.distributions.utils import broadcast_all, euler_constant\r\n\r\n\r\nclass Gumbel(TransformedDistribution):\r\n r\"\"\"\r\n Samples from a Gumbel Distribution.\r\n\r\n Examples::\r\n\r\n >>> m = Gumbel(torch.tensor([1.0]), torch.tensor([2.0]))\r\n >>> m.sample() # sample from Gumbel distribution with loc=1, scale=2\r\n tensor([ 1.0124])\r\n\r\n Args:\r\n loc (float or Tensor): Location parameter of the distribution\r\n scale (float or Tensor): Scale parameter of the distribution\r\n \"\"\"\r\n arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}\r\n support = constraints.real\r\n\r\n def __init__(self, loc, scale, validate_args=None):\r\n self.loc, self.scale = broadcast_all(loc, scale)\r\n finfo = torch.finfo(self.loc.dtype)\r\n if isinstance(loc, Number) and isinstance(scale, Number):\r\n base_dist = Uniform(finfo.tiny, 1 - finfo.eps)\r\n else:\r\n base_dist = Uniform(torch.full_like(self.loc, finfo.tiny),\r\n torch.full_like(self.loc, 1 - finfo.eps))\r\n transforms = [ExpTransform().inv, AffineTransform(loc=0, scale=-torch.ones_like(self.scale)),\r\n ExpTransform().inv, AffineTransform(loc=loc, scale=-self.scale)]\r\n super(Gumbel, self).__init__(base_dist, transforms, validate_args=validate_args)\r\n\r\n def expand(self, batch_shape, _instance=None):\r\n new = self._get_checked_instance(Gumbel, _instance)\r\n new.loc = self.loc.expand(batch_shape)\r\n new.scale = self.scale.expand(batch_shape)\r\n return super(Gumbel, self).expand(batch_shape, _instance=new)\r\n\r\n # Explicitly defining the log probability function for Gumbel due to precision issues\r\n def log_prob(self, value):\r\n if self._validate_args:\r\n self._validate_sample(value)\r\n y = (self.loc - value) / self.scale\r\n return (y - y.exp()) - self.scale.log()\r\n\r\n @property\r\n def mean(self):\r\n return self.loc + self.scale * euler_constant\r\n\r\n @property\r\n def stddev(self):\r\n return (math.pi / math.sqrt(6)) * self.scale\r\n\r\n @property\r\n def variance(self):\r\n return self.stddev.pow(2)\r\n\r\n def entropy(self):\r\n return self.scale.log() + (1 + euler_constant)\r\n",
"# Copyright 2019 Kakao Brain\r\n#\r\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\r\n#\r\n# This source code is licensed under the BSD license found in the\r\n# LICENSE file in the root directory of this source tree.\r\n\"\"\"Manipulation of micro-batches.\"\"\"\r\nimport typing\r\nfrom typing import Any, Callable, List, Union, cast, Sequence\r\n\r\nimport torch\r\nfrom torch import Tensor\r\nimport torch.cuda.comm\r\n\r\n__all__: List[str] = []\r\n\r\n\r\nTensors = Sequence[Tensor]\r\nTensorOrTensors = Union[Tensor, Tensors]\r\nFunction = Callable[[TensorOrTensors], Union[List[Any], Tensor]]\r\n\r\n\r\nclass NoChunk(object):\r\n \"\"\"\r\n Wrapper for a Tensor in :meth:`Pipe.forward` indicating that the tensor\r\n should not be chunked on the batch dimension and instead be replicated\r\n as-is across all micro-batches. This is useful for tensors which might\r\n not have any 'batch' semantics for the model.\r\n \"\"\"\r\n def __init__(self, inp: Tensor):\r\n if not torch.is_tensor(inp):\r\n raise TypeError(f'NoChunk only supported for tensors, found: {inp}')\r\n self._tensor = inp\r\n\r\n @property\r\n def tensor(self):\r\n return self._tensor\r\n\r\n\r\nclass Batch:\r\n \"\"\"\r\n An abstraction representing a microbatch in the pipeline.\r\n \"\"\"\r\n\r\n def __init__(self, values: Union[List[Any], Tensor]) -> None:\r\n self._values = values\r\n self.atomic = torch.is_tensor(values)\r\n\r\n # Verify at least on tensor\r\n if not self.atomic:\r\n if not any(torch.is_tensor(value) for value in self._values):\r\n raise TypeError(f'No tensors found in batch: {self._values}')\r\n\r\n @property\r\n def tensor(self) -> Tensor:\r\n \"\"\"Retrieves the underlying tensor.\"\"\"\r\n if not self.atomic:\r\n raise AttributeError(\"not atomic batch\")\r\n return cast(Tensor, self._values)\r\n\r\n @property\r\n def values(self):\r\n \"\"\"Retreives the underlying values for the batch\"\"\"\r\n return self._values\r\n\r\n def find_tensor_idx(self):\r\n \"\"\"\r\n Retrieves the index of first tensor found.\r\n \"\"\"\r\n if self.atomic:\r\n return 0\r\n for i, value in enumerate(self._values):\r\n if torch.is_tensor(value):\r\n return i\r\n\r\n raise TypeError(\"No tensor found!\")\r\n\r\n def get_device(self):\r\n \"\"\"\r\n Retrieves the device for this microbatch.\r\n \"\"\"\r\n if self.atomic:\r\n return self._values.device # type: ignore[union-attr]\r\n\r\n for value in self._values:\r\n if torch.is_tensor(value):\r\n return value.device\r\n\r\n def call(self, function: Function) -> \"Batch\":\r\n \"\"\"Calls a function on the microbatch. 
It also wraps\r\n the output with :class:`Batch`.\r\n \"\"\"\r\n if self.atomic:\r\n return Batch(function(self._values))\r\n else:\r\n return Batch(function(*self._values))\r\n\r\n def __repr__(self) -> str:\r\n return f\"Batch[atomic={self.atomic!r}]({self._values!r})\"\r\n\r\n def __iter__(self):\r\n if self.atomic:\r\n yield self._values\r\n else:\r\n yield from self._values\r\n\r\n def __len__(self) -> int:\r\n return 1 if self.atomic else len(self._values)\r\n\r\n def __getitem__(self, index: int):\r\n if not self.atomic:\r\n return self._values[index]\r\n\r\n if index != 0:\r\n raise IndexError(\"atomic batch allows index 0 only\")\r\n\r\n return self._values\r\n\r\n # NOTE(sublee): pyflakes can't detect \"overload\" instead of \"typing.overload\".\r\n @typing.overload\r\n def __setitem__(self, index: int, value: Tensor) -> None:\r\n ...\r\n\r\n @typing.overload\r\n def __setitem__(self, index: slice, value: Tensors) -> None:\r\n ...\r\n\r\n def __setitem__(self, index: Union[int, slice], value) -> None:\r\n if isinstance(index, int):\r\n self._setitem_by_index(index, value)\r\n else:\r\n self._setitem_by_slice(index, value)\r\n\r\n def _setitem_by_index(self, index: int, value) -> None:\r\n if not self.atomic:\r\n i = index\r\n self._values = self._values[:i] + (value,) + self._values[i + 1 :] # type: ignore[operator]\r\n return\r\n\r\n if index != 0:\r\n raise IndexError(\"atomic batch allows index 0 only\")\r\n\r\n self._values = value\r\n\r\n def _setitem_by_slice(self, index: slice, value) -> None:\r\n if not (index.start is index.stop is index.step is None):\r\n raise NotImplementedError(\"only slice [:] supported\")\r\n\r\n if not self.atomic:\r\n self._values = value\r\n return\r\n\r\n if len(value) != 1:\r\n raise IndexError(\"atomic batch cannot be replaced with multiple tensors\")\r\n\r\n self._values = value[0]\r\n\r\n\r\ndef check(first_device, *inputs) -> None:\r\n \"\"\"\r\n Checks whether the input contains at least one tensor and each tensor is\r\n on the same device as the first partition.\r\n\r\n Raises:\r\n ValueError: input does not contain at least one tensor\r\n\r\n \"\"\"\r\n\r\n if not any(torch.is_tensor(input) for input in inputs):\r\n raise TypeError(f'inputs do not have any tensors: {inputs}')\r\n if any(torch.is_tensor(input) and input.device != first_device for input in inputs):\r\n raise ValueError('All inputs should be on the same device as the first partition')\r\n\r\n\r\ndef scatter(*inputs, chunks: int) -> List[Batch]:\r\n \"\"\"Splits an input mini-batch into multiple micro-batches.\"\"\"\r\n if len(inputs) == 1 and isinstance(inputs[0], Tensor):\r\n return [Batch(x) for x in inputs[0].chunk(chunks)]\r\n\r\n batches: List[Any] = [[] for _ in range(chunks)]\r\n # Actual number of chunks produced\r\n num_chunks = -1\r\n for input in inputs:\r\n if torch.is_tensor(input):\r\n # Chunk only tensors.\r\n tensors = input.chunk(chunks)\r\n\r\n # Validate number of chunks equal across all inputs.\r\n if num_chunks != -1 and num_chunks != len(tensors):\r\n raise RuntimeError(f'Found different number of chunks produced for inputs: {num_chunks} and {len(tensors)}')\r\n num_chunks = len(tensors)\r\n\r\n for i, tensor in enumerate(tensors):\r\n batches[i].append(tensor)\r\n else:\r\n # Replicate non-tensors or tensors wrapped with 'NoChunk'.\r\n for i in range(chunks):\r\n if isinstance(input, NoChunk):\r\n # Extract the tensor out.\r\n batches[i].append(input.tensor)\r\n else:\r\n batches[i].append(input)\r\n\r\n # Truncate to actual number of chunks\r\n 
batches = batches[:num_chunks]\r\n\r\n return [Batch(x) for x in batches]\r\n\r\n\r\ndef gather(outputs: List[Batch]):\r\n \"\"\"Concatenates output micro-batches into a mini-batch.\"\"\"\r\n output: Any\r\n\r\n if outputs[0].atomic:\r\n tensors = tuple(b.tensor for b in outputs)\r\n output = torch.cat(tensors)\r\n else:\r\n output_buf: List[Any] = []\r\n for i in range(len(outputs[0])):\r\n output_type = type(outputs[0][i])\r\n current_outputs = []\r\n for batch in outputs:\r\n if output_type != type(batch[i]):\r\n raise TypeError(f'Types for microbatch outputs do not match, found: {output_type} and {type(batch[i])}')\r\n current_outputs.append(batch[i])\r\n\r\n if torch.is_tensor(outputs[0][i]):\r\n output_buf.append(torch.cat(current_outputs))\r\n else:\r\n output_buf.append(current_outputs)\r\n\r\n output = tuple(output_buf)\r\n\r\n return output\r\n",
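For the atomic (single-tensor) case, `scatter` above reduces to `Tensor.chunk` along the batch dimension and `gather` to `torch.cat`. A self-contained round-trip sketch of that idea with plain tensors (the `Batch` wrapper itself is private, so it is not used here):

import torch

x = torch.arange(12.0).reshape(6, 2)   # mini-batch of 6 rows

# "scatter": split the mini-batch into 3 micro-batches along dim 0
micro_batches = x.chunk(3)             # three tensors of shape (2, 2)

# run a per-micro-batch function, standing in for a pipeline stage
outputs = [mb * 10 for mb in micro_batches]

# "gather": concatenate micro-batch outputs back into a mini-batch
y = torch.cat(outputs)
assert y.shape == x.shape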
"\"\"\"\r\nexec_command\r\n\r\nImplements exec_command function that is (almost) equivalent to\r\ncommands.getstatusoutput function but on NT, DOS systems the\r\nreturned status is actually correct (though, the returned status\r\nvalues may be different by a factor). In addition, exec_command\r\ntakes keyword arguments for (re-)defining environment variables.\r\n\r\nProvides functions:\r\n\r\n exec_command --- execute command in a specified directory and\r\n in the modified environment.\r\n find_executable --- locate a command using info from environment\r\n variable PATH. Equivalent to posix `which`\r\n command.\r\n\r\nAuthor: Pearu Peterson <[email protected]>\r\nCreated: 11 January 2003\r\n\r\nRequires: Python 2.x\r\n\r\nSuccessfully tested on:\r\n\r\n======== ============ =================================================\r\nos.name sys.platform comments\r\n======== ============ =================================================\r\nposix linux2 Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3\r\n PyCrust 0.9.3, Idle 1.0.2\r\nposix linux2 Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2\r\nposix sunos5 SunOS 5.9, Python 2.2, 2.3.2\r\nposix darwin Darwin 7.2.0, Python 2.3\r\nnt win32 Windows Me\r\n Python 2.3(EE), Idle 1.0, PyCrust 0.7.2\r\n Python 2.1.1 Idle 0.8\r\nnt win32 Windows 98, Python 2.1.1. Idle 0.8\r\nnt win32 Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests\r\n fail i.e. redefining environment variables may\r\n not work. FIXED: don't use cygwin echo!\r\n Comment: also `cmd /c echo` will not work\r\n but redefining environment variables do work.\r\nposix cygwin Cygwin 98-4.10, Python 2.3.3(cygming special)\r\nnt win32 Windows XP, Python 2.3.3\r\n======== ============ =================================================\r\n\r\nKnown bugs:\r\n\r\n* Tests, that send messages to stderr, fail when executed from MSYS prompt\r\n because the messages are lost at some point.\r\n\r\n\"\"\"\r\n__all__ = ['exec_command', 'find_executable']\r\n\r\nimport os\r\nimport sys\r\nimport subprocess\r\nimport locale\r\nimport warnings\r\n\r\nfrom numpy.distutils.misc_util import is_sequence, make_temp_file\r\nfrom numpy.distutils import log\r\n\r\ndef filepath_from_subprocess_output(output):\r\n \"\"\"\r\n Convert `bytes` in the encoding used by a subprocess into a filesystem-appropriate `str`.\r\n\r\n Inherited from `exec_command`, and possibly incorrect.\r\n \"\"\"\r\n mylocale = locale.getpreferredencoding(False)\r\n if mylocale is None:\r\n mylocale = 'ascii'\r\n output = output.decode(mylocale, errors='replace')\r\n output = output.replace('\\r\\n', '\\n')\r\n # Another historical oddity\r\n if output[-1:] == '\\n':\r\n output = output[:-1]\r\n return output\r\n\r\n\r\ndef forward_bytes_to_stdout(val):\r\n \"\"\"\r\n Forward bytes from a subprocess call to the console, without attempting to\r\n decode them.\r\n\r\n The assumption is that the subprocess call already returned bytes in\r\n a suitable encoding.\r\n \"\"\"\r\n if hasattr(sys.stdout, 'buffer'):\r\n # use the underlying binary output if there is one\r\n sys.stdout.buffer.write(val)\r\n elif hasattr(sys.stdout, 'encoding'):\r\n # round-trip the encoding if necessary\r\n sys.stdout.write(val.decode(sys.stdout.encoding))\r\n else:\r\n # make a best-guess at the encoding\r\n sys.stdout.write(val.decode('utf8', errors='replace'))\r\n\r\n\r\ndef temp_file_name():\r\n # 2019-01-30, 1.17\r\n warnings.warn('temp_file_name is deprecated since NumPy v1.17, use '\r\n 'tempfile.mkstemp instead', DeprecationWarning, stacklevel=1)\r\n fo, name = 
make_temp_file()\r\n fo.close()\r\n return name\r\n\r\ndef get_pythonexe():\r\n pythonexe = sys.executable\r\n if os.name in ['nt', 'dos']:\r\n fdir, fn = os.path.split(pythonexe)\r\n fn = fn.upper().replace('PYTHONW', 'PYTHON')\r\n pythonexe = os.path.join(fdir, fn)\r\n assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,)\r\n return pythonexe\r\n\r\ndef find_executable(exe, path=None, _cache={}):\r\n \"\"\"Return full path of a executable or None.\r\n\r\n Symbolic links are not followed.\r\n \"\"\"\r\n key = exe, path\r\n try:\r\n return _cache[key]\r\n except KeyError:\r\n pass\r\n log.debug('find_executable(%r)' % exe)\r\n orig_exe = exe\r\n\r\n if path is None:\r\n path = os.environ.get('PATH', os.defpath)\r\n if os.name=='posix':\r\n realpath = os.path.realpath\r\n else:\r\n realpath = lambda a:a\r\n\r\n if exe.startswith('\"'):\r\n exe = exe[1:-1]\r\n\r\n suffixes = ['']\r\n if os.name in ['nt', 'dos', 'os2']:\r\n fn, ext = os.path.splitext(exe)\r\n extra_suffixes = ['.exe', '.com', '.bat']\r\n if ext.lower() not in extra_suffixes:\r\n suffixes = extra_suffixes\r\n\r\n if os.path.isabs(exe):\r\n paths = ['']\r\n else:\r\n paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ]\r\n\r\n for path in paths:\r\n fn = os.path.join(path, exe)\r\n for s in suffixes:\r\n f_ext = fn+s\r\n if not os.path.islink(f_ext):\r\n f_ext = realpath(f_ext)\r\n if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK):\r\n log.info('Found executable %s' % f_ext)\r\n _cache[key] = f_ext\r\n return f_ext\r\n\r\n log.warn('Could not locate executable %s' % orig_exe)\r\n return None\r\n\r\n############################################################\r\n\r\ndef _preserve_environment( names ):\r\n log.debug('_preserve_environment(%r)' % (names))\r\n env = {name: os.environ.get(name) for name in names}\r\n return env\r\n\r\ndef _update_environment( **env ):\r\n log.debug('_update_environment(...)')\r\n for name, value in env.items():\r\n os.environ[name] = value or ''\r\n\r\ndef exec_command(command, execute_in='', use_shell=None, use_tee=None,\r\n _with_python = 1, **env ):\r\n \"\"\"\r\n Return (status,output) of executed command.\r\n\r\n .. deprecated:: 1.17\r\n Use subprocess.Popen instead\r\n\r\n Parameters\r\n ----------\r\n command : str\r\n A concatenated string of executable and arguments.\r\n execute_in : str\r\n Before running command ``cd execute_in`` and after ``cd -``.\r\n use_shell : {bool, None}, optional\r\n If True, execute ``sh -c command``. Default None (True)\r\n use_tee : {bool, None}, optional\r\n If True use tee. 
Default None (True)\r\n\r\n\r\n Returns\r\n -------\r\n res : str\r\n Both stdout and stderr messages.\r\n\r\n Notes\r\n -----\r\n On NT, DOS systems the returned status is correct for external commands.\r\n Wild cards will not work for non-posix systems or when use_shell=0.\r\n\r\n \"\"\"\r\n # 2019-01-30, 1.17\r\n warnings.warn('exec_command is deprecated since NumPy v1.17, use '\r\n 'subprocess.Popen instead', DeprecationWarning, stacklevel=1)\r\n log.debug('exec_command(%r,%s)' % (command,\r\n ','.join(['%s=%r'%kv for kv in env.items()])))\r\n\r\n if use_tee is None:\r\n use_tee = os.name=='posix'\r\n if use_shell is None:\r\n use_shell = os.name=='posix'\r\n execute_in = os.path.abspath(execute_in)\r\n oldcwd = os.path.abspath(os.getcwd())\r\n\r\n if __name__[-12:] == 'exec_command':\r\n exec_dir = os.path.dirname(os.path.abspath(__file__))\r\n elif os.path.isfile('exec_command.py'):\r\n exec_dir = os.path.abspath('.')\r\n else:\r\n exec_dir = os.path.abspath(sys.argv[0])\r\n if os.path.isfile(exec_dir):\r\n exec_dir = os.path.dirname(exec_dir)\r\n\r\n if oldcwd!=execute_in:\r\n os.chdir(execute_in)\r\n log.debug('New cwd: %s' % execute_in)\r\n else:\r\n log.debug('Retaining cwd: %s' % oldcwd)\r\n\r\n oldenv = _preserve_environment( list(env.keys()) )\r\n _update_environment( **env )\r\n\r\n try:\r\n st = _exec_command(command,\r\n use_shell=use_shell,\r\n use_tee=use_tee,\r\n **env)\r\n finally:\r\n if oldcwd!=execute_in:\r\n os.chdir(oldcwd)\r\n log.debug('Restored cwd to %s' % oldcwd)\r\n _update_environment(**oldenv)\r\n\r\n return st\r\n\r\n\r\ndef _exec_command(command, use_shell=None, use_tee = None, **env):\r\n \"\"\"\r\n Internal workhorse for exec_command().\r\n \"\"\"\r\n if use_shell is None:\r\n use_shell = os.name=='posix'\r\n if use_tee is None:\r\n use_tee = os.name=='posix'\r\n\r\n if os.name == 'posix' and use_shell:\r\n # On POSIX, subprocess always uses /bin/sh, override\r\n sh = os.environ.get('SHELL', '/bin/sh')\r\n if is_sequence(command):\r\n command = [sh, '-c', ' '.join(command)]\r\n else:\r\n command = [sh, '-c', command]\r\n use_shell = False\r\n\r\n elif os.name == 'nt' and is_sequence(command):\r\n # On Windows, join the string for CreateProcess() ourselves as\r\n # subprocess does it a bit differently\r\n command = ' '.join(_quote_arg(arg) for arg in command)\r\n\r\n # Inherit environment by default\r\n env = env or None\r\n try:\r\n # universal_newlines is set to False so that communicate()\r\n # will return bytes. We need to decode the output ourselves\r\n # so that Python will not raise a UnicodeDecodeError when\r\n # it encounters an invalid character; rather, we simply replace it\r\n proc = subprocess.Popen(command, shell=use_shell, env=env,\r\n stdout=subprocess.PIPE,\r\n stderr=subprocess.STDOUT,\r\n universal_newlines=False)\r\n except OSError:\r\n # Return 127, as os.spawn*() and /bin/sh do\r\n return 127, ''\r\n\r\n text, err = proc.communicate()\r\n mylocale = locale.getpreferredencoding(False)\r\n if mylocale is None:\r\n mylocale = 'ascii'\r\n text = text.decode(mylocale, errors='replace')\r\n text = text.replace('\\r\\n', '\\n')\r\n # Another historical oddity\r\n if text[-1:] == '\\n':\r\n text = text[:-1]\r\n\r\n if use_tee and text:\r\n print(text)\r\n return proc.returncode, text\r\n\r\n\r\ndef _quote_arg(arg):\r\n \"\"\"\r\n Quote the argument for safe use in a shell command line.\r\n \"\"\"\r\n # If there is a quote in the string, assume relevants parts of the\r\n # string are already quoted (e.g. 
'-I\"C:\\\\Program Files\\\\...\"')\r\n if '\"' not in arg and ' ' in arg:\r\n return '\"%s\"' % arg\r\n return arg\r\n\r\n############################################################\r\n",
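`exec_command` above is deprecated in favour of `subprocess.Popen`. A minimal stdlib sketch that reproduces its (status, output) contract, with stderr merged into stdout and the same replacement decoding and trailing-newline trimming (an illustration of the recommended replacement, not the numpy.distutils implementation; `run_command` is a hypothetical name):

import subprocess

def run_command(command, cwd=None):
    # Capture stdout and stderr together and decode with errors='replace',
    # as the deprecated helper above does.
    proc = subprocess.run(command, shell=isinstance(command, str), cwd=cwd,
                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    text = proc.stdout.decode(errors='replace').replace('\r\n', '\n')
    return proc.returncode, text.rstrip('\n')

status, output = run_command('echo hello')
print(status, output)   # 0 hello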
"\r\n\r\n\r\n\r\n\r\nfrom caffe2.python import core\r\nfrom hypothesis import given, settings\r\nimport caffe2.python.hypothesis_test_util as hu\r\nimport hypothesis.strategies as st\r\nimport numpy as np\r\n\r\n\r\nclass RMACRegionsOpTest(hu.HypothesisTestCase):\r\n @given(\r\n n=st.integers(500, 500),\r\n h=st.integers(1, 10),\r\n w=st.integers(1, 10),\r\n scales=st.integers(1, 3),\r\n **hu.gcs\r\n )\r\n @settings(deadline=10000)\r\n def test(self, n, h, w, scales, gc, dc):\r\n X = np.random.rand(n, 64, h, w).astype(np.float32)\r\n overlap = 0.4\r\n\r\n def ref_op(X):\r\n N, H, W = X.shape[0], X.shape[2], X.shape[3]\r\n\r\n # Possible regions for the long dimension\r\n steps = np.array((2, 3, 4, 5, 6, 7), dtype=np.float32)\r\n minW = np.minimum(H, W)\r\n\r\n # steps(idx) regions for long dimension\r\n b = (np.maximum(H, W) - minW) / (steps - 1)\r\n idx = np.argmin(\r\n np.abs(((minW**2 - minW * b) / minW**2) - overlap)) + 1\r\n\r\n # Region overplus per dimension\r\n Wd = 0\r\n Hd = 0\r\n if H < W:\r\n Wd = idx\r\n elif H > W:\r\n Hd = idx\r\n\r\n regions_xywh = []\r\n for l in range(1, scales + 1):\r\n wl = np.floor(2 * minW / (l + 1))\r\n\r\n # Center coordinates\r\n if l + Wd - 1 > 0:\r\n b = (W - wl) / (l + Wd - 1)\r\n else:\r\n b = 0\r\n cenW = np.floor(b * np.arange(l - 1 + Wd + 1))\r\n\r\n # Center coordinates\r\n if l + Hd - 1 > 0:\r\n b = (H - wl) / (l + Hd - 1)\r\n else:\r\n b = 0\r\n cenH = np.floor(b * np.arange(l - 1 + Hd + 1))\r\n\r\n for i_ in cenW:\r\n for j_ in cenH:\r\n regions_xywh.append([i_, j_, wl, wl])\r\n\r\n # Round the regions. Careful with the borders!\r\n for i in range(len(regions_xywh)):\r\n for j in range(4):\r\n regions_xywh[i][j] = int(round(regions_xywh[i][j]))\r\n if regions_xywh[i][0] + regions_xywh[i][2] > W:\r\n regions_xywh[i][0] -= (\r\n (regions_xywh[i][0] + regions_xywh[i][2]) - W\r\n )\r\n if regions_xywh[i][1] + regions_xywh[i][3] > H:\r\n regions_xywh[i][1] -= (\r\n (regions_xywh[i][1] + regions_xywh[i][3]) - H\r\n )\r\n # Filter out 0-sized regions\r\n regions_xywh = [r for r in regions_xywh if r[2] * r[3] > 0]\r\n\r\n # Convert to ROIPoolOp format: (batch_index x1 y1 x2 y2)\r\n regions = [\r\n [i, x, y, x + w - 1, y + h - 1]\r\n for i in np.arange(N) for x, y, w, h in regions_xywh\r\n ]\r\n return (np.array(regions).astype(np.float32), )\r\n\r\n op = core.CreateOperator(\r\n 'RMACRegions',\r\n ['X'],\r\n ['RMAC_REGIONS'],\r\n scales=scales,\r\n overlap=overlap,\r\n )\r\n\r\n # Check against numpy reference\r\n self.assertReferenceChecks(gc, op, [X], ref_op)\r\n",
"\r\nimport numpy\r\nimport numpy as np\r\nimport datetime\r\nimport pytest\r\nfrom numpy.testing import (\r\n assert_, assert_equal, assert_raises, assert_warns, suppress_warnings,\r\n assert_raises_regex, assert_array_equal,\r\n )\r\nfrom numpy.compat import pickle\r\n\r\n# Use pytz to test out various time zones if available\r\ntry:\r\n from pytz import timezone as tz\r\n _has_pytz = True\r\nexcept ImportError:\r\n _has_pytz = False\r\n\r\ntry:\r\n RecursionError\r\nexcept NameError:\r\n RecursionError = RuntimeError # python < 3.5\r\n\r\n\r\nclass TestDateTime:\r\n def test_datetime_dtype_creation(self):\r\n for unit in ['Y', 'M', 'W', 'D',\r\n 'h', 'm', 's', 'ms', 'us',\r\n 'μs', # alias for us\r\n 'ns', 'ps', 'fs', 'as']:\r\n dt1 = np.dtype('M8[750%s]' % unit)\r\n assert_(dt1 == np.dtype('datetime64[750%s]' % unit))\r\n dt2 = np.dtype('m8[%s]' % unit)\r\n assert_(dt2 == np.dtype('timedelta64[%s]' % unit))\r\n\r\n # Generic units shouldn't add [] to the end\r\n assert_equal(str(np.dtype(\"M8\")), \"datetime64\")\r\n\r\n # Should be possible to specify the endianness\r\n assert_equal(np.dtype(\"=M8\"), np.dtype(\"M8\"))\r\n assert_equal(np.dtype(\"=M8[s]\"), np.dtype(\"M8[s]\"))\r\n assert_(np.dtype(\">M8\") == np.dtype(\"M8\") or\r\n np.dtype(\"<M8\") == np.dtype(\"M8\"))\r\n assert_(np.dtype(\">M8[D]\") == np.dtype(\"M8[D]\") or\r\n np.dtype(\"<M8[D]\") == np.dtype(\"M8[D]\"))\r\n assert_(np.dtype(\">M8\") != np.dtype(\"<M8\"))\r\n\r\n assert_equal(np.dtype(\"=m8\"), np.dtype(\"m8\"))\r\n assert_equal(np.dtype(\"=m8[s]\"), np.dtype(\"m8[s]\"))\r\n assert_(np.dtype(\">m8\") == np.dtype(\"m8\") or\r\n np.dtype(\"<m8\") == np.dtype(\"m8\"))\r\n assert_(np.dtype(\">m8[D]\") == np.dtype(\"m8[D]\") or\r\n np.dtype(\"<m8[D]\") == np.dtype(\"m8[D]\"))\r\n assert_(np.dtype(\">m8\") != np.dtype(\"<m8\"))\r\n\r\n # Check that the parser rejects bad datetime types\r\n assert_raises(TypeError, np.dtype, 'M8[badunit]')\r\n assert_raises(TypeError, np.dtype, 'm8[badunit]')\r\n assert_raises(TypeError, np.dtype, 'M8[YY]')\r\n assert_raises(TypeError, np.dtype, 'm8[YY]')\r\n assert_raises(TypeError, np.dtype, 'm4')\r\n assert_raises(TypeError, np.dtype, 'M7')\r\n assert_raises(TypeError, np.dtype, 'm7')\r\n assert_raises(TypeError, np.dtype, 'M16')\r\n assert_raises(TypeError, np.dtype, 'm16')\r\n assert_raises(TypeError, np.dtype, 'M8[3000000000ps]')\r\n\r\n def test_datetime_casting_rules(self):\r\n # Cannot cast safely/same_kind between timedelta and datetime\r\n assert_(not np.can_cast('m8', 'M8', casting='same_kind'))\r\n assert_(not np.can_cast('M8', 'm8', casting='same_kind'))\r\n assert_(not np.can_cast('m8', 'M8', casting='safe'))\r\n assert_(not np.can_cast('M8', 'm8', casting='safe'))\r\n\r\n # Can cast safely/same_kind from integer to timedelta\r\n assert_(np.can_cast('i8', 'm8', casting='same_kind'))\r\n assert_(np.can_cast('i8', 'm8', casting='safe'))\r\n assert_(np.can_cast('i4', 'm8', casting='same_kind'))\r\n assert_(np.can_cast('i4', 'm8', casting='safe'))\r\n assert_(np.can_cast('u4', 'm8', casting='same_kind'))\r\n assert_(np.can_cast('u4', 'm8', casting='safe'))\r\n\r\n # Cannot cast safely from unsigned integer of the same size, which\r\n # could overflow\r\n assert_(np.can_cast('u8', 'm8', casting='same_kind'))\r\n assert_(not np.can_cast('u8', 'm8', casting='safe'))\r\n\r\n # Cannot cast safely/same_kind from float to timedelta\r\n assert_(not np.can_cast('f4', 'm8', casting='same_kind'))\r\n assert_(not np.can_cast('f4', 'm8', casting='safe'))\r\n\r\n # Cannot cast 
safely/same_kind from integer to datetime\r\n assert_(not np.can_cast('i8', 'M8', casting='same_kind'))\r\n assert_(not np.can_cast('i8', 'M8', casting='safe'))\r\n\r\n # Cannot cast safely/same_kind from bool to datetime\r\n assert_(not np.can_cast('b1', 'M8', casting='same_kind'))\r\n assert_(not np.can_cast('b1', 'M8', casting='safe'))\r\n # Can cast safely/same_kind from bool to timedelta\r\n assert_(np.can_cast('b1', 'm8', casting='same_kind'))\r\n assert_(np.can_cast('b1', 'm8', casting='safe'))\r\n\r\n # Can cast datetime safely from months/years to days\r\n assert_(np.can_cast('M8[M]', 'M8[D]', casting='safe'))\r\n assert_(np.can_cast('M8[Y]', 'M8[D]', casting='safe'))\r\n # Cannot cast timedelta safely from months/years to days\r\n assert_(not np.can_cast('m8[M]', 'm8[D]', casting='safe'))\r\n assert_(not np.can_cast('m8[Y]', 'm8[D]', casting='safe'))\r\n # Can cast datetime same_kind from months/years to days\r\n assert_(np.can_cast('M8[M]', 'M8[D]', casting='same_kind'))\r\n assert_(np.can_cast('M8[Y]', 'M8[D]', casting='same_kind'))\r\n # Can't cast timedelta same_kind from months/years to days\r\n assert_(not np.can_cast('m8[M]', 'm8[D]', casting='same_kind'))\r\n assert_(not np.can_cast('m8[Y]', 'm8[D]', casting='same_kind'))\r\n # Can cast datetime same_kind across the date/time boundary\r\n assert_(np.can_cast('M8[D]', 'M8[h]', casting='same_kind'))\r\n # Can cast timedelta same_kind across the date/time boundary\r\n assert_(np.can_cast('m8[D]', 'm8[h]', casting='same_kind'))\r\n assert_(np.can_cast('m8[h]', 'm8[D]', casting='same_kind'))\r\n\r\n # Cannot cast safely if the integer multiplier doesn't divide\r\n assert_(not np.can_cast('M8[7h]', 'M8[3h]', casting='safe'))\r\n assert_(not np.can_cast('M8[3h]', 'M8[6h]', casting='safe'))\r\n # But can cast same_kind\r\n assert_(np.can_cast('M8[7h]', 'M8[3h]', casting='same_kind'))\r\n # Can cast safely if the integer multiplier does divide\r\n assert_(np.can_cast('M8[6h]', 'M8[3h]', casting='safe'))\r\n\r\n # We can always cast types with generic units (corresponding to NaT) to\r\n # more specific types\r\n assert_(np.can_cast('m8', 'm8[h]', casting='same_kind'))\r\n assert_(np.can_cast('m8', 'm8[h]', casting='safe'))\r\n assert_(np.can_cast('M8', 'M8[h]', casting='same_kind'))\r\n assert_(np.can_cast('M8', 'M8[h]', casting='safe'))\r\n # but not the other way around\r\n assert_(not np.can_cast('m8[h]', 'm8', casting='same_kind'))\r\n assert_(not np.can_cast('m8[h]', 'm8', casting='safe'))\r\n assert_(not np.can_cast('M8[h]', 'M8', casting='same_kind'))\r\n assert_(not np.can_cast('M8[h]', 'M8', casting='safe'))\r\n\r\n def test_datetime_prefix_conversions(self):\r\n # regression tests related to gh-19631;\r\n # test metric prefixes from seconds down to\r\n # attoseconds for bidirectional conversions\r\n smaller_units = ['M8[7000ms]',\r\n 'M8[2000us]',\r\n 'M8[1000ns]',\r\n 'M8[5000ns]',\r\n 'M8[2000ps]',\r\n 'M8[9000fs]',\r\n 'M8[1000as]',\r\n 'M8[2000000ps]',\r\n 'M8[1000000as]',\r\n 'M8[2000000000ps]',\r\n 'M8[1000000000as]']\r\n larger_units = ['M8[7s]',\r\n 'M8[2ms]',\r\n 'M8[us]',\r\n 'M8[5us]',\r\n 'M8[2ns]',\r\n 'M8[9ps]',\r\n 'M8[1fs]',\r\n 'M8[2us]',\r\n 'M8[1ps]',\r\n 'M8[2ms]',\r\n 'M8[1ns]']\r\n for larger_unit, smaller_unit in zip(larger_units, smaller_units):\r\n assert np.can_cast(larger_unit, smaller_unit, casting='safe')\r\n assert np.can_cast(smaller_unit, larger_unit, casting='safe')\r\n\r\n @pytest.mark.parametrize(\"unit\", [\r\n \"s\", \"ms\", \"us\", \"ns\", \"ps\", \"fs\", \"as\"])\r\n def 
test_prohibit_negative_datetime(self, unit):\r\n with assert_raises(TypeError):\r\n np.array([1], dtype=f\"M8[-1{unit}]\")\r\n\r\n def test_compare_generic_nat(self):\r\n # regression tests for gh-6452\r\n assert_(np.datetime64('NaT') !=\r\n np.datetime64('2000') + np.timedelta64('NaT'))\r\n assert_(np.datetime64('NaT') != np.datetime64('NaT', 'us'))\r\n assert_(np.datetime64('NaT', 'us') != np.datetime64('NaT'))\r\n\r\n @pytest.mark.parametrize(\"size\", [\r\n 3, 21, 217, 1000])\r\n def test_datetime_nat_argsort_stability(self, size):\r\n # NaT < NaT should be False internally for\r\n # sort stability\r\n expected = np.arange(size)\r\n arr = np.tile(np.datetime64('NaT'), size)\r\n assert_equal(np.argsort(arr, kind='mergesort'), expected)\r\n\r\n @pytest.mark.parametrize(\"size\", [\r\n 3, 21, 217, 1000])\r\n def test_timedelta_nat_argsort_stability(self, size):\r\n # NaT < NaT should be False internally for\r\n # sort stability\r\n expected = np.arange(size)\r\n arr = np.tile(np.timedelta64('NaT'), size)\r\n assert_equal(np.argsort(arr, kind='mergesort'), expected)\r\n\r\n @pytest.mark.parametrize(\"arr, expected\", [\r\n # the example provided in gh-12629\r\n (['NaT', 1, 2, 3],\r\n [1, 2, 3, 'NaT']),\r\n # multiple NaTs\r\n (['NaT', 9, 'NaT', -707],\r\n [-707, 9, 'NaT', 'NaT']),\r\n # this sort explores another code path for NaT\r\n ([1, -2, 3, 'NaT'],\r\n [-2, 1, 3, 'NaT']),\r\n # 2-D array\r\n ([[51, -220, 'NaT'],\r\n [-17, 'NaT', -90]],\r\n [[-220, 51, 'NaT'],\r\n [-90, -17, 'NaT']]),\r\n ])\r\n @pytest.mark.parametrize(\"dtype\", [\r\n 'M8[ns]', 'M8[us]',\r\n 'm8[ns]', 'm8[us]'])\r\n def test_datetime_timedelta_sort_nat(self, arr, expected, dtype):\r\n # fix for gh-12629 and gh-15063; NaT sorting to end of array\r\n arr = np.array(arr, dtype=dtype)\r\n expected = np.array(expected, dtype=dtype)\r\n arr.sort()\r\n assert_equal(arr, expected)\r\n\r\n def test_datetime_scalar_construction(self):\r\n # Construct with different units\r\n assert_equal(np.datetime64('1950-03-12', 'D'),\r\n np.datetime64('1950-03-12'))\r\n assert_equal(np.datetime64('1950-03-12T13', 's'),\r\n np.datetime64('1950-03-12T13', 'm'))\r\n\r\n # Default construction means NaT\r\n assert_equal(np.datetime64(), np.datetime64('NaT'))\r\n\r\n # Some basic strings and repr\r\n assert_equal(str(np.datetime64('NaT')), 'NaT')\r\n assert_equal(repr(np.datetime64('NaT')),\r\n \"numpy.datetime64('NaT')\")\r\n assert_equal(str(np.datetime64('2011-02')), '2011-02')\r\n assert_equal(repr(np.datetime64('2011-02')),\r\n \"numpy.datetime64('2011-02')\")\r\n\r\n # None gets constructed as NaT\r\n assert_equal(np.datetime64(None), np.datetime64('NaT'))\r\n\r\n # Default construction of NaT is in generic units\r\n assert_equal(np.datetime64().dtype, np.dtype('M8'))\r\n assert_equal(np.datetime64('NaT').dtype, np.dtype('M8'))\r\n\r\n # Construction from integers requires a specified unit\r\n assert_raises(ValueError, np.datetime64, 17)\r\n\r\n # When constructing from a scalar or zero-dimensional array,\r\n # it either keeps the units or you can override them.\r\n a = np.datetime64('2000-03-18T16', 'h')\r\n b = np.array('2000-03-18T16', dtype='M8[h]')\r\n\r\n assert_equal(a.dtype, np.dtype('M8[h]'))\r\n assert_equal(b.dtype, np.dtype('M8[h]'))\r\n\r\n assert_equal(np.datetime64(a), a)\r\n assert_equal(np.datetime64(a).dtype, np.dtype('M8[h]'))\r\n\r\n assert_equal(np.datetime64(b), a)\r\n assert_equal(np.datetime64(b).dtype, np.dtype('M8[h]'))\r\n\r\n assert_equal(np.datetime64(a, 's'), a)\r\n assert_equal(np.datetime64(a, 's').dtype, 
np.dtype('M8[s]'))\r\n\r\n assert_equal(np.datetime64(b, 's'), a)\r\n assert_equal(np.datetime64(b, 's').dtype, np.dtype('M8[s]'))\r\n\r\n # Construction from datetime.date\r\n assert_equal(np.datetime64('1945-03-25'),\r\n np.datetime64(datetime.date(1945, 3, 25)))\r\n assert_equal(np.datetime64('2045-03-25', 'D'),\r\n np.datetime64(datetime.date(2045, 3, 25), 'D'))\r\n # Construction from datetime.datetime\r\n assert_equal(np.datetime64('1980-01-25T14:36:22.5'),\r\n np.datetime64(datetime.datetime(1980, 1, 25,\r\n 14, 36, 22, 500000)))\r\n\r\n # Construction with time units from a date is okay\r\n assert_equal(np.datetime64('1920-03-13', 'h'),\r\n np.datetime64('1920-03-13T00'))\r\n assert_equal(np.datetime64('1920-03', 'm'),\r\n np.datetime64('1920-03-01T00:00'))\r\n assert_equal(np.datetime64('1920', 's'),\r\n np.datetime64('1920-01-01T00:00:00'))\r\n assert_equal(np.datetime64(datetime.date(2045, 3, 25), 'ms'),\r\n np.datetime64('2045-03-25T00:00:00.000'))\r\n\r\n # Construction with date units from a datetime is also okay\r\n assert_equal(np.datetime64('1920-03-13T18', 'D'),\r\n np.datetime64('1920-03-13'))\r\n assert_equal(np.datetime64('1920-03-13T18:33:12', 'M'),\r\n np.datetime64('1920-03'))\r\n assert_equal(np.datetime64('1920-03-13T18:33:12.5', 'Y'),\r\n np.datetime64('1920'))\r\n\r\n def test_datetime_scalar_construction_timezone(self):\r\n # verify that supplying an explicit timezone works, but is deprecated\r\n with assert_warns(DeprecationWarning):\r\n assert_equal(np.datetime64('2000-01-01T00Z'),\r\n np.datetime64('2000-01-01T00'))\r\n with assert_warns(DeprecationWarning):\r\n assert_equal(np.datetime64('2000-01-01T00-08'),\r\n np.datetime64('2000-01-01T08'))\r\n\r\n def test_datetime_array_find_type(self):\r\n dt = np.datetime64('1970-01-01', 'M')\r\n arr = np.array([dt])\r\n assert_equal(arr.dtype, np.dtype('M8[M]'))\r\n\r\n # at the moment, we don't automatically convert these to datetime64\r\n\r\n dt = datetime.date(1970, 1, 1)\r\n arr = np.array([dt])\r\n assert_equal(arr.dtype, np.dtype('O'))\r\n\r\n dt = datetime.datetime(1970, 1, 1, 12, 30, 40)\r\n arr = np.array([dt])\r\n assert_equal(arr.dtype, np.dtype('O'))\r\n\r\n # find \"supertype\" for non-dates and dates\r\n\r\n b = np.bool_(True)\r\n dm = np.datetime64('1970-01-01', 'M')\r\n d = datetime.date(1970, 1, 1)\r\n dt = datetime.datetime(1970, 1, 1, 12, 30, 40)\r\n\r\n arr = np.array([b, dm])\r\n assert_equal(arr.dtype, np.dtype('O'))\r\n\r\n arr = np.array([b, d])\r\n assert_equal(arr.dtype, np.dtype('O'))\r\n\r\n arr = np.array([b, dt])\r\n assert_equal(arr.dtype, np.dtype('O'))\r\n\r\n arr = np.array([d, d]).astype('datetime64')\r\n assert_equal(arr.dtype, np.dtype('M8[D]'))\r\n\r\n arr = np.array([dt, dt]).astype('datetime64')\r\n assert_equal(arr.dtype, np.dtype('M8[us]'))\r\n\r\n @pytest.mark.parametrize(\"unit\", [\r\n # test all date / time units and use\r\n # \"generic\" to select generic unit\r\n (\"Y\"), (\"M\"), (\"W\"), (\"D\"), (\"h\"), (\"m\"),\r\n (\"s\"), (\"ms\"), (\"us\"), (\"ns\"), (\"ps\"),\r\n (\"fs\"), (\"as\"), (\"generic\") ])\r\n def test_timedelta_np_int_construction(self, unit):\r\n # regression test for gh-7617\r\n if unit != \"generic\":\r\n assert_equal(np.timedelta64(np.int64(123), unit),\r\n np.timedelta64(123, unit))\r\n else:\r\n assert_equal(np.timedelta64(np.int64(123)),\r\n np.timedelta64(123))\r\n\r\n def test_timedelta_scalar_construction(self):\r\n # Construct with different units\r\n assert_equal(np.timedelta64(7, 'D'),\r\n np.timedelta64(1, 'W'))\r\n 
assert_equal(np.timedelta64(120, 's'),\r\n np.timedelta64(2, 'm'))\r\n\r\n # Default construction means 0\r\n assert_equal(np.timedelta64(), np.timedelta64(0))\r\n\r\n # None gets constructed as NaT\r\n assert_equal(np.timedelta64(None), np.timedelta64('NaT'))\r\n\r\n # Some basic strings and repr\r\n assert_equal(str(np.timedelta64('NaT')), 'NaT')\r\n assert_equal(repr(np.timedelta64('NaT')),\r\n \"numpy.timedelta64('NaT')\")\r\n assert_equal(str(np.timedelta64(3, 's')), '3 seconds')\r\n assert_equal(repr(np.timedelta64(-3, 's')),\r\n \"numpy.timedelta64(-3,'s')\")\r\n assert_equal(repr(np.timedelta64(12)),\r\n \"numpy.timedelta64(12)\")\r\n\r\n # Construction from an integer produces generic units\r\n assert_equal(np.timedelta64(12).dtype, np.dtype('m8'))\r\n\r\n # When constructing from a scalar or zero-dimensional array,\r\n # it either keeps the units or you can override them.\r\n a = np.timedelta64(2, 'h')\r\n b = np.array(2, dtype='m8[h]')\r\n\r\n assert_equal(a.dtype, np.dtype('m8[h]'))\r\n assert_equal(b.dtype, np.dtype('m8[h]'))\r\n\r\n assert_equal(np.timedelta64(a), a)\r\n assert_equal(np.timedelta64(a).dtype, np.dtype('m8[h]'))\r\n\r\n assert_equal(np.timedelta64(b), a)\r\n assert_equal(np.timedelta64(b).dtype, np.dtype('m8[h]'))\r\n\r\n assert_equal(np.timedelta64(a, 's'), a)\r\n assert_equal(np.timedelta64(a, 's').dtype, np.dtype('m8[s]'))\r\n\r\n assert_equal(np.timedelta64(b, 's'), a)\r\n assert_equal(np.timedelta64(b, 's').dtype, np.dtype('m8[s]'))\r\n\r\n # Construction from datetime.timedelta\r\n assert_equal(np.timedelta64(5, 'D'),\r\n np.timedelta64(datetime.timedelta(days=5)))\r\n assert_equal(np.timedelta64(102347621, 's'),\r\n np.timedelta64(datetime.timedelta(seconds=102347621)))\r\n assert_equal(np.timedelta64(-10234760000, 'us'),\r\n np.timedelta64(datetime.timedelta(\r\n microseconds=-10234760000)))\r\n assert_equal(np.timedelta64(10234760000, 'us'),\r\n np.timedelta64(datetime.timedelta(\r\n microseconds=10234760000)))\r\n assert_equal(np.timedelta64(1023476, 'ms'),\r\n np.timedelta64(datetime.timedelta(milliseconds=1023476)))\r\n assert_equal(np.timedelta64(10, 'm'),\r\n np.timedelta64(datetime.timedelta(minutes=10)))\r\n assert_equal(np.timedelta64(281, 'h'),\r\n np.timedelta64(datetime.timedelta(hours=281)))\r\n assert_equal(np.timedelta64(28, 'W'),\r\n np.timedelta64(datetime.timedelta(weeks=28)))\r\n\r\n # Cannot construct across nonlinear time unit boundaries\r\n a = np.timedelta64(3, 's')\r\n assert_raises(TypeError, np.timedelta64, a, 'M')\r\n assert_raises(TypeError, np.timedelta64, a, 'Y')\r\n a = np.timedelta64(6, 'M')\r\n assert_raises(TypeError, np.timedelta64, a, 'D')\r\n assert_raises(TypeError, np.timedelta64, a, 'h')\r\n a = np.timedelta64(1, 'Y')\r\n assert_raises(TypeError, np.timedelta64, a, 'D')\r\n assert_raises(TypeError, np.timedelta64, a, 'm')\r\n a = datetime.timedelta(seconds=3)\r\n assert_raises(TypeError, np.timedelta64, a, 'M')\r\n assert_raises(TypeError, np.timedelta64, a, 'Y')\r\n a = datetime.timedelta(weeks=3)\r\n assert_raises(TypeError, np.timedelta64, a, 'M')\r\n assert_raises(TypeError, np.timedelta64, a, 'Y')\r\n a = datetime.timedelta()\r\n assert_raises(TypeError, np.timedelta64, a, 'M')\r\n assert_raises(TypeError, np.timedelta64, a, 'Y')\r\n\r\n def test_timedelta_object_array_conversion(self):\r\n # Regression test for gh-11096\r\n inputs = [datetime.timedelta(28),\r\n datetime.timedelta(30),\r\n datetime.timedelta(31)]\r\n expected = np.array([28, 30, 31], dtype='timedelta64[D]')\r\n actual = np.array(inputs, 
dtype='timedelta64[D]')\r\n assert_equal(expected, actual)\r\n\r\n def test_timedelta_0_dim_object_array_conversion(self):\r\n # Regression test for gh-11151\r\n test = np.array(datetime.timedelta(seconds=20))\r\n actual = test.astype(np.timedelta64)\r\n # expected value from the array constructor workaround\r\n # described in above issue\r\n expected = np.array(datetime.timedelta(seconds=20),\r\n np.timedelta64)\r\n assert_equal(actual, expected)\r\n\r\n def test_timedelta_nat_format(self):\r\n # gh-17552\r\n assert_equal('NaT', '{0}'.format(np.timedelta64('nat')))\r\n\r\n def test_timedelta_scalar_construction_units(self):\r\n # String construction detecting units\r\n assert_equal(np.datetime64('2010').dtype,\r\n np.dtype('M8[Y]'))\r\n assert_equal(np.datetime64('2010-03').dtype,\r\n np.dtype('M8[M]'))\r\n assert_equal(np.datetime64('2010-03-12').dtype,\r\n np.dtype('M8[D]'))\r\n assert_equal(np.datetime64('2010-03-12T17').dtype,\r\n np.dtype('M8[h]'))\r\n assert_equal(np.datetime64('2010-03-12T17:15').dtype,\r\n np.dtype('M8[m]'))\r\n assert_equal(np.datetime64('2010-03-12T17:15:08').dtype,\r\n np.dtype('M8[s]'))\r\n\r\n assert_equal(np.datetime64('2010-03-12T17:15:08.1').dtype,\r\n np.dtype('M8[ms]'))\r\n assert_equal(np.datetime64('2010-03-12T17:15:08.12').dtype,\r\n np.dtype('M8[ms]'))\r\n assert_equal(np.datetime64('2010-03-12T17:15:08.123').dtype,\r\n np.dtype('M8[ms]'))\r\n\r\n assert_equal(np.datetime64('2010-03-12T17:15:08.1234').dtype,\r\n np.dtype('M8[us]'))\r\n assert_equal(np.datetime64('2010-03-12T17:15:08.12345').dtype,\r\n np.dtype('M8[us]'))\r\n assert_equal(np.datetime64('2010-03-12T17:15:08.123456').dtype,\r\n np.dtype('M8[us]'))\r\n\r\n assert_equal(np.datetime64('1970-01-01T00:00:02.1234567').dtype,\r\n np.dtype('M8[ns]'))\r\n assert_equal(np.datetime64('1970-01-01T00:00:02.12345678').dtype,\r\n np.dtype('M8[ns]'))\r\n assert_equal(np.datetime64('1970-01-01T00:00:02.123456789').dtype,\r\n np.dtype('M8[ns]'))\r\n\r\n assert_equal(np.datetime64('1970-01-01T00:00:02.1234567890').dtype,\r\n np.dtype('M8[ps]'))\r\n assert_equal(np.datetime64('1970-01-01T00:00:02.12345678901').dtype,\r\n np.dtype('M8[ps]'))\r\n assert_equal(np.datetime64('1970-01-01T00:00:02.123456789012').dtype,\r\n np.dtype('M8[ps]'))\r\n\r\n assert_equal(np.datetime64(\r\n '1970-01-01T00:00:02.1234567890123').dtype,\r\n np.dtype('M8[fs]'))\r\n assert_equal(np.datetime64(\r\n '1970-01-01T00:00:02.12345678901234').dtype,\r\n np.dtype('M8[fs]'))\r\n assert_equal(np.datetime64(\r\n '1970-01-01T00:00:02.123456789012345').dtype,\r\n np.dtype('M8[fs]'))\r\n\r\n assert_equal(np.datetime64(\r\n '1970-01-01T00:00:02.1234567890123456').dtype,\r\n np.dtype('M8[as]'))\r\n assert_equal(np.datetime64(\r\n '1970-01-01T00:00:02.12345678901234567').dtype,\r\n np.dtype('M8[as]'))\r\n assert_equal(np.datetime64(\r\n '1970-01-01T00:00:02.123456789012345678').dtype,\r\n np.dtype('M8[as]'))\r\n\r\n # Python date object\r\n assert_equal(np.datetime64(datetime.date(2010, 4, 16)).dtype,\r\n np.dtype('M8[D]'))\r\n\r\n # Python datetime object\r\n assert_equal(np.datetime64(\r\n datetime.datetime(2010, 4, 16, 13, 45, 18)).dtype,\r\n np.dtype('M8[us]'))\r\n\r\n # 'today' special value\r\n assert_equal(np.datetime64('today').dtype,\r\n np.dtype('M8[D]'))\r\n\r\n # 'now' special value\r\n assert_equal(np.datetime64('now').dtype,\r\n np.dtype('M8[s]'))\r\n\r\n def test_datetime_nat_casting(self):\r\n a = np.array('NaT', dtype='M8[D]')\r\n b = np.datetime64('NaT', '[D]')\r\n\r\n # Arrays\r\n assert_equal(a.astype('M8[s]'), 
np.array('NaT', dtype='M8[s]'))\r\n assert_equal(a.astype('M8[ms]'), np.array('NaT', dtype='M8[ms]'))\r\n assert_equal(a.astype('M8[M]'), np.array('NaT', dtype='M8[M]'))\r\n assert_equal(a.astype('M8[Y]'), np.array('NaT', dtype='M8[Y]'))\r\n assert_equal(a.astype('M8[W]'), np.array('NaT', dtype='M8[W]'))\r\n\r\n # Scalars -> Scalars\r\n assert_equal(np.datetime64(b, '[s]'), np.datetime64('NaT', '[s]'))\r\n assert_equal(np.datetime64(b, '[ms]'), np.datetime64('NaT', '[ms]'))\r\n assert_equal(np.datetime64(b, '[M]'), np.datetime64('NaT', '[M]'))\r\n assert_equal(np.datetime64(b, '[Y]'), np.datetime64('NaT', '[Y]'))\r\n assert_equal(np.datetime64(b, '[W]'), np.datetime64('NaT', '[W]'))\r\n\r\n # Arrays -> Scalars\r\n assert_equal(np.datetime64(a, '[s]'), np.datetime64('NaT', '[s]'))\r\n assert_equal(np.datetime64(a, '[ms]'), np.datetime64('NaT', '[ms]'))\r\n assert_equal(np.datetime64(a, '[M]'), np.datetime64('NaT', '[M]'))\r\n assert_equal(np.datetime64(a, '[Y]'), np.datetime64('NaT', '[Y]'))\r\n assert_equal(np.datetime64(a, '[W]'), np.datetime64('NaT', '[W]'))\r\n\r\n # NaN -> NaT\r\n nan = np.array([np.nan] * 8)\r\n fnan = nan.astype('f')\r\n lnan = nan.astype('g')\r\n cnan = nan.astype('D')\r\n cfnan = nan.astype('F')\r\n clnan = nan.astype('G')\r\n\r\n nat = np.array([np.datetime64('NaT')] * 8)\r\n assert_equal(nan.astype('M8[ns]'), nat)\r\n assert_equal(fnan.astype('M8[ns]'), nat)\r\n assert_equal(lnan.astype('M8[ns]'), nat)\r\n assert_equal(cnan.astype('M8[ns]'), nat)\r\n assert_equal(cfnan.astype('M8[ns]'), nat)\r\n assert_equal(clnan.astype('M8[ns]'), nat)\r\n\r\n nat = np.array([np.timedelta64('NaT')] * 8)\r\n assert_equal(nan.astype('timedelta64[ns]'), nat)\r\n assert_equal(fnan.astype('timedelta64[ns]'), nat)\r\n assert_equal(lnan.astype('timedelta64[ns]'), nat)\r\n assert_equal(cnan.astype('timedelta64[ns]'), nat)\r\n assert_equal(cfnan.astype('timedelta64[ns]'), nat)\r\n assert_equal(clnan.astype('timedelta64[ns]'), nat)\r\n\r\n def test_days_creation(self):\r\n assert_equal(np.array('1599', dtype='M8[D]').astype('i8'),\r\n (1600-1970)*365 - (1972-1600)/4 + 3 - 365)\r\n assert_equal(np.array('1600', dtype='M8[D]').astype('i8'),\r\n (1600-1970)*365 - (1972-1600)/4 + 3)\r\n assert_equal(np.array('1601', dtype='M8[D]').astype('i8'),\r\n (1600-1970)*365 - (1972-1600)/4 + 3 + 366)\r\n assert_equal(np.array('1900', dtype='M8[D]').astype('i8'),\r\n (1900-1970)*365 - (1970-1900)//4)\r\n assert_equal(np.array('1901', dtype='M8[D]').astype('i8'),\r\n (1900-1970)*365 - (1970-1900)//4 + 365)\r\n assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3*365 - 1)\r\n assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2*365 - 1)\r\n assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1*365)\r\n assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0*365)\r\n assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1*365)\r\n assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2*365)\r\n assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3*365 + 1)\r\n assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4*365 + 1)\r\n assert_equal(np.array('2000', dtype='M8[D]').astype('i8'),\r\n (2000 - 1970)*365 + (2000 - 1972)//4)\r\n assert_equal(np.array('2001', dtype='M8[D]').astype('i8'),\r\n (2000 - 1970)*365 + (2000 - 1972)//4 + 366)\r\n assert_equal(np.array('2400', dtype='M8[D]').astype('i8'),\r\n (2400 - 1970)*365 + (2400 - 1972)//4 - 3)\r\n assert_equal(np.array('2401', dtype='M8[D]').astype('i8'),\r\n (2400 - 1970)*365 + (2400 - 1972)//4 - 3 + 
366)\r\n\r\n assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('i8'),\r\n (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 28)\r\n assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('i8'),\r\n (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 29)\r\n assert_equal(np.array('2000-02-29', dtype='M8[D]').astype('i8'),\r\n (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 28)\r\n assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'),\r\n (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 29)\r\n assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('i8'),\r\n (2000 - 1970)*365 + (2000 - 1972)//4 + 366 + 31 + 28 + 21)\r\n\r\n def test_days_to_pydate(self):\r\n assert_equal(np.array('1599', dtype='M8[D]').astype('O'),\r\n datetime.date(1599, 1, 1))\r\n assert_equal(np.array('1600', dtype='M8[D]').astype('O'),\r\n datetime.date(1600, 1, 1))\r\n assert_equal(np.array('1601', dtype='M8[D]').astype('O'),\r\n datetime.date(1601, 1, 1))\r\n assert_equal(np.array('1900', dtype='M8[D]').astype('O'),\r\n datetime.date(1900, 1, 1))\r\n assert_equal(np.array('1901', dtype='M8[D]').astype('O'),\r\n datetime.date(1901, 1, 1))\r\n assert_equal(np.array('2000', dtype='M8[D]').astype('O'),\r\n datetime.date(2000, 1, 1))\r\n assert_equal(np.array('2001', dtype='M8[D]').astype('O'),\r\n datetime.date(2001, 1, 1))\r\n assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('O'),\r\n datetime.date(1600, 2, 29))\r\n assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('O'),\r\n datetime.date(1600, 3, 1))\r\n assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('O'),\r\n datetime.date(2001, 3, 22))\r\n\r\n def test_dtype_comparison(self):\r\n assert_(not (np.dtype('M8[us]') == np.dtype('M8[ms]')))\r\n assert_(np.dtype('M8[us]') != np.dtype('M8[ms]'))\r\n assert_(np.dtype('M8[2D]') != np.dtype('M8[D]'))\r\n assert_(np.dtype('M8[D]') != np.dtype('M8[2D]'))\r\n\r\n def test_pydatetime_creation(self):\r\n a = np.array(['1960-03-12', datetime.date(1960, 3, 12)], dtype='M8[D]')\r\n assert_equal(a[0], a[1])\r\n a = np.array(['1999-12-31', datetime.date(1999, 12, 31)], dtype='M8[D]')\r\n assert_equal(a[0], a[1])\r\n a = np.array(['2000-01-01', datetime.date(2000, 1, 1)], dtype='M8[D]')\r\n assert_equal(a[0], a[1])\r\n # Will fail if the date changes during the exact right moment\r\n a = np.array(['today', datetime.date.today()], dtype='M8[D]')\r\n assert_equal(a[0], a[1])\r\n # datetime.datetime.now() returns local time, not UTC\r\n #a = np.array(['now', datetime.datetime.now()], dtype='M8[s]')\r\n #assert_equal(a[0], a[1])\r\n\r\n # we can give a datetime.date time units\r\n assert_equal(np.array(datetime.date(1960, 3, 12), dtype='M8[s]'),\r\n np.array(np.datetime64('1960-03-12T00:00:00')))\r\n\r\n def test_datetime_string_conversion(self):\r\n a = ['2011-03-16', '1920-01-01', '2013-05-19']\r\n str_a = np.array(a, dtype='S')\r\n uni_a = np.array(a, dtype='U')\r\n dt_a = np.array(a, dtype='M')\r\n\r\n # String to datetime\r\n assert_equal(dt_a, str_a.astype('M'))\r\n assert_equal(dt_a.dtype, str_a.astype('M').dtype)\r\n dt_b = np.empty_like(dt_a)\r\n dt_b[...] = str_a\r\n assert_equal(dt_a, dt_b)\r\n\r\n # Datetime to string\r\n assert_equal(str_a, dt_a.astype('S0'))\r\n str_b = np.empty_like(str_a)\r\n str_b[...] = dt_a\r\n assert_equal(str_a, str_b)\r\n\r\n # Unicode to datetime\r\n assert_equal(dt_a, uni_a.astype('M'))\r\n assert_equal(dt_a.dtype, uni_a.astype('M').dtype)\r\n dt_b = np.empty_like(dt_a)\r\n dt_b[...] 
= uni_a\r\n assert_equal(dt_a, dt_b)\r\n\r\n # Datetime to unicode\r\n assert_equal(uni_a, dt_a.astype('U'))\r\n uni_b = np.empty_like(uni_a)\r\n uni_b[...] = dt_a\r\n assert_equal(uni_a, uni_b)\r\n\r\n # Datetime to long string - gh-9712\r\n assert_equal(str_a, dt_a.astype((np.string_, 128)))\r\n str_b = np.empty(str_a.shape, dtype=(np.string_, 128))\r\n str_b[...] = dt_a\r\n assert_equal(str_a, str_b)\r\n\r\n @pytest.mark.parametrize(\"time_dtype\", [\"m8[D]\", \"M8[Y]\"])\r\n def test_time_byteswapping(self, time_dtype):\r\n times = np.array([\"2017\", \"NaT\"], dtype=time_dtype)\r\n times_swapped = times.astype(times.dtype.newbyteorder())\r\n assert_array_equal(times, times_swapped)\r\n\r\n unswapped = times_swapped.view(np.int64).newbyteorder()\r\n assert_array_equal(unswapped, times.view(np.int64))\r\n\r\n @pytest.mark.parametrize([\"time1\", \"time2\"],\r\n [(\"M8[s]\", \"M8[D]\"), (\"m8[s]\", \"m8[ns]\")])\r\n def test_time_byteswapped_cast(self, time1, time2):\r\n dtype1 = np.dtype(time1)\r\n dtype2 = np.dtype(time2)\r\n times = np.array([\"2017\", \"NaT\"], dtype=dtype1)\r\n expected = times.astype(dtype2)\r\n\r\n # Test that every byte-swapping combination also returns the same\r\n # results (previous tests check that this comparison works fine).\r\n res = times.astype(dtype1.newbyteorder()).astype(dtype2)\r\n assert_array_equal(res, expected)\r\n res = times.astype(dtype2.newbyteorder())\r\n assert_array_equal(res, expected)\r\n res = times.astype(dtype1.newbyteorder()).astype(dtype2.newbyteorder())\r\n assert_array_equal(res, expected)\r\n\r\n @pytest.mark.parametrize(\"time_dtype\", [\"m8[D]\", \"M8[Y]\"])\r\n @pytest.mark.parametrize(\"str_dtype\", [\"U\", \"S\"])\r\n def test_datetime_conversions_byteorders(self, str_dtype, time_dtype):\r\n times = np.array([\"2017\", \"NaT\"], dtype=time_dtype)\r\n # Unfortunately, timedelta does not roundtrip:\r\n from_strings = np.array([\"2017\", \"NaT\"], dtype=str_dtype)\r\n to_strings = times.astype(str_dtype) # assume this is correct\r\n\r\n # Check that conversion from times to string works if src is swapped:\r\n times_swapped = times.astype(times.dtype.newbyteorder())\r\n res = times_swapped.astype(str_dtype)\r\n assert_array_equal(res, to_strings)\r\n # And also if both are swapped:\r\n res = times_swapped.astype(to_strings.dtype.newbyteorder())\r\n assert_array_equal(res, to_strings)\r\n # only destination is swapped:\r\n res = times.astype(to_strings.dtype.newbyteorder())\r\n assert_array_equal(res, to_strings)\r\n\r\n # Check that conversion from string to times works if src is swapped:\r\n from_strings_swapped = from_strings.astype(\r\n from_strings.dtype.newbyteorder())\r\n res = from_strings_swapped.astype(time_dtype)\r\n assert_array_equal(res, times)\r\n # And if both are swapped:\r\n res = from_strings_swapped.astype(times.dtype.newbyteorder())\r\n assert_array_equal(res, times)\r\n # Only destination is swapped:\r\n res = from_strings.astype(times.dtype.newbyteorder())\r\n assert_array_equal(res, times)\r\n\r\n def test_datetime_array_str(self):\r\n a = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M')\r\n assert_equal(str(a), \"['2011-03-16' '1920-01-01' '2013-05-19']\")\r\n\r\n a = np.array(['2011-03-16T13:55', '1920-01-01T03:12'], dtype='M')\r\n assert_equal(np.array2string(a, separator=', ',\r\n formatter={'datetime': lambda x:\r\n \"'%s'\" % np.datetime_as_string(x, timezone='UTC')}),\r\n \"['2011-03-16T13:55Z', '1920-01-01T03:12Z']\")\r\n\r\n # Check that one NaT doesn't corrupt subsequent 
entries\r\n a = np.array(['2010', 'NaT', '2030']).astype('M')\r\n assert_equal(str(a), \"['2010' 'NaT' '2030']\")\r\n\r\n def test_timedelta_array_str(self):\r\n a = np.array([-1, 0, 100], dtype='m')\r\n assert_equal(str(a), \"[ -1 0 100]\")\r\n a = np.array(['NaT', 'NaT'], dtype='m')\r\n assert_equal(str(a), \"['NaT' 'NaT']\")\r\n # Check right-alignment with NaTs\r\n a = np.array([-1, 'NaT', 0], dtype='m')\r\n assert_equal(str(a), \"[ -1 'NaT' 0]\")\r\n a = np.array([-1, 'NaT', 1234567], dtype='m')\r\n assert_equal(str(a), \"[ -1 'NaT' 1234567]\")\r\n\r\n # Test with other byteorder:\r\n a = np.array([-1, 'NaT', 1234567], dtype='>m')\r\n assert_equal(str(a), \"[ -1 'NaT' 1234567]\")\r\n a = np.array([-1, 'NaT', 1234567], dtype='<m')\r\n assert_equal(str(a), \"[ -1 'NaT' 1234567]\")\r\n\r\n def test_pickle(self):\r\n # Check that pickle roundtripping works\r\n for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):\r\n dt = np.dtype('M8[7D]')\r\n assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt)\r\n dt = np.dtype('M8[W]')\r\n assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt)\r\n scalar = np.datetime64('2016-01-01T00:00:00.000000000')\r\n assert_equal(pickle.loads(pickle.dumps(scalar, protocol=proto)),\r\n scalar)\r\n delta = scalar - np.datetime64('2015-01-01T00:00:00.000000000')\r\n assert_equal(pickle.loads(pickle.dumps(delta, protocol=proto)),\r\n delta)\r\n\r\n # Check that loading pickles from 1.6 works\r\n pkl = b\"cnumpy\\ndtype\\np0\\n(S'M8'\\np1\\nI0\\nI1\\ntp2\\nRp3\\n\" + \\\r\n b\"(I4\\nS'<'\\np4\\nNNNI-1\\nI-1\\nI0\\n((dp5\\n(S'D'\\np6\\n\" + \\\r\n b\"I7\\nI1\\nI1\\ntp7\\ntp8\\ntp9\\nb.\"\r\n assert_equal(pickle.loads(pkl), np.dtype('<M8[7D]'))\r\n pkl = b\"cnumpy\\ndtype\\np0\\n(S'M8'\\np1\\nI0\\nI1\\ntp2\\nRp3\\n\" + \\\r\n b\"(I4\\nS'<'\\np4\\nNNNI-1\\nI-1\\nI0\\n((dp5\\n(S'W'\\np6\\n\" + \\\r\n b\"I1\\nI1\\nI1\\ntp7\\ntp8\\ntp9\\nb.\"\r\n assert_equal(pickle.loads(pkl), np.dtype('<M8[W]'))\r\n pkl = b\"cnumpy\\ndtype\\np0\\n(S'M8'\\np1\\nI0\\nI1\\ntp2\\nRp3\\n\" + \\\r\n b\"(I4\\nS'>'\\np4\\nNNNI-1\\nI-1\\nI0\\n((dp5\\n(S'us'\\np6\\n\" + \\\r\n b\"I1\\nI1\\nI1\\ntp7\\ntp8\\ntp9\\nb.\"\r\n assert_equal(pickle.loads(pkl), np.dtype('>M8[us]'))\r\n\r\n def test_setstate(self):\r\n \"Verify that datetime dtype __setstate__ can handle bad arguments\"\r\n dt = np.dtype('>M8[us]')\r\n assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1))\r\n assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])\r\n assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx')))\r\n assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])\r\n\r\n def test_dtype_promotion(self):\r\n # datetime <op> datetime computes the metadata gcd\r\n # timedelta <op> timedelta computes the metadata gcd\r\n for mM in ['m', 'M']:\r\n assert_equal(\r\n np.promote_types(np.dtype(mM+'8[2Y]'), np.dtype(mM+'8[2Y]')),\r\n np.dtype(mM+'8[2Y]'))\r\n assert_equal(\r\n np.promote_types(np.dtype(mM+'8[12Y]'), np.dtype(mM+'8[15Y]')),\r\n np.dtype(mM+'8[3Y]'))\r\n assert_equal(\r\n np.promote_types(np.dtype(mM+'8[62M]'), np.dtype(mM+'8[24M]')),\r\n np.dtype(mM+'8[2M]'))\r\n assert_equal(\r\n np.promote_types(np.dtype(mM+'8[1W]'), np.dtype(mM+'8[2D]')),\r\n np.dtype(mM+'8[1D]'))\r\n assert_equal(\r\n np.promote_types(np.dtype(mM+'8[W]'), np.dtype(mM+'8[13s]')),\r\n np.dtype(mM+'8[s]'))\r\n assert_equal(\r\n np.promote_types(np.dtype(mM+'8[13W]'), np.dtype(mM+'8[49s]')),\r\n np.dtype(mM+'8[7s]'))\r\n # timedelta <op> 
timedelta raises when there is no reasonable gcd\r\n assert_raises(TypeError, np.promote_types,\r\n np.dtype('m8[Y]'), np.dtype('m8[D]'))\r\n assert_raises(TypeError, np.promote_types,\r\n np.dtype('m8[M]'), np.dtype('m8[W]'))\r\n # timedelta and float cannot be safely cast with each other\r\n assert_raises(TypeError, np.promote_types, \"float32\", \"m8\")\r\n assert_raises(TypeError, np.promote_types, \"m8\", \"float32\")\r\n assert_raises(TypeError, np.promote_types, \"uint64\", \"m8\")\r\n assert_raises(TypeError, np.promote_types, \"m8\", \"uint64\")\r\n\r\n # timedelta <op> timedelta may overflow with big unit ranges\r\n assert_raises(OverflowError, np.promote_types,\r\n np.dtype('m8[W]'), np.dtype('m8[fs]'))\r\n assert_raises(OverflowError, np.promote_types,\r\n np.dtype('m8[s]'), np.dtype('m8[as]'))\r\n\r\n def test_cast_overflow(self):\r\n # gh-4486\r\n def cast():\r\n numpy.datetime64(\"1971-01-01 00:00:00.000000000000000\").astype(\"<M8[D]\")\r\n assert_raises(OverflowError, cast)\r\n\r\n def cast2():\r\n numpy.datetime64(\"2014\").astype(\"<M8[fs]\")\r\n assert_raises(OverflowError, cast2)\r\n\r\n def test_pyobject_roundtrip(self):\r\n # All datetime types should be able to roundtrip through object\r\n a = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0,\r\n -1020040340, -2942398, -1, 0, 1, 234523453, 1199164176],\r\n dtype=np.int64)\r\n # With date units\r\n for unit in ['M8[D]', 'M8[W]', 'M8[M]', 'M8[Y]']:\r\n b = a.copy().view(dtype=unit)\r\n b[0] = '-0001-01-01'\r\n b[1] = '-0001-12-31'\r\n b[2] = '0000-01-01'\r\n b[3] = '0001-01-01'\r\n b[4] = '1969-12-31'\r\n b[5] = '1970-01-01'\r\n b[6] = '9999-12-31'\r\n b[7] = '10000-01-01'\r\n b[8] = 'NaT'\r\n\r\n assert_equal(b.astype(object).astype(unit), b,\r\n \"Error roundtripping unit %s\" % unit)\r\n # With time units\r\n for unit in ['M8[as]', 'M8[16fs]', 'M8[ps]', 'M8[us]',\r\n 'M8[300as]', 'M8[20us]']:\r\n b = a.copy().view(dtype=unit)\r\n b[0] = '-0001-01-01T00'\r\n b[1] = '-0001-12-31T00'\r\n b[2] = '0000-01-01T00'\r\n b[3] = '0001-01-01T00'\r\n b[4] = '1969-12-31T23:59:59.999999'\r\n b[5] = '1970-01-01T00'\r\n b[6] = '9999-12-31T23:59:59.999999'\r\n b[7] = '10000-01-01T00'\r\n b[8] = 'NaT'\r\n\r\n assert_equal(b.astype(object).astype(unit), b,\r\n \"Error roundtripping unit %s\" % unit)\r\n\r\n def test_month_truncation(self):\r\n # Make sure that months are truncating correctly\r\n assert_equal(np.array('1945-03-01', dtype='M8[M]'),\r\n np.array('1945-03-31', dtype='M8[M]'))\r\n assert_equal(np.array('1969-11-01', dtype='M8[M]'),\r\n np.array('1969-11-30T23:59:59.99999', dtype='M').astype('M8[M]'))\r\n assert_equal(np.array('1969-12-01', dtype='M8[M]'),\r\n np.array('1969-12-31T23:59:59.99999', dtype='M').astype('M8[M]'))\r\n assert_equal(np.array('1970-01-01', dtype='M8[M]'),\r\n np.array('1970-01-31T23:59:59.99999', dtype='M').astype('M8[M]'))\r\n assert_equal(np.array('1980-02-01', dtype='M8[M]'),\r\n np.array('1980-02-29T23:59:59.99999', dtype='M').astype('M8[M]'))\r\n\r\n def test_different_unit_comparison(self):\r\n # Check some years with date units\r\n for unit1 in ['Y', 'M', 'D']:\r\n dt1 = np.dtype('M8[%s]' % unit1)\r\n for unit2 in ['Y', 'M', 'D']:\r\n dt2 = np.dtype('M8[%s]' % unit2)\r\n assert_equal(np.array('1945', dtype=dt1),\r\n np.array('1945', dtype=dt2))\r\n assert_equal(np.array('1970', dtype=dt1),\r\n np.array('1970', dtype=dt2))\r\n assert_equal(np.array('9999', dtype=dt1),\r\n np.array('9999', dtype=dt2))\r\n assert_equal(np.array('10000', dtype=dt1),\r\n np.array('10000-01-01', dtype=dt2))\r\n 
assert_equal(np.datetime64('1945', unit1),\r\n np.datetime64('1945', unit2))\r\n assert_equal(np.datetime64('1970', unit1),\r\n np.datetime64('1970', unit2))\r\n assert_equal(np.datetime64('9999', unit1),\r\n np.datetime64('9999', unit2))\r\n assert_equal(np.datetime64('10000', unit1),\r\n np.datetime64('10000-01-01', unit2))\r\n # Check some datetimes with time units\r\n for unit1 in ['6h', 'h', 'm', 's', '10ms', 'ms', 'us']:\r\n dt1 = np.dtype('M8[%s]' % unit1)\r\n for unit2 in ['h', 'm', 's', 'ms', 'us']:\r\n dt2 = np.dtype('M8[%s]' % unit2)\r\n assert_equal(np.array('1945-03-12T18', dtype=dt1),\r\n np.array('1945-03-12T18', dtype=dt2))\r\n assert_equal(np.array('1970-03-12T18', dtype=dt1),\r\n np.array('1970-03-12T18', dtype=dt2))\r\n assert_equal(np.array('9999-03-12T18', dtype=dt1),\r\n np.array('9999-03-12T18', dtype=dt2))\r\n assert_equal(np.array('10000-01-01T00', dtype=dt1),\r\n np.array('10000-01-01T00', dtype=dt2))\r\n assert_equal(np.datetime64('1945-03-12T18', unit1),\r\n np.datetime64('1945-03-12T18', unit2))\r\n assert_equal(np.datetime64('1970-03-12T18', unit1),\r\n np.datetime64('1970-03-12T18', unit2))\r\n assert_equal(np.datetime64('9999-03-12T18', unit1),\r\n np.datetime64('9999-03-12T18', unit2))\r\n assert_equal(np.datetime64('10000-01-01T00', unit1),\r\n np.datetime64('10000-01-01T00', unit2))\r\n # Check some days with units that won't overflow\r\n for unit1 in ['D', '12h', 'h', 'm', 's', '4s', 'ms', 'us']:\r\n dt1 = np.dtype('M8[%s]' % unit1)\r\n for unit2 in ['D', 'h', 'm', 's', 'ms', 'us']:\r\n dt2 = np.dtype('M8[%s]' % unit2)\r\n assert_(np.equal(np.array('1932-02-17', dtype='M').astype(dt1),\r\n np.array('1932-02-17T00:00:00', dtype='M').astype(dt2),\r\n casting='unsafe'))\r\n assert_(np.equal(np.array('10000-04-27', dtype='M').astype(dt1),\r\n np.array('10000-04-27T00:00:00', dtype='M').astype(dt2),\r\n casting='unsafe'))\r\n\r\n # Shouldn't be able to compare datetime and timedelta\r\n # TODO: Changing to 'same_kind' or 'safe' casting in the ufuncs by\r\n # default is needed to properly catch this kind of thing...\r\n a = np.array('2012-12-21', dtype='M8[D]')\r\n b = np.array(3, dtype='m8[D]')\r\n #assert_raises(TypeError, np.less, a, b)\r\n assert_raises(TypeError, np.less, a, b, casting='same_kind')\r\n\r\n def test_datetime_like(self):\r\n a = np.array([3], dtype='m8[4D]')\r\n b = np.array(['2012-12-21'], dtype='M8[D]')\r\n\r\n assert_equal(np.ones_like(a).dtype, a.dtype)\r\n assert_equal(np.zeros_like(a).dtype, a.dtype)\r\n assert_equal(np.empty_like(a).dtype, a.dtype)\r\n assert_equal(np.ones_like(b).dtype, b.dtype)\r\n assert_equal(np.zeros_like(b).dtype, b.dtype)\r\n assert_equal(np.empty_like(b).dtype, b.dtype)\r\n\r\n def test_datetime_unary(self):\r\n for tda, tdb, tdzero, tdone, tdmone in \\\r\n [\r\n # One-dimensional arrays\r\n (np.array([3], dtype='m8[D]'),\r\n np.array([-3], dtype='m8[D]'),\r\n np.array([0], dtype='m8[D]'),\r\n np.array([1], dtype='m8[D]'),\r\n np.array([-1], dtype='m8[D]')),\r\n # NumPy scalars\r\n (np.timedelta64(3, '[D]'),\r\n np.timedelta64(-3, '[D]'),\r\n np.timedelta64(0, '[D]'),\r\n np.timedelta64(1, '[D]'),\r\n np.timedelta64(-1, '[D]'))]:\r\n # negative ufunc\r\n assert_equal(-tdb, tda)\r\n assert_equal((-tdb).dtype, tda.dtype)\r\n assert_equal(np.negative(tdb), tda)\r\n assert_equal(np.negative(tdb).dtype, tda.dtype)\r\n\r\n # positive ufunc\r\n assert_equal(np.positive(tda), tda)\r\n assert_equal(np.positive(tda).dtype, tda.dtype)\r\n assert_equal(np.positive(tdb), tdb)\r\n assert_equal(np.positive(tdb).dtype, 
tdb.dtype)\r\n\r\n # absolute ufunc\r\n assert_equal(np.absolute(tdb), tda)\r\n assert_equal(np.absolute(tdb).dtype, tda.dtype)\r\n\r\n # sign ufunc\r\n assert_equal(np.sign(tda), tdone)\r\n assert_equal(np.sign(tdb), tdmone)\r\n assert_equal(np.sign(tdzero), tdzero)\r\n assert_equal(np.sign(tda).dtype, tda.dtype)\r\n\r\n # The ufuncs always produce native-endian results\r\n assert_\r\n\r\n def test_datetime_add(self):\r\n for dta, dtb, dtc, dtnat, tda, tdb, tdc in \\\r\n [\r\n # One-dimensional arrays\r\n (np.array(['2012-12-21'], dtype='M8[D]'),\r\n np.array(['2012-12-24'], dtype='M8[D]'),\r\n np.array(['2012-12-21T11'], dtype='M8[h]'),\r\n np.array(['NaT'], dtype='M8[D]'),\r\n np.array([3], dtype='m8[D]'),\r\n np.array([11], dtype='m8[h]'),\r\n np.array([3*24 + 11], dtype='m8[h]')),\r\n # NumPy scalars\r\n (np.datetime64('2012-12-21', '[D]'),\r\n np.datetime64('2012-12-24', '[D]'),\r\n np.datetime64('2012-12-21T11', '[h]'),\r\n np.datetime64('NaT', '[D]'),\r\n np.timedelta64(3, '[D]'),\r\n np.timedelta64(11, '[h]'),\r\n np.timedelta64(3*24 + 11, '[h]'))]:\r\n # m8 + m8\r\n assert_equal(tda + tdb, tdc)\r\n assert_equal((tda + tdb).dtype, np.dtype('m8[h]'))\r\n # m8 + bool\r\n assert_equal(tdb + True, tdb + 1)\r\n assert_equal((tdb + True).dtype, np.dtype('m8[h]'))\r\n # m8 + int\r\n assert_equal(tdb + 3*24, tdc)\r\n assert_equal((tdb + 3*24).dtype, np.dtype('m8[h]'))\r\n # bool + m8\r\n assert_equal(False + tdb, tdb)\r\n assert_equal((False + tdb).dtype, np.dtype('m8[h]'))\r\n # int + m8\r\n assert_equal(3*24 + tdb, tdc)\r\n assert_equal((3*24 + tdb).dtype, np.dtype('m8[h]'))\r\n # M8 + bool\r\n assert_equal(dta + True, dta + 1)\r\n assert_equal(dtnat + True, dtnat)\r\n assert_equal((dta + True).dtype, np.dtype('M8[D]'))\r\n # M8 + int\r\n assert_equal(dta + 3, dtb)\r\n assert_equal(dtnat + 3, dtnat)\r\n assert_equal((dta + 3).dtype, np.dtype('M8[D]'))\r\n # bool + M8\r\n assert_equal(False + dta, dta)\r\n assert_equal(False + dtnat, dtnat)\r\n assert_equal((False + dta).dtype, np.dtype('M8[D]'))\r\n # int + M8\r\n assert_equal(3 + dta, dtb)\r\n assert_equal(3 + dtnat, dtnat)\r\n assert_equal((3 + dta).dtype, np.dtype('M8[D]'))\r\n # M8 + m8\r\n assert_equal(dta + tda, dtb)\r\n assert_equal(dtnat + tda, dtnat)\r\n assert_equal((dta + tda).dtype, np.dtype('M8[D]'))\r\n # m8 + M8\r\n assert_equal(tda + dta, dtb)\r\n assert_equal(tda + dtnat, dtnat)\r\n assert_equal((tda + dta).dtype, np.dtype('M8[D]'))\r\n\r\n # In M8 + m8, the result goes to higher precision\r\n assert_equal(np.add(dta, tdb, casting='unsafe'), dtc)\r\n assert_equal(np.add(dta, tdb, casting='unsafe').dtype,\r\n np.dtype('M8[h]'))\r\n assert_equal(np.add(tdb, dta, casting='unsafe'), dtc)\r\n assert_equal(np.add(tdb, dta, casting='unsafe').dtype,\r\n np.dtype('M8[h]'))\r\n\r\n # M8 + M8\r\n assert_raises(TypeError, np.add, dta, dtb)\r\n\r\n def test_datetime_subtract(self):\r\n for dta, dtb, dtc, dtd, dte, dtnat, tda, tdb, tdc in \\\r\n [\r\n # One-dimensional arrays\r\n (np.array(['2012-12-21'], dtype='M8[D]'),\r\n np.array(['2012-12-24'], dtype='M8[D]'),\r\n np.array(['1940-12-24'], dtype='M8[D]'),\r\n np.array(['1940-12-24T00'], dtype='M8[h]'),\r\n np.array(['1940-12-23T13'], dtype='M8[h]'),\r\n np.array(['NaT'], dtype='M8[D]'),\r\n np.array([3], dtype='m8[D]'),\r\n np.array([11], dtype='m8[h]'),\r\n np.array([3*24 - 11], dtype='m8[h]')),\r\n # NumPy scalars\r\n (np.datetime64('2012-12-21', '[D]'),\r\n np.datetime64('2012-12-24', '[D]'),\r\n np.datetime64('1940-12-24', '[D]'),\r\n np.datetime64('1940-12-24T00', 
'[h]'),\r\n np.datetime64('1940-12-23T13', '[h]'),\r\n np.datetime64('NaT', '[D]'),\r\n np.timedelta64(3, '[D]'),\r\n np.timedelta64(11, '[h]'),\r\n np.timedelta64(3*24 - 11, '[h]'))]:\r\n # m8 - m8\r\n assert_equal(tda - tdb, tdc)\r\n assert_equal((tda - tdb).dtype, np.dtype('m8[h]'))\r\n assert_equal(tdb - tda, -tdc)\r\n assert_equal((tdb - tda).dtype, np.dtype('m8[h]'))\r\n # m8 - bool\r\n assert_equal(tdc - True, tdc - 1)\r\n assert_equal((tdc - True).dtype, np.dtype('m8[h]'))\r\n # m8 - int\r\n assert_equal(tdc - 3*24, -tdb)\r\n assert_equal((tdc - 3*24).dtype, np.dtype('m8[h]'))\r\n # int - m8\r\n assert_equal(False - tdb, -tdb)\r\n assert_equal((False - tdb).dtype, np.dtype('m8[h]'))\r\n # int - m8\r\n assert_equal(3*24 - tdb, tdc)\r\n assert_equal((3*24 - tdb).dtype, np.dtype('m8[h]'))\r\n # M8 - bool\r\n assert_equal(dtb - True, dtb - 1)\r\n assert_equal(dtnat - True, dtnat)\r\n assert_equal((dtb - True).dtype, np.dtype('M8[D]'))\r\n # M8 - int\r\n assert_equal(dtb - 3, dta)\r\n assert_equal(dtnat - 3, dtnat)\r\n assert_equal((dtb - 3).dtype, np.dtype('M8[D]'))\r\n # M8 - m8\r\n assert_equal(dtb - tda, dta)\r\n assert_equal(dtnat - tda, dtnat)\r\n assert_equal((dtb - tda).dtype, np.dtype('M8[D]'))\r\n\r\n # In M8 - m8, the result goes to higher precision\r\n assert_equal(np.subtract(dtc, tdb, casting='unsafe'), dte)\r\n assert_equal(np.subtract(dtc, tdb, casting='unsafe').dtype,\r\n np.dtype('M8[h]'))\r\n\r\n # M8 - M8 with different goes to higher precision\r\n assert_equal(np.subtract(dtc, dtd, casting='unsafe'),\r\n np.timedelta64(0, 'h'))\r\n assert_equal(np.subtract(dtc, dtd, casting='unsafe').dtype,\r\n np.dtype('m8[h]'))\r\n assert_equal(np.subtract(dtd, dtc, casting='unsafe'),\r\n np.timedelta64(0, 'h'))\r\n assert_equal(np.subtract(dtd, dtc, casting='unsafe').dtype,\r\n np.dtype('m8[h]'))\r\n\r\n # m8 - M8\r\n assert_raises(TypeError, np.subtract, tda, dta)\r\n # bool - M8\r\n assert_raises(TypeError, np.subtract, False, dta)\r\n # int - M8\r\n assert_raises(TypeError, np.subtract, 3, dta)\r\n\r\n def test_datetime_multiply(self):\r\n for dta, tda, tdb, tdc in \\\r\n [\r\n # One-dimensional arrays\r\n (np.array(['2012-12-21'], dtype='M8[D]'),\r\n np.array([6], dtype='m8[h]'),\r\n np.array([9], dtype='m8[h]'),\r\n np.array([12], dtype='m8[h]')),\r\n # NumPy scalars\r\n (np.datetime64('2012-12-21', '[D]'),\r\n np.timedelta64(6, '[h]'),\r\n np.timedelta64(9, '[h]'),\r\n np.timedelta64(12, '[h]'))]:\r\n # m8 * int\r\n assert_equal(tda * 2, tdc)\r\n assert_equal((tda * 2).dtype, np.dtype('m8[h]'))\r\n # int * m8\r\n assert_equal(2 * tda, tdc)\r\n assert_equal((2 * tda).dtype, np.dtype('m8[h]'))\r\n # m8 * float\r\n assert_equal(tda * 1.5, tdb)\r\n assert_equal((tda * 1.5).dtype, np.dtype('m8[h]'))\r\n # float * m8\r\n assert_equal(1.5 * tda, tdb)\r\n assert_equal((1.5 * tda).dtype, np.dtype('m8[h]'))\r\n\r\n # m8 * m8\r\n assert_raises(TypeError, np.multiply, tda, tdb)\r\n # m8 * M8\r\n assert_raises(TypeError, np.multiply, dta, tda)\r\n # M8 * m8\r\n assert_raises(TypeError, np.multiply, tda, dta)\r\n # M8 * int\r\n assert_raises(TypeError, np.multiply, dta, 2)\r\n # int * M8\r\n assert_raises(TypeError, np.multiply, 2, dta)\r\n # M8 * float\r\n assert_raises(TypeError, np.multiply, dta, 1.5)\r\n # float * M8\r\n assert_raises(TypeError, np.multiply, 1.5, dta)\r\n\r\n # NaTs\r\n with suppress_warnings() as sup:\r\n sup.filter(RuntimeWarning, \"invalid value encountered in multiply\")\r\n nat = np.timedelta64('NaT')\r\n def check(a, b, res):\r\n assert_equal(a * b, res)\r\n 
assert_equal(b * a, res)\r\n for tp in (int, float):\r\n check(nat, tp(2), nat)\r\n check(nat, tp(0), nat)\r\n for f in (float('inf'), float('nan')):\r\n check(np.timedelta64(1), f, nat)\r\n check(np.timedelta64(0), f, nat)\r\n check(nat, f, nat)\r\n\r\n @pytest.mark.parametrize(\"op1, op2, exp\", [\r\n # m8 same units round down\r\n (np.timedelta64(7, 's'),\r\n np.timedelta64(4, 's'),\r\n 1),\r\n # m8 same units round down with negative\r\n (np.timedelta64(7, 's'),\r\n np.timedelta64(-4, 's'),\r\n -2),\r\n # m8 same units negative no round down\r\n (np.timedelta64(8, 's'),\r\n np.timedelta64(-4, 's'),\r\n -2),\r\n # m8 different units\r\n (np.timedelta64(1, 'm'),\r\n np.timedelta64(31, 's'),\r\n 1),\r\n # m8 generic units\r\n (np.timedelta64(1890),\r\n np.timedelta64(31),\r\n 60),\r\n # Y // M works\r\n (np.timedelta64(2, 'Y'),\r\n np.timedelta64('13', 'M'),\r\n 1),\r\n # handle 1D arrays\r\n (np.array([1, 2, 3], dtype='m8'),\r\n np.array([2], dtype='m8'),\r\n np.array([0, 1, 1], dtype=np.int64)),\r\n ])\r\n def test_timedelta_floor_divide(self, op1, op2, exp):\r\n assert_equal(op1 // op2, exp)\r\n\r\n @pytest.mark.parametrize(\"op1, op2\", [\r\n # div by 0\r\n (np.timedelta64(10, 'us'),\r\n np.timedelta64(0, 'us')),\r\n # div with NaT\r\n (np.timedelta64('NaT'),\r\n np.timedelta64(50, 'us')),\r\n # special case for int64 min\r\n # in integer floor division\r\n (np.timedelta64(np.iinfo(np.int64).min),\r\n np.timedelta64(-1)),\r\n ])\r\n def test_timedelta_floor_div_warnings(self, op1, op2):\r\n with assert_warns(RuntimeWarning):\r\n actual = op1 // op2\r\n assert_equal(actual, 0)\r\n assert_equal(actual.dtype, np.int64)\r\n\r\n @pytest.mark.parametrize(\"val1, val2\", [\r\n # the smallest integer that can't be represented\r\n # exactly in a double should be preserved if we avoid\r\n # casting to double in floordiv operation\r\n (9007199254740993, 1),\r\n # stress the alternate floordiv code path where\r\n # operand signs don't match and remainder isn't 0\r\n (9007199254740999, -2),\r\n ])\r\n def test_timedelta_floor_div_precision(self, val1, val2):\r\n op1 = np.timedelta64(val1)\r\n op2 = np.timedelta64(val2)\r\n actual = op1 // op2\r\n # Python reference integer floor\r\n expected = val1 // val2\r\n assert_equal(actual, expected)\r\n\r\n @pytest.mark.parametrize(\"val1, val2\", [\r\n # years and months sometimes can't be unambiguously\r\n # divided for floor division operation\r\n (np.timedelta64(7, 'Y'),\r\n np.timedelta64(3, 's')),\r\n (np.timedelta64(7, 'M'),\r\n np.timedelta64(1, 'D')),\r\n ])\r\n def test_timedelta_floor_div_error(self, val1, val2):\r\n with assert_raises_regex(TypeError, \"common metadata divisor\"):\r\n val1 // val2\r\n\r\n @pytest.mark.parametrize(\"op1, op2\", [\r\n # reuse the test cases from floordiv\r\n (np.timedelta64(7, 's'),\r\n np.timedelta64(4, 's')),\r\n # m8 same units round down with negative\r\n (np.timedelta64(7, 's'),\r\n np.timedelta64(-4, 's')),\r\n # m8 same units negative no round down\r\n (np.timedelta64(8, 's'),\r\n np.timedelta64(-4, 's')),\r\n # m8 different units\r\n (np.timedelta64(1, 'm'),\r\n np.timedelta64(31, 's')),\r\n # m8 generic units\r\n (np.timedelta64(1890),\r\n np.timedelta64(31)),\r\n # Y // M works\r\n (np.timedelta64(2, 'Y'),\r\n np.timedelta64('13', 'M')),\r\n # handle 1D arrays\r\n (np.array([1, 2, 3], dtype='m8'),\r\n np.array([2], dtype='m8')),\r\n ])\r\n def test_timedelta_divmod(self, op1, op2):\r\n expected = (op1 // op2, op1 % op2)\r\n assert_equal(divmod(op1, op2), expected)\r\n\r\n @pytest.mark.parametrize(\"op1, 
op2\", [\r\n # reuse cases from floordiv\r\n # div by 0\r\n (np.timedelta64(10, 'us'),\r\n np.timedelta64(0, 'us')),\r\n # div with NaT\r\n (np.timedelta64('NaT'),\r\n np.timedelta64(50, 'us')),\r\n # special case for int64 min\r\n # in integer floor division\r\n (np.timedelta64(np.iinfo(np.int64).min),\r\n np.timedelta64(-1)),\r\n ])\r\n def test_timedelta_divmod_warnings(self, op1, op2):\r\n with assert_warns(RuntimeWarning):\r\n expected = (op1 // op2, op1 % op2)\r\n with assert_warns(RuntimeWarning):\r\n actual = divmod(op1, op2)\r\n assert_equal(actual, expected)\r\n\r\n def test_datetime_divide(self):\r\n for dta, tda, tdb, tdc, tdd in \\\r\n [\r\n # One-dimensional arrays\r\n (np.array(['2012-12-21'], dtype='M8[D]'),\r\n np.array([6], dtype='m8[h]'),\r\n np.array([9], dtype='m8[h]'),\r\n np.array([12], dtype='m8[h]'),\r\n np.array([6], dtype='m8[m]')),\r\n # NumPy scalars\r\n (np.datetime64('2012-12-21', '[D]'),\r\n np.timedelta64(6, '[h]'),\r\n np.timedelta64(9, '[h]'),\r\n np.timedelta64(12, '[h]'),\r\n np.timedelta64(6, '[m]'))]:\r\n # m8 / int\r\n assert_equal(tdc / 2, tda)\r\n assert_equal((tdc / 2).dtype, np.dtype('m8[h]'))\r\n # m8 / float\r\n assert_equal(tda / 0.5, tdc)\r\n assert_equal((tda / 0.5).dtype, np.dtype('m8[h]'))\r\n # m8 / m8\r\n assert_equal(tda / tdb, 6 / 9)\r\n assert_equal(np.divide(tda, tdb), 6 / 9)\r\n assert_equal(np.true_divide(tda, tdb), 6 / 9)\r\n assert_equal(tdb / tda, 9 / 6)\r\n assert_equal((tda / tdb).dtype, np.dtype('f8'))\r\n assert_equal(tda / tdd, 60)\r\n assert_equal(tdd / tda, 1 / 60)\r\n\r\n # int / m8\r\n assert_raises(TypeError, np.divide, 2, tdb)\r\n # float / m8\r\n assert_raises(TypeError, np.divide, 0.5, tdb)\r\n # m8 / M8\r\n assert_raises(TypeError, np.divide, dta, tda)\r\n # M8 / m8\r\n assert_raises(TypeError, np.divide, tda, dta)\r\n # M8 / int\r\n assert_raises(TypeError, np.divide, dta, 2)\r\n # int / M8\r\n assert_raises(TypeError, np.divide, 2, dta)\r\n # M8 / float\r\n assert_raises(TypeError, np.divide, dta, 1.5)\r\n # float / M8\r\n assert_raises(TypeError, np.divide, 1.5, dta)\r\n\r\n # NaTs\r\n with suppress_warnings() as sup:\r\n sup.filter(RuntimeWarning, r\".*encountered in true\\_divide\")\r\n nat = np.timedelta64('NaT')\r\n for tp in (int, float):\r\n assert_equal(np.timedelta64(1) / tp(0), nat)\r\n assert_equal(np.timedelta64(0) / tp(0), nat)\r\n assert_equal(nat / tp(0), nat)\r\n assert_equal(nat / tp(2), nat)\r\n # Division by inf\r\n assert_equal(np.timedelta64(1) / float('inf'), np.timedelta64(0))\r\n assert_equal(np.timedelta64(0) / float('inf'), np.timedelta64(0))\r\n assert_equal(nat / float('inf'), nat)\r\n # Division by nan\r\n assert_equal(np.timedelta64(1) / float('nan'), nat)\r\n assert_equal(np.timedelta64(0) / float('nan'), nat)\r\n assert_equal(nat / float('nan'), nat)\r\n\r\n def test_datetime_compare(self):\r\n # Test all the comparison operators\r\n a = np.datetime64('2000-03-12T18:00:00.000000')\r\n b = np.array(['2000-03-12T18:00:00.000000',\r\n '2000-03-12T17:59:59.999999',\r\n '2000-03-12T18:00:00.000001',\r\n '1970-01-11T12:00:00.909090',\r\n '2016-01-11T12:00:00.909090'],\r\n dtype='datetime64[us]')\r\n assert_equal(np.equal(a, b), [1, 0, 0, 0, 0])\r\n assert_equal(np.not_equal(a, b), [0, 1, 1, 1, 1])\r\n assert_equal(np.less(a, b), [0, 0, 1, 0, 1])\r\n assert_equal(np.less_equal(a, b), [1, 0, 1, 0, 1])\r\n assert_equal(np.greater(a, b), [0, 1, 0, 1, 0])\r\n assert_equal(np.greater_equal(a, b), [1, 1, 0, 1, 0])\r\n\r\n def test_datetime_compare_nat(self):\r\n dt_nat = np.datetime64('NaT', 
'D')\r\n dt_other = np.datetime64('2000-01-01')\r\n td_nat = np.timedelta64('NaT', 'h')\r\n td_other = np.timedelta64(1, 'h')\r\n\r\n for op in [np.equal, np.less, np.less_equal,\r\n np.greater, np.greater_equal]:\r\n assert_(not op(dt_nat, dt_nat))\r\n assert_(not op(dt_nat, dt_other))\r\n assert_(not op(dt_other, dt_nat))\r\n\r\n assert_(not op(td_nat, td_nat))\r\n assert_(not op(td_nat, td_other))\r\n assert_(not op(td_other, td_nat))\r\n\r\n assert_(np.not_equal(dt_nat, dt_nat))\r\n assert_(np.not_equal(dt_nat, dt_other))\r\n assert_(np.not_equal(dt_other, dt_nat))\r\n\r\n assert_(np.not_equal(td_nat, td_nat))\r\n assert_(np.not_equal(td_nat, td_other))\r\n assert_(np.not_equal(td_other, td_nat))\r\n\r\n def test_datetime_minmax(self):\r\n # The metadata of the result should become the GCD\r\n # of the operand metadata\r\n a = np.array('1999-03-12T13', dtype='M8[2m]')\r\n b = np.array('1999-03-12T12', dtype='M8[s]')\r\n assert_equal(np.minimum(a, b), b)\r\n assert_equal(np.minimum(a, b).dtype, np.dtype('M8[s]'))\r\n assert_equal(np.fmin(a, b), b)\r\n assert_equal(np.fmin(a, b).dtype, np.dtype('M8[s]'))\r\n assert_equal(np.maximum(a, b), a)\r\n assert_equal(np.maximum(a, b).dtype, np.dtype('M8[s]'))\r\n assert_equal(np.fmax(a, b), a)\r\n assert_equal(np.fmax(a, b).dtype, np.dtype('M8[s]'))\r\n # Viewed as integers, the comparison is opposite because\r\n # of the units chosen\r\n assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8'))\r\n\r\n # Interaction with NaT\r\n a = np.array('1999-03-12T13', dtype='M8[2m]')\r\n dtnat = np.array('NaT', dtype='M8[h]')\r\n assert_equal(np.minimum(a, dtnat), dtnat)\r\n assert_equal(np.minimum(dtnat, a), dtnat)\r\n assert_equal(np.maximum(a, dtnat), dtnat)\r\n assert_equal(np.maximum(dtnat, a), dtnat)\r\n assert_equal(np.fmin(dtnat, a), a)\r\n assert_equal(np.fmin(a, dtnat), a)\r\n assert_equal(np.fmax(dtnat, a), a)\r\n assert_equal(np.fmax(a, dtnat), a)\r\n\r\n # Also do timedelta\r\n a = np.array(3, dtype='m8[h]')\r\n b = np.array(3*3600 - 3, dtype='m8[s]')\r\n assert_equal(np.minimum(a, b), b)\r\n assert_equal(np.minimum(a, b).dtype, np.dtype('m8[s]'))\r\n assert_equal(np.fmin(a, b), b)\r\n assert_equal(np.fmin(a, b).dtype, np.dtype('m8[s]'))\r\n assert_equal(np.maximum(a, b), a)\r\n assert_equal(np.maximum(a, b).dtype, np.dtype('m8[s]'))\r\n assert_equal(np.fmax(a, b), a)\r\n assert_equal(np.fmax(a, b).dtype, np.dtype('m8[s]'))\r\n # Viewed as integers, the comparison is opposite because\r\n # of the units chosen\r\n assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8'))\r\n\r\n # should raise between datetime and timedelta\r\n #\r\n # TODO: Allowing unsafe casting by\r\n # default in ufuncs strikes again... 
:(\r\n a = np.array(3, dtype='m8[h]')\r\n b = np.array('1999-03-12T12', dtype='M8[s]')\r\n #assert_raises(TypeError, np.minimum, a, b)\r\n #assert_raises(TypeError, np.maximum, a, b)\r\n #assert_raises(TypeError, np.fmin, a, b)\r\n #assert_raises(TypeError, np.fmax, a, b)\r\n assert_raises(TypeError, np.minimum, a, b, casting='same_kind')\r\n assert_raises(TypeError, np.maximum, a, b, casting='same_kind')\r\n assert_raises(TypeError, np.fmin, a, b, casting='same_kind')\r\n assert_raises(TypeError, np.fmax, a, b, casting='same_kind')\r\n\r\n def test_hours(self):\r\n t = np.ones(3, dtype='M8[s]')\r\n t[0] = 60*60*24 + 60*60*10\r\n assert_(t[0].item().hour == 10)\r\n\r\n def test_divisor_conversion_year(self):\r\n assert_(np.dtype('M8[Y/4]') == np.dtype('M8[3M]'))\r\n assert_(np.dtype('M8[Y/13]') == np.dtype('M8[4W]'))\r\n assert_(np.dtype('M8[3Y/73]') == np.dtype('M8[15D]'))\r\n\r\n def test_divisor_conversion_month(self):\r\n assert_(np.dtype('M8[M/2]') == np.dtype('M8[2W]'))\r\n assert_(np.dtype('M8[M/15]') == np.dtype('M8[2D]'))\r\n assert_(np.dtype('M8[3M/40]') == np.dtype('M8[54h]'))\r\n\r\n def test_divisor_conversion_week(self):\r\n assert_(np.dtype('m8[W/7]') == np.dtype('m8[D]'))\r\n assert_(np.dtype('m8[3W/14]') == np.dtype('m8[36h]'))\r\n assert_(np.dtype('m8[5W/140]') == np.dtype('m8[360m]'))\r\n\r\n def test_divisor_conversion_day(self):\r\n assert_(np.dtype('M8[D/12]') == np.dtype('M8[2h]'))\r\n assert_(np.dtype('M8[D/120]') == np.dtype('M8[12m]'))\r\n assert_(np.dtype('M8[3D/960]') == np.dtype('M8[270s]'))\r\n\r\n def test_divisor_conversion_hour(self):\r\n assert_(np.dtype('m8[h/30]') == np.dtype('m8[2m]'))\r\n assert_(np.dtype('m8[3h/300]') == np.dtype('m8[36s]'))\r\n\r\n def test_divisor_conversion_minute(self):\r\n assert_(np.dtype('m8[m/30]') == np.dtype('m8[2s]'))\r\n assert_(np.dtype('m8[3m/300]') == np.dtype('m8[600ms]'))\r\n\r\n def test_divisor_conversion_second(self):\r\n assert_(np.dtype('m8[s/100]') == np.dtype('m8[10ms]'))\r\n assert_(np.dtype('m8[3s/10000]') == np.dtype('m8[300us]'))\r\n\r\n def test_divisor_conversion_fs(self):\r\n assert_(np.dtype('M8[fs/100]') == np.dtype('M8[10as]'))\r\n assert_raises(ValueError, lambda: np.dtype('M8[3fs/10000]'))\r\n\r\n def test_divisor_conversion_as(self):\r\n assert_raises(ValueError, lambda: np.dtype('M8[as/10]'))\r\n\r\n def test_string_parser_variants(self):\r\n # Allow space instead of 'T' between date and time\r\n assert_equal(np.array(['1980-02-29T01:02:03'], np.dtype('M8[s]')),\r\n np.array(['1980-02-29 01:02:03'], np.dtype('M8[s]')))\r\n # Allow positive years\r\n assert_equal(np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')),\r\n np.array(['+1980-02-29 01:02:03'], np.dtype('M8[s]')))\r\n # Allow negative years\r\n assert_equal(np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')),\r\n np.array(['-1980-02-29 01:02:03'], np.dtype('M8[s]')))\r\n # UTC specifier\r\n with assert_warns(DeprecationWarning):\r\n assert_equal(\r\n np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')),\r\n np.array(['+1980-02-29 01:02:03Z'], np.dtype('M8[s]')))\r\n with assert_warns(DeprecationWarning):\r\n assert_equal(\r\n np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')),\r\n np.array(['-1980-02-29 01:02:03Z'], np.dtype('M8[s]')))\r\n # Time zone offset\r\n with assert_warns(DeprecationWarning):\r\n assert_equal(\r\n np.array(['1980-02-29T02:02:03'], np.dtype('M8[s]')),\r\n np.array(['1980-02-29 00:32:03-0130'], np.dtype('M8[s]')))\r\n with assert_warns(DeprecationWarning):\r\n assert_equal(\r\n 
np.array(['1980-02-28T22:32:03'], np.dtype('M8[s]')),\r\n np.array(['1980-02-29 00:02:03+01:30'], np.dtype('M8[s]')))\r\n with assert_warns(DeprecationWarning):\r\n assert_equal(\r\n np.array(['1980-02-29T02:32:03.506'], np.dtype('M8[s]')),\r\n np.array(['1980-02-29 00:32:03.506-02'], np.dtype('M8[s]')))\r\n with assert_warns(DeprecationWarning):\r\n assert_equal(np.datetime64('1977-03-02T12:30-0230'),\r\n np.datetime64('1977-03-02T15:00'))\r\n\r\n def test_string_parser_error_check(self):\r\n # Arbitrary bad string\r\n assert_raises(ValueError, np.array, ['badvalue'], np.dtype('M8[us]'))\r\n # Character after year must be '-'\r\n assert_raises(ValueError, np.array, ['1980X'], np.dtype('M8[us]'))\r\n # Cannot have trailing '-'\r\n assert_raises(ValueError, np.array, ['1980-'], np.dtype('M8[us]'))\r\n # Month must be in range [1,12]\r\n assert_raises(ValueError, np.array, ['1980-00'], np.dtype('M8[us]'))\r\n assert_raises(ValueError, np.array, ['1980-13'], np.dtype('M8[us]'))\r\n # Month must have two digits\r\n assert_raises(ValueError, np.array, ['1980-1'], np.dtype('M8[us]'))\r\n assert_raises(ValueError, np.array, ['1980-1-02'], np.dtype('M8[us]'))\r\n # 'Mor' is not a valid month\r\n assert_raises(ValueError, np.array, ['1980-Mor'], np.dtype('M8[us]'))\r\n # Cannot have trailing '-'\r\n assert_raises(ValueError, np.array, ['1980-01-'], np.dtype('M8[us]'))\r\n # Day must be in range [1,len(month)]\r\n assert_raises(ValueError, np.array, ['1980-01-0'], np.dtype('M8[us]'))\r\n assert_raises(ValueError, np.array, ['1980-01-00'], np.dtype('M8[us]'))\r\n assert_raises(ValueError, np.array, ['1980-01-32'], np.dtype('M8[us]'))\r\n assert_raises(ValueError, np.array, ['1979-02-29'], np.dtype('M8[us]'))\r\n assert_raises(ValueError, np.array, ['1980-02-30'], np.dtype('M8[us]'))\r\n assert_raises(ValueError, np.array, ['1980-03-32'], np.dtype('M8[us]'))\r\n assert_raises(ValueError, np.array, ['1980-04-31'], np.dtype('M8[us]'))\r\n assert_raises(ValueError, np.array, ['1980-05-32'], np.dtype('M8[us]'))\r\n assert_raises(ValueError, np.array, ['1980-06-31'], np.dtype('M8[us]'))\r\n assert_raises(ValueError, np.array, ['1980-07-32'], np.dtype('M8[us]'))\r\n assert_raises(ValueError, np.array, ['1980-08-32'], np.dtype('M8[us]'))\r\n assert_raises(ValueError, np.array, ['1980-09-31'], np.dtype('M8[us]'))\r\n assert_raises(ValueError, np.array, ['1980-10-32'], np.dtype('M8[us]'))\r\n assert_raises(ValueError, np.array, ['1980-11-31'], np.dtype('M8[us]'))\r\n assert_raises(ValueError, np.array, ['1980-12-32'], np.dtype('M8[us]'))\r\n # Cannot have trailing characters\r\n assert_raises(ValueError, np.array, ['1980-02-03%'],\r\n np.dtype('M8[us]'))\r\n assert_raises(ValueError, np.array, ['1980-02-03 q'],\r\n np.dtype('M8[us]'))\r\n\r\n # Hours must be in range [0, 23]\r\n assert_raises(ValueError, np.array, ['1980-02-03 25'],\r\n np.dtype('M8[us]'))\r\n assert_raises(ValueError, np.array, ['1980-02-03T25'],\r\n np.dtype('M8[us]'))\r\n assert_raises(ValueError, np.array, ['1980-02-03 24:01'],\r\n np.dtype('M8[us]'))\r\n assert_raises(ValueError, np.array, ['1980-02-03T24:01'],\r\n np.dtype('M8[us]'))\r\n assert_raises(ValueError, np.array, ['1980-02-03 -1'],\r\n np.dtype('M8[us]'))\r\n # No trailing ':'\r\n assert_raises(ValueError, np.array, ['1980-02-03 01:'],\r\n np.dtype('M8[us]'))\r\n # Minutes must be in range [0, 59]\r\n assert_raises(ValueError, np.array, ['1980-02-03 01:-1'],\r\n np.dtype('M8[us]'))\r\n assert_raises(ValueError, np.array, ['1980-02-03 01:60'],\r\n np.dtype('M8[us]'))\r\n # No 
trailing ':'\r\n assert_raises(ValueError, np.array, ['1980-02-03 01:60:'],\r\n np.dtype('M8[us]'))\r\n # Seconds must be in range [0, 59]\r\n assert_raises(ValueError, np.array, ['1980-02-03 01:10:-1'],\r\n np.dtype('M8[us]'))\r\n assert_raises(ValueError, np.array, ['1980-02-03 01:01:60'],\r\n np.dtype('M8[us]'))\r\n # Timezone offset must within a reasonable range\r\n with assert_warns(DeprecationWarning):\r\n assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+0661'],\r\n np.dtype('M8[us]'))\r\n with assert_warns(DeprecationWarning):\r\n assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+2500'],\r\n np.dtype('M8[us]'))\r\n with assert_warns(DeprecationWarning):\r\n assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-0070'],\r\n np.dtype('M8[us]'))\r\n with assert_warns(DeprecationWarning):\r\n assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-3000'],\r\n np.dtype('M8[us]'))\r\n with assert_warns(DeprecationWarning):\r\n assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-25:00'],\r\n np.dtype('M8[us]'))\r\n\r\n def test_creation_overflow(self):\r\n date = '1980-03-23 20:00:00'\r\n timesteps = np.array([date], dtype='datetime64[s]')[0].astype(np.int64)\r\n for unit in ['ms', 'us', 'ns']:\r\n timesteps *= 1000\r\n x = np.array([date], dtype='datetime64[%s]' % unit)\r\n\r\n assert_equal(timesteps, x[0].astype(np.int64),\r\n err_msg='Datetime conversion error for unit %s' % unit)\r\n\r\n assert_equal(x[0].astype(np.int64), 322689600000000000)\r\n\r\n # gh-13062\r\n with pytest.raises(OverflowError):\r\n np.datetime64(2**64, 'D')\r\n with pytest.raises(OverflowError):\r\n np.timedelta64(2**64, 'D')\r\n\r\n def test_datetime_as_string(self):\r\n # Check all the units with default string conversion\r\n date = '1959-10-13'\r\n datetime = '1959-10-13T12:34:56.789012345678901234'\r\n\r\n assert_equal(np.datetime_as_string(np.datetime64(date, 'Y')),\r\n '1959')\r\n assert_equal(np.datetime_as_string(np.datetime64(date, 'M')),\r\n '1959-10')\r\n assert_equal(np.datetime_as_string(np.datetime64(date, 'D')),\r\n '1959-10-13')\r\n assert_equal(np.datetime_as_string(np.datetime64(datetime, 'h')),\r\n '1959-10-13T12')\r\n assert_equal(np.datetime_as_string(np.datetime64(datetime, 'm')),\r\n '1959-10-13T12:34')\r\n assert_equal(np.datetime_as_string(np.datetime64(datetime, 's')),\r\n '1959-10-13T12:34:56')\r\n assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ms')),\r\n '1959-10-13T12:34:56.789')\r\n for us in ['us', 'μs', b'us']: # check non-ascii and bytes too\r\n assert_equal(np.datetime_as_string(np.datetime64(datetime, us)),\r\n '1959-10-13T12:34:56.789012')\r\n\r\n datetime = '1969-12-31T23:34:56.789012345678901234'\r\n\r\n assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ns')),\r\n '1969-12-31T23:34:56.789012345')\r\n assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ps')),\r\n '1969-12-31T23:34:56.789012345678')\r\n assert_equal(np.datetime_as_string(np.datetime64(datetime, 'fs')),\r\n '1969-12-31T23:34:56.789012345678901')\r\n\r\n datetime = '1969-12-31T23:59:57.789012345678901234'\r\n\r\n assert_equal(np.datetime_as_string(np.datetime64(datetime, 'as')),\r\n datetime)\r\n datetime = '1970-01-01T00:34:56.789012345678901234'\r\n\r\n assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ns')),\r\n '1970-01-01T00:34:56.789012345')\r\n assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ps')),\r\n '1970-01-01T00:34:56.789012345678')\r\n assert_equal(np.datetime_as_string(np.datetime64(datetime, 'fs')),\r\n 
'1970-01-01T00:34:56.789012345678901')\r\n\r\n datetime = '1970-01-01T00:00:05.789012345678901234'\r\n\r\n assert_equal(np.datetime_as_string(np.datetime64(datetime, 'as')),\r\n datetime)\r\n\r\n # String conversion with the unit= parameter\r\n a = np.datetime64('2032-07-18T12:23:34.123456', 'us')\r\n assert_equal(np.datetime_as_string(a, unit='Y', casting='unsafe'),\r\n '2032')\r\n assert_equal(np.datetime_as_string(a, unit='M', casting='unsafe'),\r\n '2032-07')\r\n assert_equal(np.datetime_as_string(a, unit='W', casting='unsafe'),\r\n '2032-07-18')\r\n assert_equal(np.datetime_as_string(a, unit='D', casting='unsafe'),\r\n '2032-07-18')\r\n assert_equal(np.datetime_as_string(a, unit='h'), '2032-07-18T12')\r\n assert_equal(np.datetime_as_string(a, unit='m'),\r\n '2032-07-18T12:23')\r\n assert_equal(np.datetime_as_string(a, unit='s'),\r\n '2032-07-18T12:23:34')\r\n assert_equal(np.datetime_as_string(a, unit='ms'),\r\n '2032-07-18T12:23:34.123')\r\n assert_equal(np.datetime_as_string(a, unit='us'),\r\n '2032-07-18T12:23:34.123456')\r\n assert_equal(np.datetime_as_string(a, unit='ns'),\r\n '2032-07-18T12:23:34.123456000')\r\n assert_equal(np.datetime_as_string(a, unit='ps'),\r\n '2032-07-18T12:23:34.123456000000')\r\n assert_equal(np.datetime_as_string(a, unit='fs'),\r\n '2032-07-18T12:23:34.123456000000000')\r\n assert_equal(np.datetime_as_string(a, unit='as'),\r\n '2032-07-18T12:23:34.123456000000000000')\r\n\r\n # unit='auto' parameter\r\n assert_equal(np.datetime_as_string(\r\n np.datetime64('2032-07-18T12:23:34.123456', 'us'), unit='auto'),\r\n '2032-07-18T12:23:34.123456')\r\n assert_equal(np.datetime_as_string(\r\n np.datetime64('2032-07-18T12:23:34.12', 'us'), unit='auto'),\r\n '2032-07-18T12:23:34.120')\r\n assert_equal(np.datetime_as_string(\r\n np.datetime64('2032-07-18T12:23:34', 'us'), unit='auto'),\r\n '2032-07-18T12:23:34')\r\n assert_equal(np.datetime_as_string(\r\n np.datetime64('2032-07-18T12:23:00', 'us'), unit='auto'),\r\n '2032-07-18T12:23')\r\n # 'auto' doesn't split up hour and minute\r\n assert_equal(np.datetime_as_string(\r\n np.datetime64('2032-07-18T12:00:00', 'us'), unit='auto'),\r\n '2032-07-18T12:00')\r\n assert_equal(np.datetime_as_string(\r\n np.datetime64('2032-07-18T00:00:00', 'us'), unit='auto'),\r\n '2032-07-18')\r\n # 'auto' doesn't split up the date\r\n assert_equal(np.datetime_as_string(\r\n np.datetime64('2032-07-01T00:00:00', 'us'), unit='auto'),\r\n '2032-07-01')\r\n assert_equal(np.datetime_as_string(\r\n np.datetime64('2032-01-01T00:00:00', 'us'), unit='auto'),\r\n '2032-01-01')\r\n\r\n @pytest.mark.skipif(not _has_pytz, reason=\"The pytz module is not available.\")\r\n def test_datetime_as_string_timezone(self):\r\n # timezone='local' vs 'UTC'\r\n a = np.datetime64('2010-03-15T06:30', 'm')\r\n assert_equal(np.datetime_as_string(a),\r\n '2010-03-15T06:30')\r\n assert_equal(np.datetime_as_string(a, timezone='naive'),\r\n '2010-03-15T06:30')\r\n assert_equal(np.datetime_as_string(a, timezone='UTC'),\r\n '2010-03-15T06:30Z')\r\n assert_(np.datetime_as_string(a, timezone='local') !=\r\n '2010-03-15T06:30')\r\n\r\n b = np.datetime64('2010-02-15T06:30', 'm')\r\n\r\n assert_equal(np.datetime_as_string(a, timezone=tz('US/Central')),\r\n '2010-03-15T01:30-0500')\r\n assert_equal(np.datetime_as_string(a, timezone=tz('US/Eastern')),\r\n '2010-03-15T02:30-0400')\r\n assert_equal(np.datetime_as_string(a, timezone=tz('US/Pacific')),\r\n '2010-03-14T23:30-0700')\r\n\r\n assert_equal(np.datetime_as_string(b, timezone=tz('US/Central')),\r\n 
'2010-02-15T00:30-0600')\r\n assert_equal(np.datetime_as_string(b, timezone=tz('US/Eastern')),\r\n '2010-02-15T01:30-0500')\r\n assert_equal(np.datetime_as_string(b, timezone=tz('US/Pacific')),\r\n '2010-02-14T22:30-0800')\r\n\r\n # Dates to strings with a timezone attached is disabled by default\r\n assert_raises(TypeError, np.datetime_as_string, a, unit='D',\r\n timezone=tz('US/Pacific'))\r\n # Check that we can print out the date in the specified time zone\r\n assert_equal(np.datetime_as_string(a, unit='D',\r\n timezone=tz('US/Pacific'), casting='unsafe'),\r\n '2010-03-14')\r\n assert_equal(np.datetime_as_string(b, unit='D',\r\n timezone=tz('US/Central'), casting='unsafe'),\r\n '2010-02-15')\r\n\r\n def test_datetime_arange(self):\r\n # With two datetimes provided as strings\r\n a = np.arange('2010-01-05', '2010-01-10', dtype='M8[D]')\r\n assert_equal(a.dtype, np.dtype('M8[D]'))\r\n assert_equal(a,\r\n np.array(['2010-01-05', '2010-01-06', '2010-01-07',\r\n '2010-01-08', '2010-01-09'], dtype='M8[D]'))\r\n\r\n a = np.arange('1950-02-10', '1950-02-06', -1, dtype='M8[D]')\r\n assert_equal(a.dtype, np.dtype('M8[D]'))\r\n assert_equal(a,\r\n np.array(['1950-02-10', '1950-02-09', '1950-02-08',\r\n '1950-02-07'], dtype='M8[D]'))\r\n\r\n # Unit should be detected as months here\r\n a = np.arange('1969-05', '1970-05', 2, dtype='M8')\r\n assert_equal(a.dtype, np.dtype('M8[M]'))\r\n assert_equal(a,\r\n np.datetime64('1969-05') + np.arange(12, step=2))\r\n\r\n # datetime, integer|timedelta works as well\r\n # produces arange (start, start + stop) in this case\r\n a = np.arange('1969', 18, 3, dtype='M8')\r\n assert_equal(a.dtype, np.dtype('M8[Y]'))\r\n assert_equal(a,\r\n np.datetime64('1969') + np.arange(18, step=3))\r\n a = np.arange('1969-12-19', 22, np.timedelta64(2), dtype='M8')\r\n assert_equal(a.dtype, np.dtype('M8[D]'))\r\n assert_equal(a,\r\n np.datetime64('1969-12-19') + np.arange(22, step=2))\r\n\r\n # Step of 0 is disallowed\r\n assert_raises(ValueError, np.arange, np.datetime64('today'),\r\n np.datetime64('today') + 3, 0)\r\n # Promotion across nonlinear unit boundaries is disallowed\r\n assert_raises(TypeError, np.arange, np.datetime64('2011-03-01', 'D'),\r\n np.timedelta64(5, 'M'))\r\n assert_raises(TypeError, np.arange,\r\n np.datetime64('2012-02-03T14', 's'),\r\n np.timedelta64(5, 'Y'))\r\n\r\n def test_datetime_arange_no_dtype(self):\r\n d = np.array('2010-01-04', dtype=\"M8[D]\")\r\n assert_equal(np.arange(d, d + 1), d)\r\n assert_raises(ValueError, np.arange, d)\r\n\r\n def test_timedelta_arange(self):\r\n a = np.arange(3, 10, dtype='m8')\r\n assert_equal(a.dtype, np.dtype('m8'))\r\n assert_equal(a, np.timedelta64(0) + np.arange(3, 10))\r\n\r\n a = np.arange(np.timedelta64(3, 's'), 10, 2, dtype='m8')\r\n assert_equal(a.dtype, np.dtype('m8[s]'))\r\n assert_equal(a, np.timedelta64(0, 's') + np.arange(3, 10, 2))\r\n\r\n # Step of 0 is disallowed\r\n assert_raises(ValueError, np.arange, np.timedelta64(0),\r\n np.timedelta64(5), 0)\r\n # Promotion across nonlinear unit boundaries is disallowed\r\n assert_raises(TypeError, np.arange, np.timedelta64(0, 'D'),\r\n np.timedelta64(5, 'M'))\r\n assert_raises(TypeError, np.arange, np.timedelta64(0, 'Y'),\r\n np.timedelta64(5, 'D'))\r\n\r\n @pytest.mark.parametrize(\"val1, val2, expected\", [\r\n # case from gh-12092\r\n (np.timedelta64(7, 's'),\r\n np.timedelta64(3, 's'),\r\n np.timedelta64(1, 's')),\r\n # negative value cases\r\n (np.timedelta64(3, 's'),\r\n np.timedelta64(-2, 's'),\r\n np.timedelta64(-1, 's')),\r\n (np.timedelta64(-3, 
's'),\r\n np.timedelta64(2, 's'),\r\n np.timedelta64(1, 's')),\r\n # larger value cases\r\n (np.timedelta64(17, 's'),\r\n np.timedelta64(22, 's'),\r\n np.timedelta64(17, 's')),\r\n (np.timedelta64(22, 's'),\r\n np.timedelta64(17, 's'),\r\n np.timedelta64(5, 's')),\r\n # different units\r\n (np.timedelta64(1, 'm'),\r\n np.timedelta64(57, 's'),\r\n np.timedelta64(3, 's')),\r\n (np.timedelta64(1, 'us'),\r\n np.timedelta64(727, 'ns'),\r\n np.timedelta64(273, 'ns')),\r\n # NaT is propagated\r\n (np.timedelta64('NaT'),\r\n np.timedelta64(50, 'ns'),\r\n np.timedelta64('NaT')),\r\n # Y % M works\r\n (np.timedelta64(2, 'Y'),\r\n np.timedelta64(22, 'M'),\r\n np.timedelta64(2, 'M')),\r\n ])\r\n def test_timedelta_modulus(self, val1, val2, expected):\r\n assert_equal(val1 % val2, expected)\r\n\r\n @pytest.mark.parametrize(\"val1, val2\", [\r\n # years and months sometimes can't be unambiguously\r\n # divided for modulus operation\r\n (np.timedelta64(7, 'Y'),\r\n np.timedelta64(3, 's')),\r\n (np.timedelta64(7, 'M'),\r\n np.timedelta64(1, 'D')),\r\n ])\r\n def test_timedelta_modulus_error(self, val1, val2):\r\n with assert_raises_regex(TypeError, \"common metadata divisor\"):\r\n val1 % val2\r\n\r\n def test_timedelta_modulus_div_by_zero(self):\r\n with assert_warns(RuntimeWarning):\r\n actual = np.timedelta64(10, 's') % np.timedelta64(0, 's')\r\n assert_equal(actual, np.timedelta64('NaT'))\r\n\r\n @pytest.mark.parametrize(\"val1, val2\", [\r\n # cases where one operand is not\r\n # timedelta64\r\n (np.timedelta64(7, 'Y'),\r\n 15,),\r\n (7.5,\r\n np.timedelta64(1, 'D')),\r\n ])\r\n def test_timedelta_modulus_type_resolution(self, val1, val2):\r\n # NOTE: some of the operations may be supported\r\n # in the future\r\n with assert_raises_regex(TypeError,\r\n \"'remainder' cannot use operands with types\"):\r\n val1 % val2\r\n\r\n def test_timedelta_arange_no_dtype(self):\r\n d = np.array(5, dtype=\"m8[D]\")\r\n assert_equal(np.arange(d, d + 1), d)\r\n assert_equal(np.arange(d), np.arange(0, d))\r\n\r\n def test_datetime_maximum_reduce(self):\r\n a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='M8[D]')\r\n assert_equal(np.maximum.reduce(a).dtype, np.dtype('M8[D]'))\r\n assert_equal(np.maximum.reduce(a),\r\n np.datetime64('2010-01-02'))\r\n\r\n a = np.array([1, 4, 0, 7, 2], dtype='m8[s]')\r\n assert_equal(np.maximum.reduce(a).dtype, np.dtype('m8[s]'))\r\n assert_equal(np.maximum.reduce(a),\r\n np.timedelta64(7, 's'))\r\n\r\n def test_timedelta_correct_mean(self):\r\n # test mainly because it worked only via a bug in that allowed:\r\n # `timedelta.sum(dtype=\"f8\")` to ignore the dtype request.\r\n a = np.arange(1000, dtype=\"m8[s]\")\r\n assert_array_equal(a.mean(), a.sum() / len(a))\r\n\r\n def test_datetime_no_subtract_reducelike(self):\r\n # subtracting two datetime64 works, but we cannot reduce it, since\r\n # the result of that subtraction will have a different dtype.\r\n arr = np.array([\"2021-12-02\", \"2019-05-12\"], dtype=\"M8[ms]\")\r\n msg = r\"the resolved dtypes are not compatible\"\r\n\r\n with pytest.raises(TypeError, match=msg):\r\n np.subtract.reduce(arr)\r\n\r\n with pytest.raises(TypeError, match=msg):\r\n np.subtract.accumulate(arr)\r\n\r\n with pytest.raises(TypeError, match=msg):\r\n np.subtract.reduceat(arr, [0])\r\n\r\n def test_datetime_busday_offset(self):\r\n # First Monday in June\r\n assert_equal(\r\n np.busday_offset('2011-06', 0, roll='forward', weekmask='Mon'),\r\n np.datetime64('2011-06-06'))\r\n # Last Monday in June\r\n assert_equal(\r\n 
np.busday_offset('2011-07', -1, roll='forward', weekmask='Mon'),\r\n np.datetime64('2011-06-27'))\r\n assert_equal(\r\n np.busday_offset('2011-07', -1, roll='forward', weekmask='Mon'),\r\n np.datetime64('2011-06-27'))\r\n\r\n # Default M-F business days, different roll modes\r\n assert_equal(np.busday_offset('2010-08', 0, roll='backward'),\r\n np.datetime64('2010-07-30'))\r\n assert_equal(np.busday_offset('2010-08', 0, roll='preceding'),\r\n np.datetime64('2010-07-30'))\r\n assert_equal(np.busday_offset('2010-08', 0, roll='modifiedpreceding'),\r\n np.datetime64('2010-08-02'))\r\n assert_equal(np.busday_offset('2010-08', 0, roll='modifiedfollowing'),\r\n np.datetime64('2010-08-02'))\r\n assert_equal(np.busday_offset('2010-08', 0, roll='forward'),\r\n np.datetime64('2010-08-02'))\r\n assert_equal(np.busday_offset('2010-08', 0, roll='following'),\r\n np.datetime64('2010-08-02'))\r\n assert_equal(np.busday_offset('2010-10-30', 0, roll='following'),\r\n np.datetime64('2010-11-01'))\r\n assert_equal(\r\n np.busday_offset('2010-10-30', 0, roll='modifiedfollowing'),\r\n np.datetime64('2010-10-29'))\r\n assert_equal(\r\n np.busday_offset('2010-10-30', 0, roll='modifiedpreceding'),\r\n np.datetime64('2010-10-29'))\r\n assert_equal(\r\n np.busday_offset('2010-10-16', 0, roll='modifiedfollowing'),\r\n np.datetime64('2010-10-18'))\r\n assert_equal(\r\n np.busday_offset('2010-10-16', 0, roll='modifiedpreceding'),\r\n np.datetime64('2010-10-15'))\r\n # roll='raise' by default\r\n assert_raises(ValueError, np.busday_offset, '2011-06-04', 0)\r\n\r\n # Bigger offset values\r\n assert_equal(np.busday_offset('2006-02-01', 25),\r\n np.datetime64('2006-03-08'))\r\n assert_equal(np.busday_offset('2006-03-08', -25),\r\n np.datetime64('2006-02-01'))\r\n assert_equal(np.busday_offset('2007-02-25', 11, weekmask='SatSun'),\r\n np.datetime64('2007-04-07'))\r\n assert_equal(np.busday_offset('2007-04-07', -11, weekmask='SatSun'),\r\n np.datetime64('2007-02-25'))\r\n\r\n # NaT values when roll is not raise\r\n assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='nat'),\r\n np.datetime64('NaT'))\r\n assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='following'),\r\n np.datetime64('NaT'))\r\n assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='preceding'),\r\n np.datetime64('NaT'))\r\n\r\n def test_datetime_busdaycalendar(self):\r\n # Check that it removes NaT, duplicates, and weekends\r\n # and sorts the result.\r\n bdd = np.busdaycalendar(\r\n holidays=['NaT', '2011-01-17', '2011-03-06', 'NaT',\r\n '2011-12-26', '2011-05-30', '2011-01-17'])\r\n assert_equal(bdd.holidays,\r\n np.array(['2011-01-17', '2011-05-30', '2011-12-26'], dtype='M8'))\r\n # Default M-F weekmask\r\n assert_equal(bdd.weekmask, np.array([1, 1, 1, 1, 1, 0, 0], dtype='?'))\r\n\r\n # Check string weekmask with varying whitespace.\r\n bdd = np.busdaycalendar(weekmask=\"Sun TueWed Thu\\tFri\")\r\n assert_equal(bdd.weekmask, np.array([0, 1, 1, 1, 1, 0, 1], dtype='?'))\r\n\r\n # Check length 7 0/1 string\r\n bdd = np.busdaycalendar(weekmask=\"0011001\")\r\n assert_equal(bdd.weekmask, np.array([0, 0, 1, 1, 0, 0, 1], dtype='?'))\r\n\r\n # Check length 7 string weekmask.\r\n bdd = np.busdaycalendar(weekmask=\"Mon Tue\")\r\n assert_equal(bdd.weekmask, np.array([1, 1, 0, 0, 0, 0, 0], dtype='?'))\r\n\r\n # All-zeros weekmask should raise\r\n assert_raises(ValueError, np.busdaycalendar, weekmask=[0, 0, 0, 0, 0, 0, 0])\r\n # weekday names must be correct case\r\n assert_raises(ValueError, np.busdaycalendar, weekmask=\"satsun\")\r\n # 
All-zeros weekmask should raise\r\n assert_raises(ValueError, np.busdaycalendar, weekmask=\"\")\r\n # Invalid weekday name codes should raise\r\n assert_raises(ValueError, np.busdaycalendar, weekmask=\"Mon Tue We\")\r\n assert_raises(ValueError, np.busdaycalendar, weekmask=\"Max\")\r\n assert_raises(ValueError, np.busdaycalendar, weekmask=\"Monday Tue\")\r\n\r\n def test_datetime_busday_holidays_offset(self):\r\n # With exactly one holiday\r\n assert_equal(\r\n np.busday_offset('2011-11-10', 1, holidays=['2011-11-11']),\r\n np.datetime64('2011-11-14'))\r\n assert_equal(\r\n np.busday_offset('2011-11-04', 5, holidays=['2011-11-11']),\r\n np.datetime64('2011-11-14'))\r\n assert_equal(\r\n np.busday_offset('2011-11-10', 5, holidays=['2011-11-11']),\r\n np.datetime64('2011-11-18'))\r\n assert_equal(\r\n np.busday_offset('2011-11-14', -1, holidays=['2011-11-11']),\r\n np.datetime64('2011-11-10'))\r\n assert_equal(\r\n np.busday_offset('2011-11-18', -5, holidays=['2011-11-11']),\r\n np.datetime64('2011-11-10'))\r\n assert_equal(\r\n np.busday_offset('2011-11-14', -5, holidays=['2011-11-11']),\r\n np.datetime64('2011-11-04'))\r\n # With the holiday appearing twice\r\n assert_equal(\r\n np.busday_offset('2011-11-10', 1,\r\n holidays=['2011-11-11', '2011-11-11']),\r\n np.datetime64('2011-11-14'))\r\n assert_equal(\r\n np.busday_offset('2011-11-14', -1,\r\n holidays=['2011-11-11', '2011-11-11']),\r\n np.datetime64('2011-11-10'))\r\n # With a NaT holiday\r\n assert_equal(\r\n np.busday_offset('2011-11-10', 1,\r\n holidays=['2011-11-11', 'NaT']),\r\n np.datetime64('2011-11-14'))\r\n assert_equal(\r\n np.busday_offset('2011-11-14', -1,\r\n holidays=['NaT', '2011-11-11']),\r\n np.datetime64('2011-11-10'))\r\n # With another holiday after\r\n assert_equal(\r\n np.busday_offset('2011-11-10', 1,\r\n holidays=['2011-11-11', '2011-11-24']),\r\n np.datetime64('2011-11-14'))\r\n assert_equal(\r\n np.busday_offset('2011-11-14', -1,\r\n holidays=['2011-11-11', '2011-11-24']),\r\n np.datetime64('2011-11-10'))\r\n # With another holiday before\r\n assert_equal(\r\n np.busday_offset('2011-11-10', 1,\r\n holidays=['2011-10-10', '2011-11-11']),\r\n np.datetime64('2011-11-14'))\r\n assert_equal(\r\n np.busday_offset('2011-11-14', -1,\r\n holidays=['2011-10-10', '2011-11-11']),\r\n np.datetime64('2011-11-10'))\r\n # With another holiday before and after\r\n assert_equal(\r\n np.busday_offset('2011-11-10', 1,\r\n holidays=['2011-10-10', '2011-11-11', '2011-11-24']),\r\n np.datetime64('2011-11-14'))\r\n assert_equal(\r\n np.busday_offset('2011-11-14', -1,\r\n holidays=['2011-10-10', '2011-11-11', '2011-11-24']),\r\n np.datetime64('2011-11-10'))\r\n\r\n # A bigger forward jump across more than one week/holiday\r\n holidays = ['2011-10-10', '2011-11-11', '2011-11-24',\r\n '2011-12-25', '2011-05-30', '2011-02-21',\r\n '2011-12-26', '2012-01-02']\r\n bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)\r\n assert_equal(\r\n np.busday_offset('2011-10-03', 4, holidays=holidays),\r\n np.busday_offset('2011-10-03', 4))\r\n assert_equal(\r\n np.busday_offset('2011-10-03', 5, holidays=holidays),\r\n np.busday_offset('2011-10-03', 5 + 1))\r\n assert_equal(\r\n np.busday_offset('2011-10-03', 27, holidays=holidays),\r\n np.busday_offset('2011-10-03', 27 + 1))\r\n assert_equal(\r\n np.busday_offset('2011-10-03', 28, holidays=holidays),\r\n np.busday_offset('2011-10-03', 28 + 2))\r\n assert_equal(\r\n np.busday_offset('2011-10-03', 35, holidays=holidays),\r\n np.busday_offset('2011-10-03', 35 + 2))\r\n assert_equal(\r\n 
np.busday_offset('2011-10-03', 36, holidays=holidays),\r\n np.busday_offset('2011-10-03', 36 + 3))\r\n assert_equal(\r\n np.busday_offset('2011-10-03', 56, holidays=holidays),\r\n np.busday_offset('2011-10-03', 56 + 3))\r\n assert_equal(\r\n np.busday_offset('2011-10-03', 57, holidays=holidays),\r\n np.busday_offset('2011-10-03', 57 + 4))\r\n assert_equal(\r\n np.busday_offset('2011-10-03', 60, holidays=holidays),\r\n np.busday_offset('2011-10-03', 60 + 4))\r\n assert_equal(\r\n np.busday_offset('2011-10-03', 61, holidays=holidays),\r\n np.busday_offset('2011-10-03', 61 + 5))\r\n assert_equal(\r\n np.busday_offset('2011-10-03', 61, busdaycal=bdd),\r\n np.busday_offset('2011-10-03', 61 + 5))\r\n # A bigger backward jump across more than one week/holiday\r\n assert_equal(\r\n np.busday_offset('2012-01-03', -1, holidays=holidays),\r\n np.busday_offset('2012-01-03', -1 - 1))\r\n assert_equal(\r\n np.busday_offset('2012-01-03', -4, holidays=holidays),\r\n np.busday_offset('2012-01-03', -4 - 1))\r\n assert_equal(\r\n np.busday_offset('2012-01-03', -5, holidays=holidays),\r\n np.busday_offset('2012-01-03', -5 - 2))\r\n assert_equal(\r\n np.busday_offset('2012-01-03', -25, holidays=holidays),\r\n np.busday_offset('2012-01-03', -25 - 2))\r\n assert_equal(\r\n np.busday_offset('2012-01-03', -26, holidays=holidays),\r\n np.busday_offset('2012-01-03', -26 - 3))\r\n assert_equal(\r\n np.busday_offset('2012-01-03', -33, holidays=holidays),\r\n np.busday_offset('2012-01-03', -33 - 3))\r\n assert_equal(\r\n np.busday_offset('2012-01-03', -34, holidays=holidays),\r\n np.busday_offset('2012-01-03', -34 - 4))\r\n assert_equal(\r\n np.busday_offset('2012-01-03', -56, holidays=holidays),\r\n np.busday_offset('2012-01-03', -56 - 4))\r\n assert_equal(\r\n np.busday_offset('2012-01-03', -57, holidays=holidays),\r\n np.busday_offset('2012-01-03', -57 - 5))\r\n assert_equal(\r\n np.busday_offset('2012-01-03', -57, busdaycal=bdd),\r\n np.busday_offset('2012-01-03', -57 - 5))\r\n\r\n # Can't supply both a weekmask/holidays and busdaycal\r\n assert_raises(ValueError, np.busday_offset, '2012-01-03', -15,\r\n weekmask='1111100', busdaycal=bdd)\r\n assert_raises(ValueError, np.busday_offset, '2012-01-03', -15,\r\n holidays=holidays, busdaycal=bdd)\r\n\r\n # Roll with the holidays\r\n assert_equal(\r\n np.busday_offset('2011-12-25', 0,\r\n roll='forward', holidays=holidays),\r\n np.datetime64('2011-12-27'))\r\n assert_equal(\r\n np.busday_offset('2011-12-26', 0,\r\n roll='forward', holidays=holidays),\r\n np.datetime64('2011-12-27'))\r\n assert_equal(\r\n np.busday_offset('2011-12-26', 0,\r\n roll='backward', holidays=holidays),\r\n np.datetime64('2011-12-23'))\r\n assert_equal(\r\n np.busday_offset('2012-02-27', 0,\r\n roll='modifiedfollowing',\r\n holidays=['2012-02-27', '2012-02-26', '2012-02-28',\r\n '2012-03-01', '2012-02-29']),\r\n np.datetime64('2012-02-24'))\r\n assert_equal(\r\n np.busday_offset('2012-03-06', 0,\r\n roll='modifiedpreceding',\r\n holidays=['2012-03-02', '2012-03-03', '2012-03-01',\r\n '2012-03-05', '2012-03-07', '2012-03-06']),\r\n np.datetime64('2012-03-08'))\r\n\r\n def test_datetime_busday_holidays_count(self):\r\n holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24',\r\n '2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17',\r\n '2011-12-26', '2012-01-02', '2011-02-21', '2011-05-30',\r\n '2011-07-01', '2011-07-04', '2011-09-05', '2011-10-10']\r\n bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)\r\n\r\n # Validate against busday_offset broadcast against\r\n # a 
range of offsets\r\n dates = np.busday_offset('2011-01-01', np.arange(366),\r\n roll='forward', busdaycal=bdd)\r\n assert_equal(np.busday_count('2011-01-01', dates, busdaycal=bdd),\r\n np.arange(366))\r\n # Returns negative value when reversed\r\n assert_equal(np.busday_count(dates, '2011-01-01', busdaycal=bdd),\r\n -np.arange(366))\r\n\r\n dates = np.busday_offset('2011-12-31', -np.arange(366),\r\n roll='forward', busdaycal=bdd)\r\n assert_equal(np.busday_count(dates, '2011-12-31', busdaycal=bdd),\r\n np.arange(366))\r\n # Returns negative value when reversed\r\n assert_equal(np.busday_count('2011-12-31', dates, busdaycal=bdd),\r\n -np.arange(366))\r\n\r\n # Can't supply both a weekmask/holidays and busdaycal\r\n assert_raises(ValueError, np.busday_offset, '2012-01-03', '2012-02-03',\r\n weekmask='1111100', busdaycal=bdd)\r\n assert_raises(ValueError, np.busday_offset, '2012-01-03', '2012-02-03',\r\n holidays=holidays, busdaycal=bdd)\r\n\r\n # Number of Mondays in March 2011\r\n assert_equal(np.busday_count('2011-03', '2011-04', weekmask='Mon'), 4)\r\n # Returns negative value when reversed\r\n assert_equal(np.busday_count('2011-04', '2011-03', weekmask='Mon'), -4)\r\n\r\n def test_datetime_is_busday(self):\r\n holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24',\r\n '2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17',\r\n '2011-12-26', '2012-01-02', '2011-02-21', '2011-05-30',\r\n '2011-07-01', '2011-07-04', '2011-09-05', '2011-10-10',\r\n 'NaT']\r\n bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)\r\n\r\n # Weekend/weekday tests\r\n assert_equal(np.is_busday('2011-01-01'), False)\r\n assert_equal(np.is_busday('2011-01-02'), False)\r\n assert_equal(np.is_busday('2011-01-03'), True)\r\n\r\n # All the holidays are not business days\r\n assert_equal(np.is_busday(holidays, busdaycal=bdd),\r\n np.zeros(len(holidays), dtype='?'))\r\n\r\n def test_datetime_y2038(self):\r\n # Test parsing on either side of the Y2038 boundary\r\n a = np.datetime64('2038-01-19T03:14:07')\r\n assert_equal(a.view(np.int64), 2**31 - 1)\r\n a = np.datetime64('2038-01-19T03:14:08')\r\n assert_equal(a.view(np.int64), 2**31)\r\n\r\n # Test parsing on either side of the Y2038 boundary with\r\n # a manually specified timezone offset\r\n with assert_warns(DeprecationWarning):\r\n a = np.datetime64('2038-01-19T04:14:07+0100')\r\n assert_equal(a.view(np.int64), 2**31 - 1)\r\n with assert_warns(DeprecationWarning):\r\n a = np.datetime64('2038-01-19T04:14:08+0100')\r\n assert_equal(a.view(np.int64), 2**31)\r\n\r\n # Test parsing a date after Y2038\r\n a = np.datetime64('2038-01-20T13:21:14')\r\n assert_equal(str(a), '2038-01-20T13:21:14')\r\n\r\n def test_isnat(self):\r\n assert_(np.isnat(np.datetime64('NaT', 'ms')))\r\n assert_(np.isnat(np.datetime64('NaT', 'ns')))\r\n assert_(not np.isnat(np.datetime64('2038-01-19T03:14:07')))\r\n\r\n assert_(np.isnat(np.timedelta64('NaT', \"ms\")))\r\n assert_(not np.isnat(np.timedelta64(34, \"ms\")))\r\n\r\n res = np.array([False, False, True])\r\n for unit in ['Y', 'M', 'W', 'D',\r\n 'h', 'm', 's', 'ms', 'us',\r\n 'ns', 'ps', 'fs', 'as']:\r\n arr = np.array([123, -321, \"NaT\"], dtype='<datetime64[%s]' % unit)\r\n assert_equal(np.isnat(arr), res)\r\n arr = np.array([123, -321, \"NaT\"], dtype='>datetime64[%s]' % unit)\r\n assert_equal(np.isnat(arr), res)\r\n arr = np.array([123, -321, \"NaT\"], dtype='<timedelta64[%s]' % unit)\r\n assert_equal(np.isnat(arr), res)\r\n arr = np.array([123, -321, \"NaT\"], dtype='>timedelta64[%s]' % unit)\r\n 
assert_equal(np.isnat(arr), res)\r\n\r\n def test_isnat_error(self):\r\n # Test that only datetime dtype arrays are accepted\r\n for t in np.typecodes[\"All\"]:\r\n if t in np.typecodes[\"Datetime\"]:\r\n continue\r\n assert_raises(TypeError, np.isnat, np.zeros(10, t))\r\n\r\n def test_isfinite_scalar(self):\r\n assert_(not np.isfinite(np.datetime64('NaT', 'ms')))\r\n assert_(not np.isfinite(np.datetime64('NaT', 'ns')))\r\n assert_(np.isfinite(np.datetime64('2038-01-19T03:14:07')))\r\n\r\n assert_(not np.isfinite(np.timedelta64('NaT', \"ms\")))\r\n assert_(np.isfinite(np.timedelta64(34, \"ms\")))\r\n\r\n @pytest.mark.parametrize('unit', ['Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',\r\n 'us', 'ns', 'ps', 'fs', 'as'])\r\n @pytest.mark.parametrize('dstr', ['<datetime64[%s]', '>datetime64[%s]',\r\n '<timedelta64[%s]', '>timedelta64[%s]'])\r\n def test_isfinite_isinf_isnan_units(self, unit, dstr):\r\n '''check isfinite, isinf, isnan for all units of <M, >M, <m, >m dtypes\r\n '''\r\n arr_val = [123, -321, \"NaT\"]\r\n arr = np.array(arr_val, dtype= dstr % unit)\r\n pos = np.array([True, True, False])\r\n neg = np.array([False, False, True])\r\n false = np.array([False, False, False])\r\n assert_equal(np.isfinite(arr), pos)\r\n assert_equal(np.isinf(arr), false)\r\n assert_equal(np.isnan(arr), neg)\r\n\r\n def test_assert_equal(self):\r\n assert_raises(AssertionError, assert_equal,\r\n np.datetime64('nat'), np.timedelta64('nat'))\r\n\r\n def test_corecursive_input(self):\r\n # construct a co-recursive list\r\n a, b = [], []\r\n a.append(b)\r\n b.append(a)\r\n obj_arr = np.array([None])\r\n obj_arr[0] = a\r\n\r\n # At some point this caused a stack overflow (gh-11154). Now raises\r\n # ValueError since the nested list cannot be converted to a datetime.\r\n assert_raises(ValueError, obj_arr.astype, 'M8')\r\n assert_raises(ValueError, obj_arr.astype, 'm8')\r\n\r\n @pytest.mark.parametrize(\"shape\", [(), (1,)])\r\n def test_discovery_from_object_array(self, shape):\r\n arr = np.array(\"2020-10-10\", dtype=object).reshape(shape)\r\n res = np.array(\"2020-10-10\", dtype=\"M8\").reshape(shape)\r\n assert res.dtype == np.dtype(\"M8[D]\")\r\n assert_equal(arr.astype(\"M8\"), res)\r\n arr[...] 
= np.bytes_(\"2020-10-10\") # try a numpy string type\r\n assert_equal(arr.astype(\"M8\"), res)\r\n arr = arr.astype(\"S\")\r\n assert_equal(arr.astype(\"S\").astype(\"M8\"), res)\r\n\r\n @pytest.mark.parametrize(\"time_unit\", [\r\n \"Y\", \"M\", \"W\", \"D\", \"h\", \"m\", \"s\", \"ms\", \"us\", \"ns\", \"ps\", \"fs\", \"as\",\r\n # compound units\r\n \"10D\", \"2M\",\r\n ])\r\n def test_limit_symmetry(self, time_unit):\r\n \"\"\"\r\n Dates should have symmetric limits around the unix epoch at +/-np.int64\r\n \"\"\"\r\n epoch = np.datetime64(0, time_unit)\r\n latest = np.datetime64(np.iinfo(np.int64).max, time_unit)\r\n earliest = np.datetime64(-np.iinfo(np.int64).max, time_unit)\r\n\r\n # above should not have overflowed\r\n assert earliest < epoch < latest\r\n\r\n @pytest.mark.parametrize(\"time_unit\", [\r\n \"Y\", \"M\",\r\n pytest.param(\"W\", marks=pytest.mark.xfail(reason=\"gh-13197\")),\r\n \"D\", \"h\", \"m\",\r\n \"s\", \"ms\", \"us\", \"ns\", \"ps\", \"fs\", \"as\",\r\n pytest.param(\"10D\", marks=pytest.mark.xfail(reason=\"similar to gh-13197\")),\r\n ])\r\n @pytest.mark.parametrize(\"sign\", [-1, 1])\r\n def test_limit_str_roundtrip(self, time_unit, sign):\r\n \"\"\"\r\n Limits should roundtrip when converted to strings.\r\n\r\n This tests the conversion to and from npy_datetimestruct.\r\n \"\"\"\r\n # TODO: add absolute (gold standard) time span limit strings\r\n limit = np.datetime64(np.iinfo(np.int64).max * sign, time_unit)\r\n\r\n # Convert to string and back. Explicit unit needed since the day and\r\n # week reprs are not distinguishable.\r\n limit_via_str = np.datetime64(str(limit), time_unit)\r\n assert limit_via_str == limit\r\n\r\n\r\nclass TestDateTimeData:\r\n\r\n def test_basic(self):\r\n a = np.array(['1980-03-23'], dtype=np.datetime64)\r\n assert_equal(np.datetime_data(a.dtype), ('D', 1))\r\n\r\n def test_bytes(self):\r\n # byte units are converted to unicode\r\n dt = np.datetime64('2000', (b'ms', 5))\r\n assert np.datetime_data(dt.dtype) == ('ms', 5)\r\n\r\n dt = np.datetime64('2000', b'5ms')\r\n assert np.datetime_data(dt.dtype) == ('ms', 5)\r\n\r\n def test_non_ascii(self):\r\n # μs is normalized to μ\r\n dt = np.datetime64('2000', ('μs', 5))\r\n assert np.datetime_data(dt.dtype) == ('us', 5)\r\n\r\n dt = np.datetime64('2000', '5μs')\r\n assert np.datetime_data(dt.dtype) == ('us', 5)\r\n",
"from numbers import Number\r\n\r\nimport torch\r\nfrom torch.distributions import constraints\r\nfrom torch.distributions.distribution import Distribution\r\nfrom torch.distributions.utils import broadcast_all, probs_to_logits, logits_to_probs, lazy_property\r\nfrom torch.nn.functional import binary_cross_entropy_with_logits\r\n\r\n\r\nclass Geometric(Distribution):\r\n r\"\"\"\r\n Creates a Geometric distribution parameterized by :attr:`probs`,\r\n where :attr:`probs` is the probability of success of Bernoulli trials.\r\n It represents the probability that in :math:`k + 1` Bernoulli trials, the\r\n first :math:`k` trials failed, before seeing a success.\r\n\r\n Samples are non-negative integers [0, :math:`\\inf`).\r\n\r\n Example::\r\n\r\n >>> m = Geometric(torch.tensor([0.3]))\r\n >>> m.sample() # underlying Bernoulli has 30% chance 1; 70% chance 0\r\n tensor([ 2.])\r\n\r\n Args:\r\n probs (Number, Tensor): the probability of sampling `1`. Must be in range (0, 1]\r\n logits (Number, Tensor): the log-odds of sampling `1`.\r\n \"\"\"\r\n arg_constraints = {'probs': constraints.unit_interval,\r\n 'logits': constraints.real}\r\n support = constraints.nonnegative_integer\r\n\r\n def __init__(self, probs=None, logits=None, validate_args=None):\r\n if (probs is None) == (logits is None):\r\n raise ValueError(\"Either `probs` or `logits` must be specified, but not both.\")\r\n if probs is not None:\r\n self.probs, = broadcast_all(probs)\r\n else:\r\n self.logits, = broadcast_all(logits)\r\n probs_or_logits = probs if probs is not None else logits\r\n if isinstance(probs_or_logits, Number):\r\n batch_shape = torch.Size()\r\n else:\r\n batch_shape = probs_or_logits.size()\r\n super(Geometric, self).__init__(batch_shape, validate_args=validate_args)\r\n if self._validate_args and probs is not None:\r\n # Add an extra check beyond unit_interval\r\n value = self.probs\r\n valid = value > 0\r\n if not valid.all():\r\n invalid_value = value.data[~valid]\r\n raise ValueError(\r\n \"Expected parameter probs \"\r\n f\"({type(value).__name__} of shape {tuple(value.shape)}) \"\r\n f\"of distribution {repr(self)} \"\r\n f\"to be positive but found invalid values:\\n{invalid_value}\"\r\n )\r\n\r\n def expand(self, batch_shape, _instance=None):\r\n new = self._get_checked_instance(Geometric, _instance)\r\n batch_shape = torch.Size(batch_shape)\r\n if 'probs' in self.__dict__:\r\n new.probs = self.probs.expand(batch_shape)\r\n if 'logits' in self.__dict__:\r\n new.logits = self.logits.expand(batch_shape)\r\n super(Geometric, new).__init__(batch_shape, validate_args=False)\r\n new._validate_args = self._validate_args\r\n return new\r\n\r\n @property\r\n def mean(self):\r\n return 1. / self.probs - 1.\r\n\r\n @property\r\n def variance(self):\r\n return (1. / self.probs - 1.) 
/ self.probs\r\n\r\n @lazy_property\r\n def logits(self):\r\n return probs_to_logits(self.probs, is_binary=True)\r\n\r\n @lazy_property\r\n def probs(self):\r\n return logits_to_probs(self.logits, is_binary=True)\r\n\r\n def sample(self, sample_shape=torch.Size()):\r\n shape = self._extended_shape(sample_shape)\r\n tiny = torch.finfo(self.probs.dtype).tiny\r\n with torch.no_grad():\r\n if torch._C._get_tracing_state():\r\n # [JIT WORKAROUND] lack of support for .uniform_()\r\n u = torch.rand(shape, dtype=self.probs.dtype, device=self.probs.device)\r\n u = u.clamp(min=tiny)\r\n else:\r\n u = self.probs.new(shape).uniform_(tiny, 1)\r\n return (u.log() / (-self.probs).log1p()).floor()\r\n\r\n def log_prob(self, value):\r\n if self._validate_args:\r\n self._validate_sample(value)\r\n value, probs = broadcast_all(value, self.probs)\r\n probs = probs.clone(memory_format=torch.contiguous_format)\r\n probs[(probs == 1) & (value == 0)] = 0\r\n return value * (-probs).log1p() + self.probs.log()\r\n\r\n def entropy(self):\r\n return binary_cross_entropy_with_logits(self.logits, self.probs, reduction='none') / self.probs\r\n",
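A minimal usage sketch of the Geometric distribution defined above; it relies only on the methods implemented in this file (sample, log_prob, mean, variance) and assumes a standard PyTorch install where the class is exposed as torch.distributions.Geometric.

import torch
from torch.distributions import Geometric

p = torch.tensor([0.3])
m = Geometric(p)                  # success probability of each Bernoulli trial
x = m.sample((5,))                # shape (5, 1): failures before the first success
print(m.log_prob(x))              # x * log(1 - p) + log(p)
print(m.mean, m.variance)         # (1 - p) / p and (1 - p) / p**2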
"import copy\r\n\r\nimport torch\r\nfrom torch import nn\r\n\r\nimport torch.nn.functional as F\r\nimport torch.nn.intrinsic as nni\r\nimport torch.nn.intrinsic.quantized as nniq\r\nimport torch.nn.intrinsic.quantized.dynamic as nniqd\r\nimport torch.nn.intrinsic.qat as nniqat\r\nimport torch.nn.quantized as nnq\r\nimport torch.nn.quantized._reference as nnqr\r\nimport torch.nn.quantized.dynamic as nnqd\r\nimport torch.nn.qat as nnqat\r\n\r\nfrom typing import Optional, Union, Dict, Set, Callable, Any\r\n\r\nfrom torch.ao.quantization.stubs import QuantStub, DeQuantStub\r\nfrom torch.ao.quantization.fake_quantize import (\r\n default_affine_fixed_qparams_fake_quant,\r\n default_symmetric_fixed_qparams_fake_quant,\r\n)\r\nfrom torch.ao.quantization.utils import get_combined_dict\r\n\r\n# Default map for swapping float module to reference quantized modules\r\nDEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS : Dict[Callable, Any] = {\r\n nn.Linear: nnqr.Linear,\r\n nn.Conv1d: nnqr.Conv1d,\r\n nn.Conv2d: nnqr.Conv2d,\r\n nn.Conv3d: nnqr.Conv3d,\r\n}\r\n\r\n# Default map for swapping float module to quantized ones\r\nDEFAULT_STATIC_QUANT_MODULE_MAPPINGS : Dict[Callable, Any] = {\r\n QuantStub: nnq.Quantize,\r\n DeQuantStub: nnq.DeQuantize,\r\n nn.BatchNorm2d: nnq.BatchNorm2d,\r\n nn.BatchNorm3d: nnq.BatchNorm3d,\r\n nn.Conv1d: nnq.Conv1d,\r\n nn.Conv2d: nnq.Conv2d,\r\n nn.Conv3d: nnq.Conv3d,\r\n nn.ConvTranspose1d: nnq.ConvTranspose1d,\r\n nn.ConvTranspose2d: nnq.ConvTranspose2d,\r\n nn.ELU: nnq.ELU,\r\n nn.Embedding: nnq.Embedding,\r\n nn.EmbeddingBag: nnq.EmbeddingBag,\r\n nn.GroupNorm: nnq.GroupNorm,\r\n nn.Hardswish: nnq.Hardswish,\r\n nn.InstanceNorm1d: nnq.InstanceNorm1d,\r\n nn.InstanceNorm2d: nnq.InstanceNorm2d,\r\n nn.InstanceNorm3d: nnq.InstanceNorm3d,\r\n nn.LayerNorm: nnq.LayerNorm,\r\n nn.LeakyReLU: nnq.LeakyReLU,\r\n nn.modules.linear.NonDynamicallyQuantizableLinear: nnq.Linear,\r\n nn.Linear: nnq.Linear,\r\n nn.ReLU6: nnq.ReLU6,\r\n # Wrapper Modules:\r\n nnq.FloatFunctional: nnq.QFunctional,\r\n # Intrinsic modules:\r\n nni.BNReLU2d: nniq.BNReLU2d,\r\n nni.BNReLU3d: nniq.BNReLU3d,\r\n nni.ConvReLU1d: nniq.ConvReLU1d,\r\n nni.ConvReLU2d: nniq.ConvReLU2d,\r\n nni.ConvReLU3d: nniq.ConvReLU3d,\r\n nni.LinearReLU: nniq.LinearReLU,\r\n nniqat.ConvBn1d: nnq.Conv1d,\r\n nniqat.ConvBn2d: nnq.Conv2d,\r\n nniqat.ConvBn3d: nnq.Conv3d,\r\n nniqat.ConvBnReLU1d: nniq.ConvReLU1d,\r\n nniqat.ConvBnReLU2d: nniq.ConvReLU2d,\r\n nniqat.ConvBnReLU3d: nniq.ConvReLU3d,\r\n nniqat.ConvReLU2d: nniq.ConvReLU2d,\r\n nniqat.ConvReLU3d: nniq.ConvReLU3d,\r\n nniqat.LinearReLU: nniq.LinearReLU,\r\n # QAT modules:\r\n nnqat.Linear: nnq.Linear,\r\n nnqat.Conv2d: nnq.Conv2d,\r\n nnqat.Conv3d: nnq.Conv3d,\r\n}\r\n\r\n# Default map for swapping float module to qat modules\r\nDEFAULT_QAT_MODULE_MAPPINGS : Dict[Callable, Any] = {\r\n nn.Conv2d: nnqat.Conv2d,\r\n nn.Conv3d: nnqat.Conv3d,\r\n nn.Linear: nnqat.Linear,\r\n nn.modules.linear.NonDynamicallyQuantizableLinear: nnqat.Linear,\r\n # Intrinsic modules:\r\n nni.ConvBn1d: nniqat.ConvBn1d,\r\n nni.ConvBn2d: nniqat.ConvBn2d,\r\n nni.ConvBn3d: nniqat.ConvBn3d,\r\n nni.ConvBnReLU1d: nniqat.ConvBnReLU1d,\r\n nni.ConvBnReLU2d: nniqat.ConvBnReLU2d,\r\n nni.ConvBnReLU3d: nniqat.ConvBnReLU3d,\r\n nni.ConvReLU2d: nniqat.ConvReLU2d,\r\n nni.ConvReLU3d: nniqat.ConvReLU3d,\r\n nni.LinearReLU: nniqat.LinearReLU,\r\n}\r\n\r\n# Default map for swapping dynamic modules\r\nDEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS : Dict[Callable, Any] = {\r\n nn.GRUCell: nnqd.GRUCell,\r\n nn.Linear: 
nnqd.Linear,\r\n nn.modules.linear.NonDynamicallyQuantizableLinear: nnqd.Linear,\r\n nn.LSTM: nnqd.LSTM,\r\n nn.GRU: nnqd.GRU,\r\n nn.LSTMCell: nnqd.LSTMCell,\r\n nn.RNNCell: nnqd.RNNCell,\r\n nni.LinearReLU: nniqd.LinearReLU,\r\n}\r\n\r\n# Allowlist for propagating the qconfig\r\n_INCLUDE_QCONFIG_PROPAGATE_LIST : Set[Callable] = {\r\n nn.Sequential,\r\n}\r\n\r\n# Default mapping from floating point function or torch ops to quantized ops\r\n# TODO: merge with default static mapping\r\nDEFAULT_FLOAT_TO_QUANTIZED_OPERATOR_MAPPINGS : Dict[Union[Callable, str], Callable] = {\r\n F.elu: torch._ops.ops.quantized.elu,\r\n F.hardswish: torch._ops.ops.quantized.hardswish,\r\n F.instance_norm: torch._ops.ops.quantized.instance_norm,\r\n F.layer_norm: torch._ops.ops.quantized.layer_norm,\r\n F.leaky_relu: torch._ops.ops.quantized.leaky_relu,\r\n}\r\n\r\n# mapping from module to output activation post process class\r\nDEFAULT_MODULE_TO_ACT_POST_PROCESS : Dict[Callable, Callable] = {\r\n nn.Hardsigmoid: default_affine_fixed_qparams_fake_quant,\r\n nn.Sigmoid: default_affine_fixed_qparams_fake_quant,\r\n nn.Tanh: default_symmetric_fixed_qparams_fake_quant,\r\n}\r\n\r\ndef no_observer_set() -> Set[Any]:\r\n r\"\"\"These modules cannot have observers inserted by default.\"\"\"\r\n no_observers = set([\r\n nn.quantizable.LSTM,\r\n nn.quantizable.MultiheadAttention\r\n ])\r\n return no_observers\r\n\r\ndef get_default_static_quant_module_mappings() -> Dict[Callable, Any]:\r\n ''' Get module mapping for post training static quantization\r\n '''\r\n return copy.deepcopy(DEFAULT_STATIC_QUANT_MODULE_MAPPINGS)\r\n\r\ndef get_static_quant_module_class(\r\n float_module_class: Callable,\r\n additional_static_quant_mapping: Optional[Dict[Callable, Any]] = None,\r\n is_reference: bool = False) -> Any:\r\n r\"\"\"n Get the statically quantized module class corresponding to\r\n the floating point module class\r\n \"\"\"\r\n if additional_static_quant_mapping is None:\r\n additional_static_quant_mapping = {}\r\n all_mappings = get_combined_dict(\r\n DEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS if is_reference\r\n else DEFAULT_STATIC_QUANT_MODULE_MAPPINGS, additional_static_quant_mapping)\r\n static_quant_module_class = all_mappings.get(float_module_class, None)\r\n assert static_quant_module_class is not None, \\\r\n \"Floating point module class {}\".format(str(float_module_class)) + \\\r\n \" does not have a corresponding quantized module class\"\r\n return copy.deepcopy(static_quant_module_class)\r\n\r\ndef get_dynamic_quant_module_class(\r\n float_module_class: Callable,\r\n additional_dynamic_quant_mapping: Optional[Dict[Callable, Any]] = None) -> Any:\r\n r\"\"\"n Get the dynamically quantized module class corresponding to\r\n the floating point module class\r\n \"\"\"\r\n if additional_dynamic_quant_mapping is None:\r\n additional_dynamic_quant_mapping = {}\r\n all_mappings = get_combined_dict(DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS, additional_dynamic_quant_mapping)\r\n dynamic_quant_module_class = all_mappings.get(float_module_class, None)\r\n assert dynamic_quant_module_class is not None, \\\r\n \"Floating point module class {}\".format(str(float_module_class)) + \\\r\n \" does not have a corresponding quantized module class\"\r\n return copy.deepcopy(dynamic_quant_module_class)\r\n\r\ndef get_default_qat_module_mappings() -> Dict[Callable, Any]:\r\n ''' Get default module mapping for quantization aware training\r\n '''\r\n return copy.deepcopy(DEFAULT_QAT_MODULE_MAPPINGS)\r\n\r\ndef 
get_default_dynamic_quant_module_mappings() -> Dict[Callable, Any]:\r\n ''' Get module mapping for post training dynamic quantization\r\n '''\r\n return DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS\r\n\r\ndef get_default_qconfig_propagation_list() -> Set[Callable]:\r\n ''' Get the default list of module types that we'll attach qconfig\r\n attribute to in prepare\r\n '''\r\n QCONFIG_PROPAGATE_MODULE_CLASS_LIST = (\r\n (set(DEFAULT_STATIC_QUANT_MODULE_MAPPINGS.keys()) |\r\n set(DEFAULT_QAT_MODULE_MAPPINGS.keys()) |\r\n set(DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS.keys()) |\r\n _INCLUDE_QCONFIG_PROPAGATE_LIST)\r\n )\r\n return copy.deepcopy(QCONFIG_PROPAGATE_MODULE_CLASS_LIST)\r\n\r\ndef get_default_compare_output_module_list() -> Set[Callable]:\r\n ''' Get list of module class types that we will record output\r\n in numeric suite\r\n '''\r\n NUMERIC_SUITE_COMPARE_MODEL_OUTPUT_MODULE_LIST = (\r\n set(DEFAULT_STATIC_QUANT_MODULE_MAPPINGS.values())\r\n | set(DEFAULT_QAT_MODULE_MAPPINGS.values())\r\n | set(DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS.values())\r\n | set(DEFAULT_STATIC_QUANT_MODULE_MAPPINGS.keys())\r\n | set(DEFAULT_QAT_MODULE_MAPPINGS.keys())\r\n | set(DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS.keys())\r\n | _INCLUDE_QCONFIG_PROPAGATE_LIST\r\n )\r\n return copy.deepcopy(NUMERIC_SUITE_COMPARE_MODEL_OUTPUT_MODULE_LIST)\r\n\r\ndef get_default_float_to_quantized_operator_mappings(\r\n) -> Dict[Union[Callable, str], Callable]:\r\n return copy.deepcopy(DEFAULT_FLOAT_TO_QUANTIZED_OPERATOR_MAPPINGS)\r\n\r\n# TODO: merge with get_static_quant_module_class\r\ndef get_quantized_operator(float_op: Union[Callable, str]) -> Callable:\r\n ''' Get the quantized operator corresponding to the float operator\r\n '''\r\n quantized_op = DEFAULT_FLOAT_TO_QUANTIZED_OPERATOR_MAPPINGS.get(float_op, None)\r\n assert quantized_op is not None, \\\r\n 'Operator {} does not have corresponding quantized op'.format(str(float_op))\r\n return quantized_op\r\n\r\ndef _get_special_act_post_process(module: torch.nn.Module) -> Optional[Callable]:\r\n r\"\"\" Get the special activation post process for `module`, this has\r\n higher priority than the activation post process in `qconfig`\r\n e.g.\r\n input: torch.nn.Sigmoid\r\n output: default_affine_fixed_qparam_fake_quant\r\n \"\"\"\r\n return DEFAULT_MODULE_TO_ACT_POST_PROCESS.get(type(module), None)\r\n\r\ndef _has_special_act_post_process(module: torch.nn.Module) -> bool:\r\n return module.training and type(module) in DEFAULT_MODULE_TO_ACT_POST_PROCESS\r\n",
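The helpers above are plain dictionary lookups over the mapping tables defined at module level. A small sketch of how they are typically called; the import path is an assumption (torch.ao.quantization.quantization_mappings in recent PyTorch releases) and may differ in older versions.

import torch.nn as nn
# Assumed import path for the module defined above.
from torch.ao.quantization.quantization_mappings import (
    get_default_static_quant_module_mappings,
    get_static_quant_module_class,
    get_dynamic_quant_module_class,
)

static_map = get_default_static_quant_module_mappings()
print(static_map[nn.Linear])                  # torch.nn.quantized.Linear

# Resolve a single float class; an extra dict can override or extend the defaults.
qconv = get_static_quant_module_class(nn.Conv2d)
qlstm = get_dynamic_quant_module_class(nn.LSTM)
print(qconv, qlstm)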
"\"\"\"\r\nFunctions to operate on polynomials.\r\n\r\n\"\"\"\r\n__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',\r\n 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',\r\n 'polyfit', 'RankWarning']\r\n\r\nimport functools\r\nimport re\r\nimport warnings\r\nimport numpy.core.numeric as NX\r\n\r\nfrom numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,\r\n ones)\r\nfrom numpy.core import overrides\r\nfrom numpy.core.overrides import set_module\r\nfrom numpy.lib.twodim_base import diag, vander\r\nfrom numpy.lib.function_base import trim_zeros\r\nfrom numpy.lib.type_check import iscomplex, real, imag, mintypecode\r\nfrom numpy.linalg import eigvals, lstsq, inv\r\n\r\n\r\narray_function_dispatch = functools.partial(\r\n overrides.array_function_dispatch, module='numpy')\r\n\r\n\r\n@set_module('numpy')\r\nclass RankWarning(UserWarning):\r\n \"\"\"\r\n Issued by `polyfit` when the Vandermonde matrix is rank deficient.\r\n\r\n For more information, a way to suppress the warning, and an example of\r\n `RankWarning` being issued, see `polyfit`.\r\n\r\n \"\"\"\r\n pass\r\n\r\n\r\ndef _poly_dispatcher(seq_of_zeros):\r\n return seq_of_zeros\r\n\r\n\r\n@array_function_dispatch(_poly_dispatcher)\r\ndef poly(seq_of_zeros):\r\n \"\"\"\r\n Find the coefficients of a polynomial with the given sequence of roots.\r\n\r\n .. note::\r\n This forms part of the old polynomial API. Since version 1.4, the\r\n new polynomial API defined in `numpy.polynomial` is preferred.\r\n A summary of the differences can be found in the\r\n :doc:`transition guide </reference/routines.polynomials>`.\r\n\r\n Returns the coefficients of the polynomial whose leading coefficient\r\n is one for the given sequence of zeros (multiple roots must be included\r\n in the sequence as many times as their multiplicity; see Examples).\r\n A square matrix (or array, which will be treated as a matrix) can also\r\n be given, in which case the coefficients of the characteristic polynomial\r\n of the matrix are returned.\r\n\r\n Parameters\r\n ----------\r\n seq_of_zeros : array_like, shape (N,) or (N, N)\r\n A sequence of polynomial roots, or a square array or matrix object.\r\n\r\n Returns\r\n -------\r\n c : ndarray\r\n 1D array of polynomial coefficients from highest to lowest degree:\r\n\r\n ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``\r\n where c[0] always equals 1.\r\n\r\n Raises\r\n ------\r\n ValueError\r\n If input is the wrong shape (the input must be a 1-D or square\r\n 2-D array).\r\n\r\n See Also\r\n --------\r\n polyval : Compute polynomial values.\r\n roots : Return the roots of a polynomial.\r\n polyfit : Least squares polynomial fit.\r\n poly1d : A one-dimensional polynomial class.\r\n\r\n Notes\r\n -----\r\n Specifying the roots of a polynomial still leaves one degree of\r\n freedom, typically represented by an undetermined leading\r\n coefficient. [1]_ In the case of this function, that coefficient -\r\n the first one in the returned array - is always taken as one. (If\r\n for some reason you have one other point, the only automatic way\r\n presently to leverage that information is to use ``polyfit``.)\r\n\r\n The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`\r\n matrix **A** is given by\r\n\r\n :math:`p_a(t) = \\\\mathrm{det}(t\\\\, \\\\mathbf{I} - \\\\mathbf{A})`,\r\n\r\n where **I** is the `n`-by-`n` identity matrix. [2]_\r\n\r\n References\r\n ----------\r\n .. [1] M. Sullivan and M. 
Sullivan, III, \"Algebra and Trignometry,\r\n Enhanced With Graphing Utilities,\" Prentice-Hall, pg. 318, 1996.\r\n\r\n .. [2] G. Strang, \"Linear Algebra and Its Applications, 2nd Edition,\"\r\n Academic Press, pg. 182, 1980.\r\n\r\n Examples\r\n --------\r\n Given a sequence of a polynomial's zeros:\r\n\r\n >>> np.poly((0, 0, 0)) # Multiple root example\r\n array([1., 0., 0., 0.])\r\n\r\n The line above represents z**3 + 0*z**2 + 0*z + 0.\r\n\r\n >>> np.poly((-1./2, 0, 1./2))\r\n array([ 1. , 0. , -0.25, 0. ])\r\n\r\n The line above represents z**3 - z/4\r\n\r\n >>> np.poly((np.random.random(1)[0], 0, np.random.random(1)[0]))\r\n array([ 1. , -0.77086955, 0.08618131, 0. ]) # random\r\n\r\n Given a square array object:\r\n\r\n >>> P = np.array([[0, 1./3], [-1./2, 0]])\r\n >>> np.poly(P)\r\n array([1. , 0. , 0.16666667])\r\n\r\n Note how in all cases the leading coefficient is always 1.\r\n\r\n \"\"\"\r\n seq_of_zeros = atleast_1d(seq_of_zeros)\r\n sh = seq_of_zeros.shape\r\n\r\n if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:\r\n seq_of_zeros = eigvals(seq_of_zeros)\r\n elif len(sh) == 1:\r\n dt = seq_of_zeros.dtype\r\n # Let object arrays slip through, e.g. for arbitrary precision\r\n if dt != object:\r\n seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))\r\n else:\r\n raise ValueError(\"input must be 1d or non-empty square 2d array.\")\r\n\r\n if len(seq_of_zeros) == 0:\r\n return 1.0\r\n dt = seq_of_zeros.dtype\r\n a = ones((1,), dtype=dt)\r\n for zero in seq_of_zeros:\r\n a = NX.convolve(a, array([1, -zero], dtype=dt), mode='full')\r\n\r\n if issubclass(a.dtype.type, NX.complexfloating):\r\n # if complex roots are all complex conjugates, the roots are real.\r\n roots = NX.asarray(seq_of_zeros, complex)\r\n if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())):\r\n a = a.real.copy()\r\n\r\n return a\r\n\r\n\r\ndef _roots_dispatcher(p):\r\n return p\r\n\r\n\r\n@array_function_dispatch(_roots_dispatcher)\r\ndef roots(p):\r\n \"\"\"\r\n Return the roots of a polynomial with coefficients given in p.\r\n\r\n .. note::\r\n This forms part of the old polynomial API. Since version 1.4, the\r\n new polynomial API defined in `numpy.polynomial` is preferred.\r\n A summary of the differences can be found in the\r\n :doc:`transition guide </reference/routines.polynomials>`.\r\n\r\n The values in the rank-1 array `p` are coefficients of a polynomial.\r\n If the length of `p` is n+1 then the polynomial is described by::\r\n\r\n p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]\r\n\r\n Parameters\r\n ----------\r\n p : array_like\r\n Rank-1 array of polynomial coefficients.\r\n\r\n Returns\r\n -------\r\n out : ndarray\r\n An array containing the roots of the polynomial.\r\n\r\n Raises\r\n ------\r\n ValueError\r\n When `p` cannot be converted to a rank-1 array.\r\n\r\n See also\r\n --------\r\n poly : Find the coefficients of a polynomial with a given sequence\r\n of roots.\r\n polyval : Compute polynomial values.\r\n polyfit : Least squares polynomial fit.\r\n poly1d : A one-dimensional polynomial class.\r\n\r\n Notes\r\n -----\r\n The algorithm relies on computing the eigenvalues of the\r\n companion matrix [1]_.\r\n\r\n References\r\n ----------\r\n .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:\r\n Cambridge University Press, 1999, pp. 
146-7.\r\n\r\n Examples\r\n --------\r\n >>> coeff = [3.2, 2, 1]\r\n >>> np.roots(coeff)\r\n array([-0.3125+0.46351241j, -0.3125-0.46351241j])\r\n\r\n \"\"\"\r\n # If input is scalar, this makes it an array\r\n p = atleast_1d(p)\r\n if p.ndim != 1:\r\n raise ValueError(\"Input must be a rank-1 array.\")\r\n\r\n # find non-zero array entries\r\n non_zero = NX.nonzero(NX.ravel(p))[0]\r\n\r\n # Return an empty array if polynomial is all zeros\r\n if len(non_zero) == 0:\r\n return NX.array([])\r\n\r\n # find the number of trailing zeros -- this is the number of roots at 0.\r\n trailing_zeros = len(p) - non_zero[-1] - 1\r\n\r\n # strip leading and trailing zeros\r\n p = p[int(non_zero[0]):int(non_zero[-1])+1]\r\n\r\n # casting: if incoming array isn't floating point, make it floating point.\r\n if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):\r\n p = p.astype(float)\r\n\r\n N = len(p)\r\n if N > 1:\r\n # build companion matrix and find its eigenvalues (the roots)\r\n A = diag(NX.ones((N-2,), p.dtype), -1)\r\n A[0,:] = -p[1:] / p[0]\r\n roots = eigvals(A)\r\n else:\r\n roots = NX.array([])\r\n\r\n # tack any zeros onto the back of the array\r\n roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))\r\n return roots\r\n\r\n\r\ndef _polyint_dispatcher(p, m=None, k=None):\r\n return (p,)\r\n\r\n\r\n@array_function_dispatch(_polyint_dispatcher)\r\ndef polyint(p, m=1, k=None):\r\n \"\"\"\r\n Return an antiderivative (indefinite integral) of a polynomial.\r\n\r\n .. note::\r\n This forms part of the old polynomial API. Since version 1.4, the\r\n new polynomial API defined in `numpy.polynomial` is preferred.\r\n A summary of the differences can be found in the\r\n :doc:`transition guide </reference/routines.polynomials>`.\r\n\r\n The returned order `m` antiderivative `P` of polynomial `p` satisfies\r\n :math:`\\\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`\r\n integration constants `k`. The constants determine the low-order\r\n polynomial part\r\n\r\n .. math:: \\\\frac{k_{m-1}}{0!} x^0 + \\\\ldots + \\\\frac{k_0}{(m-1)!}x^{m-1}\r\n\r\n of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.\r\n\r\n Parameters\r\n ----------\r\n p : array_like or poly1d\r\n Polynomial to integrate.\r\n A sequence is interpreted as polynomial coefficients, see `poly1d`.\r\n m : int, optional\r\n Order of the antiderivative. (Default: 1)\r\n k : list of `m` scalars or scalar, optional\r\n Integration constants. They are given in the order of integration:\r\n those corresponding to highest-order terms come first.\r\n\r\n If ``None`` (default), all constants are assumed to be zero.\r\n If `m = 1`, a single scalar can be given instead of a list.\r\n\r\n See Also\r\n --------\r\n polyder : derivative of a polynomial\r\n poly1d.integ : equivalent method\r\n\r\n Examples\r\n --------\r\n The defining property of the antiderivative:\r\n\r\n >>> p = np.poly1d([1,1,1])\r\n >>> P = np.polyint(p)\r\n >>> P\r\n poly1d([ 0.33333333, 0.5 , 1. , 0. ]) # may vary\r\n >>> np.polyder(P) == p\r\n True\r\n\r\n The integration constants default to zero, but can be specified:\r\n\r\n >>> P = np.polyint(p, 3)\r\n >>> P(0)\r\n 0.0\r\n >>> np.polyder(P)(0)\r\n 0.0\r\n >>> np.polyder(P, 2)(0)\r\n 0.0\r\n >>> P = np.polyint(p, 3, k=[6,5,3])\r\n >>> P\r\n poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) # may vary\r\n\r\n Note that 3 = 6 / 2!, and that the constants are given in the order of\r\n integrations. 
Constant of the highest-order polynomial term comes first:\r\n\r\n >>> np.polyder(P, 2)(0)\r\n 6.0\r\n >>> np.polyder(P, 1)(0)\r\n 5.0\r\n >>> P(0)\r\n 3.0\r\n\r\n \"\"\"\r\n m = int(m)\r\n if m < 0:\r\n raise ValueError(\"Order of integral must be positive (see polyder)\")\r\n if k is None:\r\n k = NX.zeros(m, float)\r\n k = atleast_1d(k)\r\n if len(k) == 1 and m > 1:\r\n k = k[0]*NX.ones(m, float)\r\n if len(k) < m:\r\n raise ValueError(\r\n \"k must be a scalar or a rank-1 array of length 1 or >m.\")\r\n\r\n truepoly = isinstance(p, poly1d)\r\n p = NX.asarray(p)\r\n if m == 0:\r\n if truepoly:\r\n return poly1d(p)\r\n return p\r\n else:\r\n # Note: this must work also with object and integer arrays\r\n y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))\r\n val = polyint(y, m - 1, k=k[1:])\r\n if truepoly:\r\n return poly1d(val)\r\n return val\r\n\r\n\r\ndef _polyder_dispatcher(p, m=None):\r\n return (p,)\r\n\r\n\r\n@array_function_dispatch(_polyder_dispatcher)\r\ndef polyder(p, m=1):\r\n \"\"\"\r\n Return the derivative of the specified order of a polynomial.\r\n\r\n .. note::\r\n This forms part of the old polynomial API. Since version 1.4, the\r\n new polynomial API defined in `numpy.polynomial` is preferred.\r\n A summary of the differences can be found in the\r\n :doc:`transition guide </reference/routines.polynomials>`.\r\n\r\n Parameters\r\n ----------\r\n p : poly1d or sequence\r\n Polynomial to differentiate.\r\n A sequence is interpreted as polynomial coefficients, see `poly1d`.\r\n m : int, optional\r\n Order of differentiation (default: 1)\r\n\r\n Returns\r\n -------\r\n der : poly1d\r\n A new polynomial representing the derivative.\r\n\r\n See Also\r\n --------\r\n polyint : Anti-derivative of a polynomial.\r\n poly1d : Class for one-dimensional polynomials.\r\n\r\n Examples\r\n --------\r\n The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:\r\n\r\n >>> p = np.poly1d([1,1,1,1])\r\n >>> p2 = np.polyder(p)\r\n >>> p2\r\n poly1d([3, 2, 1])\r\n\r\n which evaluates to:\r\n\r\n >>> p2(2.)\r\n 17.0\r\n\r\n We can verify this, approximating the derivative with\r\n ``(f(x + h) - f(x))/h``:\r\n\r\n >>> (p(2. + 0.001) - p(2.)) / 0.001\r\n 17.007000999997857\r\n\r\n The fourth-order derivative of a 3rd-order polynomial is zero:\r\n\r\n >>> np.polyder(p, 2)\r\n poly1d([6, 2])\r\n >>> np.polyder(p, 3)\r\n poly1d([6])\r\n >>> np.polyder(p, 4)\r\n poly1d([0])\r\n\r\n \"\"\"\r\n m = int(m)\r\n if m < 0:\r\n raise ValueError(\"Order of derivative must be positive (see polyint)\")\r\n\r\n truepoly = isinstance(p, poly1d)\r\n p = NX.asarray(p)\r\n n = len(p) - 1\r\n y = p[:-1] * NX.arange(n, 0, -1)\r\n if m == 0:\r\n val = p\r\n else:\r\n val = polyder(y, m - 1)\r\n if truepoly:\r\n val = poly1d(val)\r\n return val\r\n\r\n\r\ndef _polyfit_dispatcher(x, y, deg, rcond=None, full=None, w=None, cov=None):\r\n return (x, y, w)\r\n\r\n\r\n@array_function_dispatch(_polyfit_dispatcher)\r\ndef polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):\r\n \"\"\"\r\n Least squares polynomial fit.\r\n\r\n .. note::\r\n This forms part of the old polynomial API. Since version 1.4, the\r\n new polynomial API defined in `numpy.polynomial` is preferred.\r\n A summary of the differences can be found in the\r\n :doc:`transition guide </reference/routines.polynomials>`.\r\n\r\n Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`\r\n to points `(x, y)`. 
Returns a vector of coefficients `p` that minimises\r\n the squared error in the order `deg`, `deg-1`, ... `0`.\r\n\r\n The `Polynomial.fit <numpy.polynomial.polynomial.Polynomial.fit>` class\r\n method is recommended for new code as it is more stable numerically. See\r\n the documentation of the method for more information.\r\n\r\n Parameters\r\n ----------\r\n x : array_like, shape (M,)\r\n x-coordinates of the M sample points ``(x[i], y[i])``.\r\n y : array_like, shape (M,) or (M, K)\r\n y-coordinates of the sample points. Several data sets of sample\r\n points sharing the same x-coordinates can be fitted at once by\r\n passing in a 2D-array that contains one dataset per column.\r\n deg : int\r\n Degree of the fitting polynomial\r\n rcond : float, optional\r\n Relative condition number of the fit. Singular values smaller than\r\n this relative to the largest singular value will be ignored. The\r\n default value is len(x)*eps, where eps is the relative precision of\r\n the float type, about 2e-16 in most cases.\r\n full : bool, optional\r\n Switch determining nature of return value. When it is False (the\r\n default) just the coefficients are returned, when True diagnostic\r\n information from the singular value decomposition is also returned.\r\n w : array_like, shape (M,), optional\r\n Weights. If not None, the weight ``w[i]`` applies to the unsquared\r\n residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are\r\n chosen so that the errors of the products ``w[i]*y[i]`` all have the\r\n same variance. When using inverse-variance weighting, use\r\n ``w[i] = 1/sigma(y[i])``. The default value is None.\r\n cov : bool or str, optional\r\n If given and not `False`, return not just the estimate but also its\r\n covariance matrix. By default, the covariance are scaled by\r\n chi2/dof, where dof = M - (deg + 1), i.e., the weights are presumed\r\n to be unreliable except in a relative sense and everything is scaled\r\n such that the reduced chi2 is unity. This scaling is omitted if\r\n ``cov='unscaled'``, as is relevant for the case that the weights are\r\n w = 1/sigma, with sigma known to be a reliable estimate of the\r\n uncertainty.\r\n\r\n Returns\r\n -------\r\n p : ndarray, shape (deg + 1,) or (deg + 1, K)\r\n Polynomial coefficients, highest power first. If `y` was 2-D, the\r\n coefficients for `k`-th data set are in ``p[:,k]``.\r\n\r\n residuals, rank, singular_values, rcond\r\n These values are only returned if ``full == True``\r\n\r\n - residuals -- sum of squared residuals of the least squares fit\r\n - rank -- the effective rank of the scaled Vandermonde\r\n coefficient matrix\r\n - singular_values -- singular values of the scaled Vandermonde\r\n coefficient matrix\r\n - rcond -- value of `rcond`.\r\n\r\n For more details, see `numpy.linalg.lstsq`.\r\n\r\n V : ndarray, shape (M,M) or (M,M,K)\r\n Present only if ``full == False`` and ``cov == True``. The covariance\r\n matrix of the polynomial coefficient estimates. The diagonal of\r\n this matrix are the variance estimates for each coefficient. If y\r\n is a 2-D array, then the covariance matrix for the `k`-th data set\r\n are in ``V[:,:,k]``\r\n\r\n\r\n Warns\r\n -----\r\n RankWarning\r\n The rank of the coefficient matrix in the least-squares fit is\r\n deficient. 
The warning is only raised if ``full == False``.\r\n\r\n The warnings can be turned off by\r\n\r\n >>> import warnings\r\n >>> warnings.simplefilter('ignore', np.RankWarning)\r\n\r\n See Also\r\n --------\r\n polyval : Compute polynomial values.\r\n linalg.lstsq : Computes a least-squares fit.\r\n scipy.interpolate.UnivariateSpline : Computes spline fits.\r\n\r\n Notes\r\n -----\r\n The solution minimizes the squared error\r\n\r\n .. math::\r\n E = \\\\sum_{j=0}^k |p(x_j) - y_j|^2\r\n\r\n in the equations::\r\n\r\n x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]\r\n x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]\r\n ...\r\n x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]\r\n\r\n The coefficient matrix of the coefficients `p` is a Vandermonde matrix.\r\n\r\n `polyfit` issues a `RankWarning` when the least-squares fit is badly\r\n conditioned. This implies that the best fit is not well-defined due\r\n to numerical error. The results may be improved by lowering the polynomial\r\n degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter\r\n can also be set to a value smaller than its default, but the resulting\r\n fit may be spurious: including contributions from the small singular\r\n values can add numerical noise to the result.\r\n\r\n Note that fitting polynomial coefficients is inherently badly conditioned\r\n when the degree of the polynomial is large or the interval of sample points\r\n is badly centered. The quality of the fit should always be checked in these\r\n cases. When polynomial fits are not satisfactory, splines may be a good\r\n alternative.\r\n\r\n References\r\n ----------\r\n .. [1] Wikipedia, \"Curve fitting\",\r\n https://en.wikipedia.org/wiki/Curve_fitting\r\n .. [2] Wikipedia, \"Polynomial interpolation\",\r\n https://en.wikipedia.org/wiki/Polynomial_interpolation\r\n\r\n Examples\r\n --------\r\n >>> import warnings\r\n >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])\r\n >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])\r\n >>> z = np.polyfit(x, y, 3)\r\n >>> z\r\n array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) # may vary\r\n\r\n It is convenient to use `poly1d` objects for dealing with polynomials:\r\n\r\n >>> p = np.poly1d(z)\r\n >>> p(0.5)\r\n 0.6143849206349179 # may vary\r\n >>> p(3.5)\r\n -0.34732142857143039 # may vary\r\n >>> p(10)\r\n 22.579365079365115 # may vary\r\n\r\n High-order polynomials may oscillate wildly:\r\n\r\n >>> with warnings.catch_warnings():\r\n ... warnings.simplefilter('ignore', np.RankWarning)\r\n ... 
p30 = np.poly1d(np.polyfit(x, y, 30))\r\n ...\r\n >>> p30(4)\r\n -0.80000000000000204 # may vary\r\n >>> p30(5)\r\n -0.99999999999999445 # may vary\r\n >>> p30(4.5)\r\n -0.10547061179440398 # may vary\r\n\r\n Illustration:\r\n\r\n >>> import matplotlib.pyplot as plt\r\n >>> xp = np.linspace(-2, 6, 100)\r\n >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')\r\n >>> plt.ylim(-2,2)\r\n (-2, 2)\r\n >>> plt.show()\r\n\r\n \"\"\"\r\n order = int(deg) + 1\r\n x = NX.asarray(x) + 0.0\r\n y = NX.asarray(y) + 0.0\r\n\r\n # check arguments.\r\n if deg < 0:\r\n raise ValueError(\"expected deg >= 0\")\r\n if x.ndim != 1:\r\n raise TypeError(\"expected 1D vector for x\")\r\n if x.size == 0:\r\n raise TypeError(\"expected non-empty vector for x\")\r\n if y.ndim < 1 or y.ndim > 2:\r\n raise TypeError(\"expected 1D or 2D array for y\")\r\n if x.shape[0] != y.shape[0]:\r\n raise TypeError(\"expected x and y to have same length\")\r\n\r\n # set rcond\r\n if rcond is None:\r\n rcond = len(x)*finfo(x.dtype).eps\r\n\r\n # set up least squares equation for powers of x\r\n lhs = vander(x, order)\r\n rhs = y\r\n\r\n # apply weighting\r\n if w is not None:\r\n w = NX.asarray(w) + 0.0\r\n if w.ndim != 1:\r\n raise TypeError(\"expected a 1-d array for weights\")\r\n if w.shape[0] != y.shape[0]:\r\n raise TypeError(\"expected w and y to have the same length\")\r\n lhs *= w[:, NX.newaxis]\r\n if rhs.ndim == 2:\r\n rhs *= w[:, NX.newaxis]\r\n else:\r\n rhs *= w\r\n\r\n # scale lhs to improve condition number and solve\r\n scale = NX.sqrt((lhs*lhs).sum(axis=0))\r\n lhs /= scale\r\n c, resids, rank, s = lstsq(lhs, rhs, rcond)\r\n c = (c.T/scale).T # broadcast scale coefficients\r\n\r\n # warn on rank reduction, which indicates an ill conditioned matrix\r\n if rank != order and not full:\r\n msg = \"Polyfit may be poorly conditioned\"\r\n warnings.warn(msg, RankWarning, stacklevel=4)\r\n\r\n if full:\r\n return c, resids, rank, s, rcond\r\n elif cov:\r\n Vbase = inv(dot(lhs.T, lhs))\r\n Vbase /= NX.outer(scale, scale)\r\n if cov == \"unscaled\":\r\n fac = 1\r\n else:\r\n if len(x) <= order:\r\n raise ValueError(\"the number of data points must exceed order \"\r\n \"to scale the covariance matrix\")\r\n # note, this used to be: fac = resids / (len(x) - order - 2.0)\r\n # it was deciced that the \"- 2\" (originally justified by \"Bayesian\r\n # uncertainty analysis\") is not was the user expects\r\n # (see gh-11196 and gh-11197)\r\n fac = resids / (len(x) - order)\r\n if y.ndim == 1:\r\n return c, Vbase * fac\r\n else:\r\n return c, Vbase[:,:, NX.newaxis] * fac\r\n else:\r\n return c\r\n\r\n\r\ndef _polyval_dispatcher(p, x):\r\n return (p, x)\r\n\r\n\r\n@array_function_dispatch(_polyval_dispatcher)\r\ndef polyval(p, x):\r\n \"\"\"\r\n Evaluate a polynomial at specific values.\r\n\r\n .. note::\r\n This forms part of the old polynomial API. Since version 1.4, the\r\n new polynomial API defined in `numpy.polynomial` is preferred.\r\n A summary of the differences can be found in the\r\n :doc:`transition guide </reference/routines.polynomials>`.\r\n\r\n If `p` is of length N, this function returns the value:\r\n\r\n ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... 
+ p[N-2]*x + p[N-1]``\r\n\r\n If `x` is a sequence, then ``p(x)`` is returned for each element of ``x``.\r\n If `x` is another polynomial then the composite polynomial ``p(x(t))``\r\n is returned.\r\n\r\n Parameters\r\n ----------\r\n p : array_like or poly1d object\r\n 1D array of polynomial coefficients (including coefficients equal\r\n to zero) from highest degree to the constant term, or an\r\n instance of poly1d.\r\n x : array_like or poly1d object\r\n A number, an array of numbers, or an instance of poly1d, at\r\n which to evaluate `p`.\r\n\r\n Returns\r\n -------\r\n values : ndarray or poly1d\r\n If `x` is a poly1d instance, the result is the composition of the two\r\n polynomials, i.e., `x` is \"substituted\" in `p` and the simplified\r\n result is returned. In addition, the type of `x` - array_like or\r\n poly1d - governs the type of the output: `x` array_like => `values`\r\n array_like, `x` a poly1d object => `values` is also.\r\n\r\n See Also\r\n --------\r\n poly1d: A polynomial class.\r\n\r\n Notes\r\n -----\r\n Horner's scheme [1]_ is used to evaluate the polynomial. Even so,\r\n for polynomials of high degree the values may be inaccurate due to\r\n rounding errors. Use carefully.\r\n\r\n If `x` is a subtype of `ndarray` the return value will be of the same type.\r\n\r\n References\r\n ----------\r\n .. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.\r\n trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand\r\n Reinhold Co., 1985, pg. 720.\r\n\r\n Examples\r\n --------\r\n >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1\r\n 76\r\n >>> np.polyval([3,0,1], np.poly1d(5))\r\n poly1d([76])\r\n >>> np.polyval(np.poly1d([3,0,1]), 5)\r\n 76\r\n >>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))\r\n poly1d([76])\r\n\r\n \"\"\"\r\n p = NX.asarray(p)\r\n if isinstance(x, poly1d):\r\n y = 0\r\n else:\r\n x = NX.asanyarray(x)\r\n y = NX.zeros_like(x)\r\n for pv in p:\r\n y = y * x + pv\r\n return y\r\n\r\n\r\ndef _binary_op_dispatcher(a1, a2):\r\n return (a1, a2)\r\n\r\n\r\n@array_function_dispatch(_binary_op_dispatcher)\r\ndef polyadd(a1, a2):\r\n \"\"\"\r\n Find the sum of two polynomials.\r\n\r\n .. note::\r\n This forms part of the old polynomial API. Since version 1.4, the\r\n new polynomial API defined in `numpy.polynomial` is preferred.\r\n A summary of the differences can be found in the\r\n :doc:`transition guide </reference/routines.polynomials>`.\r\n\r\n Returns the polynomial resulting from the sum of two input polynomials.\r\n Each input must be either a poly1d object or a 1D sequence of polynomial\r\n coefficients, from highest to lowest degree.\r\n\r\n Parameters\r\n ----------\r\n a1, a2 : array_like or poly1d object\r\n Input polynomials.\r\n\r\n Returns\r\n -------\r\n out : ndarray or poly1d object\r\n The sum of the inputs. If either input is a poly1d object, then the\r\n output is also a poly1d object. 
Otherwise, it is a 1D array of\r\n polynomial coefficients from highest to lowest degree.\r\n\r\n See Also\r\n --------\r\n poly1d : A one-dimensional polynomial class.\r\n poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval\r\n\r\n Examples\r\n --------\r\n >>> np.polyadd([1, 2], [9, 5, 4])\r\n array([9, 6, 6])\r\n\r\n Using poly1d objects:\r\n\r\n >>> p1 = np.poly1d([1, 2])\r\n >>> p2 = np.poly1d([9, 5, 4])\r\n >>> print(p1)\r\n 1 x + 2\r\n >>> print(p2)\r\n 2\r\n 9 x + 5 x + 4\r\n >>> print(np.polyadd(p1, p2))\r\n 2\r\n 9 x + 6 x + 6\r\n\r\n \"\"\"\r\n truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))\r\n a1 = atleast_1d(a1)\r\n a2 = atleast_1d(a2)\r\n diff = len(a2) - len(a1)\r\n if diff == 0:\r\n val = a1 + a2\r\n elif diff > 0:\r\n zr = NX.zeros(diff, a1.dtype)\r\n val = NX.concatenate((zr, a1)) + a2\r\n else:\r\n zr = NX.zeros(abs(diff), a2.dtype)\r\n val = a1 + NX.concatenate((zr, a2))\r\n if truepoly:\r\n val = poly1d(val)\r\n return val\r\n\r\n\r\n@array_function_dispatch(_binary_op_dispatcher)\r\ndef polysub(a1, a2):\r\n \"\"\"\r\n Difference (subtraction) of two polynomials.\r\n\r\n .. note::\r\n This forms part of the old polynomial API. Since version 1.4, the\r\n new polynomial API defined in `numpy.polynomial` is preferred.\r\n A summary of the differences can be found in the\r\n :doc:`transition guide </reference/routines.polynomials>`.\r\n\r\n Given two polynomials `a1` and `a2`, returns ``a1 - a2``.\r\n `a1` and `a2` can be either array_like sequences of the polynomials'\r\n coefficients (including coefficients equal to zero), or `poly1d` objects.\r\n\r\n Parameters\r\n ----------\r\n a1, a2 : array_like or poly1d\r\n Minuend and subtrahend polynomials, respectively.\r\n\r\n Returns\r\n -------\r\n out : ndarray or poly1d\r\n Array or `poly1d` object of the difference polynomial's coefficients.\r\n\r\n See Also\r\n --------\r\n polyval, polydiv, polymul, polyadd\r\n\r\n Examples\r\n --------\r\n .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)\r\n\r\n >>> np.polysub([2, 10, -2], [3, 10, -4])\r\n array([-1, 0, 2])\r\n\r\n \"\"\"\r\n truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))\r\n a1 = atleast_1d(a1)\r\n a2 = atleast_1d(a2)\r\n diff = len(a2) - len(a1)\r\n if diff == 0:\r\n val = a1 - a2\r\n elif diff > 0:\r\n zr = NX.zeros(diff, a1.dtype)\r\n val = NX.concatenate((zr, a1)) - a2\r\n else:\r\n zr = NX.zeros(abs(diff), a2.dtype)\r\n val = a1 - NX.concatenate((zr, a2))\r\n if truepoly:\r\n val = poly1d(val)\r\n return val\r\n\r\n\r\n@array_function_dispatch(_binary_op_dispatcher)\r\ndef polymul(a1, a2):\r\n \"\"\"\r\n Find the product of two polynomials.\r\n\r\n .. note::\r\n This forms part of the old polynomial API. Since version 1.4, the\r\n new polynomial API defined in `numpy.polynomial` is preferred.\r\n A summary of the differences can be found in the\r\n :doc:`transition guide </reference/routines.polynomials>`.\r\n\r\n Finds the polynomial resulting from the multiplication of the two input\r\n polynomials. Each input must be either a poly1d object or a 1D sequence\r\n of polynomial coefficients, from highest to lowest degree.\r\n\r\n Parameters\r\n ----------\r\n a1, a2 : array_like or poly1d object\r\n Input polynomials.\r\n\r\n Returns\r\n -------\r\n out : ndarray or poly1d object\r\n The polynomial resulting from the multiplication of the inputs. If\r\n either inputs is a poly1d object, then the output is also a poly1d\r\n object. 
Otherwise, it is a 1D array of polynomial coefficients from\r\n highest to lowest degree.\r\n\r\n See Also\r\n --------\r\n poly1d : A one-dimensional polynomial class.\r\n poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval\r\n convolve : Array convolution. Same output as polymul, but has parameter\r\n for overlap mode.\r\n\r\n Examples\r\n --------\r\n >>> np.polymul([1, 2, 3], [9, 5, 1])\r\n array([ 9, 23, 38, 17, 3])\r\n\r\n Using poly1d objects:\r\n\r\n >>> p1 = np.poly1d([1, 2, 3])\r\n >>> p2 = np.poly1d([9, 5, 1])\r\n >>> print(p1)\r\n 2\r\n 1 x + 2 x + 3\r\n >>> print(p2)\r\n 2\r\n 9 x + 5 x + 1\r\n >>> print(np.polymul(p1, p2))\r\n 4 3 2\r\n 9 x + 23 x + 38 x + 17 x + 3\r\n\r\n \"\"\"\r\n truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))\r\n a1, a2 = poly1d(a1), poly1d(a2)\r\n val = NX.convolve(a1, a2)\r\n if truepoly:\r\n val = poly1d(val)\r\n return val\r\n\r\n\r\ndef _polydiv_dispatcher(u, v):\r\n return (u, v)\r\n\r\n\r\n@array_function_dispatch(_polydiv_dispatcher)\r\ndef polydiv(u, v):\r\n \"\"\"\r\n Returns the quotient and remainder of polynomial division.\r\n\r\n .. note::\r\n This forms part of the old polynomial API. Since version 1.4, the\r\n new polynomial API defined in `numpy.polynomial` is preferred.\r\n A summary of the differences can be found in the\r\n :doc:`transition guide </reference/routines.polynomials>`.\r\n\r\n The input arrays are the coefficients (including any coefficients\r\n equal to zero) of the \"numerator\" (dividend) and \"denominator\"\r\n (divisor) polynomials, respectively.\r\n\r\n Parameters\r\n ----------\r\n u : array_like or poly1d\r\n Dividend polynomial's coefficients.\r\n\r\n v : array_like or poly1d\r\n Divisor polynomial's coefficients.\r\n\r\n Returns\r\n -------\r\n q : ndarray\r\n Coefficients, including those equal to zero, of the quotient.\r\n r : ndarray\r\n Coefficients, including those equal to zero, of the remainder.\r\n\r\n See Also\r\n --------\r\n poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub\r\n polyval\r\n\r\n Notes\r\n -----\r\n Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need\r\n not equal `v.ndim`. In other words, all four possible combinations -\r\n ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,\r\n ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.\r\n\r\n Examples\r\n --------\r\n .. math:: \\\\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25\r\n\r\n >>> x = np.array([3.0, 5.0, 2.0])\r\n >>> y = np.array([2.0, 1.0])\r\n >>> np.polydiv(x, y)\r\n (array([1.5 , 1.75]), array([0.25]))\r\n\r\n \"\"\"\r\n truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))\r\n u = atleast_1d(u) + 0.0\r\n v = atleast_1d(v) + 0.0\r\n # w has the common type\r\n w = u[0] + v[0]\r\n m = len(u) - 1\r\n n = len(v) - 1\r\n scale = 1. 
/ v[0]\r\n q = NX.zeros((max(m - n + 1, 1),), w.dtype)\r\n r = u.astype(w.dtype)\r\n for k in range(0, m-n+1):\r\n d = scale * r[k]\r\n q[k] = d\r\n r[k:k+n+1] -= d*v\r\n while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):\r\n r = r[1:]\r\n if truepoly:\r\n return poly1d(q), poly1d(r)\r\n return q, r\r\n\r\n_poly_mat = re.compile(r\"\\*\\*([0-9]*)\")\r\ndef _raise_power(astr, wrap=70):\r\n n = 0\r\n line1 = ''\r\n line2 = ''\r\n output = ' '\r\n while True:\r\n mat = _poly_mat.search(astr, n)\r\n if mat is None:\r\n break\r\n span = mat.span()\r\n power = mat.groups()[0]\r\n partstr = astr[n:span[0]]\r\n n = span[1]\r\n toadd2 = partstr + ' '*(len(power)-1)\r\n toadd1 = ' '*(len(partstr)-1) + power\r\n if ((len(line2) + len(toadd2) > wrap) or\r\n (len(line1) + len(toadd1) > wrap)):\r\n output += line1 + \"\\n\" + line2 + \"\\n \"\r\n line1 = toadd1\r\n line2 = toadd2\r\n else:\r\n line2 += partstr + ' '*(len(power)-1)\r\n line1 += ' '*(len(partstr)-1) + power\r\n output += line1 + \"\\n\" + line2\r\n return output + astr[n:]\r\n\r\n\r\n@set_module('numpy')\r\nclass poly1d:\r\n \"\"\"\r\n A one-dimensional polynomial class.\r\n\r\n .. note::\r\n This forms part of the old polynomial API. Since version 1.4, the\r\n new polynomial API defined in `numpy.polynomial` is preferred.\r\n A summary of the differences can be found in the\r\n :doc:`transition guide </reference/routines.polynomials>`.\r\n\r\n A convenience class, used to encapsulate \"natural\" operations on\r\n polynomials so that said operations may take on their customary\r\n form in code (see Examples).\r\n\r\n Parameters\r\n ----------\r\n c_or_r : array_like\r\n The polynomial's coefficients, in decreasing powers, or if\r\n the value of the second parameter is True, the polynomial's\r\n roots (values where the polynomial evaluates to 0). 
For example,\r\n ``poly1d([1, 2, 3])`` returns an object that represents\r\n :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns\r\n one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.\r\n r : bool, optional\r\n If True, `c_or_r` specifies the polynomial's roots; the default\r\n is False.\r\n variable : str, optional\r\n Changes the variable used when printing `p` from `x` to `variable`\r\n (see Examples).\r\n\r\n Examples\r\n --------\r\n Construct the polynomial :math:`x^2 + 2x + 3`:\r\n\r\n >>> p = np.poly1d([1, 2, 3])\r\n >>> print(np.poly1d(p))\r\n 2\r\n 1 x + 2 x + 3\r\n\r\n Evaluate the polynomial at :math:`x = 0.5`:\r\n\r\n >>> p(0.5)\r\n 4.25\r\n\r\n Find the roots:\r\n\r\n >>> p.r\r\n array([-1.+1.41421356j, -1.-1.41421356j])\r\n >>> p(p.r)\r\n array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) # may vary\r\n\r\n These numbers in the previous line represent (0, 0) to machine precision\r\n\r\n Show the coefficients:\r\n\r\n >>> p.c\r\n array([1, 2, 3])\r\n\r\n Display the order (the leading zero-coefficients are removed):\r\n\r\n >>> p.order\r\n 2\r\n\r\n Show the coefficient of the k-th power in the polynomial\r\n (which is equivalent to ``p.c[-(i+1)]``):\r\n\r\n >>> p[1]\r\n 2\r\n\r\n Polynomials can be added, subtracted, multiplied, and divided\r\n (returns quotient and remainder):\r\n\r\n >>> p * p\r\n poly1d([ 1, 4, 10, 12, 9])\r\n\r\n >>> (p**3 + 4) / p\r\n (poly1d([ 1., 4., 10., 12., 9.]), poly1d([4.]))\r\n\r\n ``asarray(p)`` gives the coefficient array, so polynomials can be\r\n used in all functions that accept arrays:\r\n\r\n >>> p**2 # square of polynomial\r\n poly1d([ 1, 4, 10, 12, 9])\r\n\r\n >>> np.square(p) # square of individual coefficients\r\n array([1, 4, 9])\r\n\r\n The variable used in the string representation of `p` can be modified,\r\n using the `variable` parameter:\r\n\r\n >>> p = np.poly1d([1,2,3], variable='z')\r\n >>> print(p)\r\n 2\r\n 1 z + 2 z + 3\r\n\r\n Construct a polynomial from its roots:\r\n\r\n >>> np.poly1d([1, 2], True)\r\n poly1d([ 1., -3., 2.])\r\n\r\n This is the same polynomial as obtained by:\r\n\r\n >>> np.poly1d([1, -1]) * np.poly1d([1, -2])\r\n poly1d([ 1, -3, 2])\r\n\r\n \"\"\"\r\n __hash__ = None\r\n\r\n @property\r\n def coeffs(self):\r\n \"\"\" The polynomial coefficients \"\"\"\r\n return self._coeffs\r\n\r\n @coeffs.setter\r\n def coeffs(self, value):\r\n # allowing this makes p.coeffs *= 2 legal\r\n if value is not self._coeffs:\r\n raise AttributeError(\"Cannot set attribute\")\r\n\r\n @property\r\n def variable(self):\r\n \"\"\" The name of the polynomial variable \"\"\"\r\n return self._variable\r\n\r\n # calculated attributes\r\n @property\r\n def order(self):\r\n \"\"\" The order or degree of the polynomial \"\"\"\r\n return len(self._coeffs) - 1\r\n\r\n @property\r\n def roots(self):\r\n \"\"\" The roots of the polynomial, where self(x) == 0 \"\"\"\r\n return roots(self._coeffs)\r\n\r\n # our internal _coeffs property need to be backed by __dict__['coeffs'] for\r\n # scipy to work correctly.\r\n @property\r\n def _coeffs(self):\r\n return self.__dict__['coeffs']\r\n @_coeffs.setter\r\n def _coeffs(self, coeffs):\r\n self.__dict__['coeffs'] = coeffs\r\n\r\n # alias attributes\r\n r = roots\r\n c = coef = coefficients = coeffs\r\n o = order\r\n\r\n def __init__(self, c_or_r, r=False, variable=None):\r\n if isinstance(c_or_r, poly1d):\r\n self._variable = c_or_r._variable\r\n self._coeffs = c_or_r._coeffs\r\n\r\n if set(c_or_r.__dict__) - set(self.__dict__):\r\n msg = (\"In the future extra 
properties will not be copied \"\r\n \"across when constructing one poly1d from another\")\r\n warnings.warn(msg, FutureWarning, stacklevel=2)\r\n self.__dict__.update(c_or_r.__dict__)\r\n\r\n if variable is not None:\r\n self._variable = variable\r\n return\r\n if r:\r\n c_or_r = poly(c_or_r)\r\n c_or_r = atleast_1d(c_or_r)\r\n if c_or_r.ndim > 1:\r\n raise ValueError(\"Polynomial must be 1d only.\")\r\n c_or_r = trim_zeros(c_or_r, trim='f')\r\n if len(c_or_r) == 0:\r\n c_or_r = NX.array([0], dtype=c_or_r.dtype)\r\n self._coeffs = c_or_r\r\n if variable is None:\r\n variable = 'x'\r\n self._variable = variable\r\n\r\n def __array__(self, t=None):\r\n if t:\r\n return NX.asarray(self.coeffs, t)\r\n else:\r\n return NX.asarray(self.coeffs)\r\n\r\n def __repr__(self):\r\n vals = repr(self.coeffs)\r\n vals = vals[6:-1]\r\n return \"poly1d(%s)\" % vals\r\n\r\n def __len__(self):\r\n return self.order\r\n\r\n def __str__(self):\r\n thestr = \"0\"\r\n var = self.variable\r\n\r\n # Remove leading zeros\r\n coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]\r\n N = len(coeffs)-1\r\n\r\n def fmt_float(q):\r\n s = '%.4g' % q\r\n if s.endswith('.0000'):\r\n s = s[:-5]\r\n return s\r\n\r\n for k, coeff in enumerate(coeffs):\r\n if not iscomplex(coeff):\r\n coefstr = fmt_float(real(coeff))\r\n elif real(coeff) == 0:\r\n coefstr = '%sj' % fmt_float(imag(coeff))\r\n else:\r\n coefstr = '(%s + %sj)' % (fmt_float(real(coeff)),\r\n fmt_float(imag(coeff)))\r\n\r\n power = (N-k)\r\n if power == 0:\r\n if coefstr != '0':\r\n newstr = '%s' % (coefstr,)\r\n else:\r\n if k == 0:\r\n newstr = '0'\r\n else:\r\n newstr = ''\r\n elif power == 1:\r\n if coefstr == '0':\r\n newstr = ''\r\n elif coefstr == 'b':\r\n newstr = var\r\n else:\r\n newstr = '%s %s' % (coefstr, var)\r\n else:\r\n if coefstr == '0':\r\n newstr = ''\r\n elif coefstr == 'b':\r\n newstr = '%s**%d' % (var, power,)\r\n else:\r\n newstr = '%s %s**%d' % (coefstr, var, power)\r\n\r\n if k > 0:\r\n if newstr != '':\r\n if newstr.startswith('-'):\r\n thestr = \"%s - %s\" % (thestr, newstr[1:])\r\n else:\r\n thestr = \"%s + %s\" % (thestr, newstr)\r\n else:\r\n thestr = newstr\r\n return _raise_power(thestr)\r\n\r\n def __call__(self, val):\r\n return polyval(self.coeffs, val)\r\n\r\n def __neg__(self):\r\n return poly1d(-self.coeffs)\r\n\r\n def __pos__(self):\r\n return self\r\n\r\n def __mul__(self, other):\r\n if isscalar(other):\r\n return poly1d(self.coeffs * other)\r\n else:\r\n other = poly1d(other)\r\n return poly1d(polymul(self.coeffs, other.coeffs))\r\n\r\n def __rmul__(self, other):\r\n if isscalar(other):\r\n return poly1d(other * self.coeffs)\r\n else:\r\n other = poly1d(other)\r\n return poly1d(polymul(self.coeffs, other.coeffs))\r\n\r\n def __add__(self, other):\r\n other = poly1d(other)\r\n return poly1d(polyadd(self.coeffs, other.coeffs))\r\n\r\n def __radd__(self, other):\r\n other = poly1d(other)\r\n return poly1d(polyadd(self.coeffs, other.coeffs))\r\n\r\n def __pow__(self, val):\r\n if not isscalar(val) or int(val) != val or val < 0:\r\n raise ValueError(\"Power to non-negative integers only.\")\r\n res = [1]\r\n for _ in range(val):\r\n res = polymul(self.coeffs, res)\r\n return poly1d(res)\r\n\r\n def __sub__(self, other):\r\n other = poly1d(other)\r\n return poly1d(polysub(self.coeffs, other.coeffs))\r\n\r\n def __rsub__(self, other):\r\n other = poly1d(other)\r\n return poly1d(polysub(other.coeffs, self.coeffs))\r\n\r\n def __div__(self, other):\r\n if isscalar(other):\r\n return poly1d(self.coeffs/other)\r\n 
else:\r\n other = poly1d(other)\r\n return polydiv(self, other)\r\n\r\n __truediv__ = __div__\r\n\r\n def __rdiv__(self, other):\r\n if isscalar(other):\r\n return poly1d(other/self.coeffs)\r\n else:\r\n other = poly1d(other)\r\n return polydiv(other, self)\r\n\r\n __rtruediv__ = __rdiv__\r\n\r\n def __eq__(self, other):\r\n if not isinstance(other, poly1d):\r\n return NotImplemented\r\n if self.coeffs.shape != other.coeffs.shape:\r\n return False\r\n return (self.coeffs == other.coeffs).all()\r\n\r\n def __ne__(self, other):\r\n if not isinstance(other, poly1d):\r\n return NotImplemented\r\n return not self.__eq__(other)\r\n\r\n\r\n def __getitem__(self, val):\r\n ind = self.order - val\r\n if val > self.order:\r\n return self.coeffs.dtype.type(0)\r\n if val < 0:\r\n return self.coeffs.dtype.type(0)\r\n return self.coeffs[ind]\r\n\r\n def __setitem__(self, key, val):\r\n ind = self.order - key\r\n if key < 0:\r\n raise ValueError(\"Does not support negative powers.\")\r\n if key > self.order:\r\n zr = NX.zeros(key-self.order, self.coeffs.dtype)\r\n self._coeffs = NX.concatenate((zr, self.coeffs))\r\n ind = 0\r\n self._coeffs[ind] = val\r\n return\r\n\r\n def __iter__(self):\r\n return iter(self.coeffs)\r\n\r\n def integ(self, m=1, k=0):\r\n \"\"\"\r\n Return an antiderivative (indefinite integral) of this polynomial.\r\n\r\n Refer to `polyint` for full documentation.\r\n\r\n See Also\r\n --------\r\n polyint : equivalent function\r\n\r\n \"\"\"\r\n return poly1d(polyint(self.coeffs, m=m, k=k))\r\n\r\n def deriv(self, m=1):\r\n \"\"\"\r\n Return a derivative of this polynomial.\r\n\r\n Refer to `polyder` for full documentation.\r\n\r\n See Also\r\n --------\r\n polyder : equivalent function\r\n\r\n \"\"\"\r\n return poly1d(polyder(self.coeffs, m=m))\r\n\r\n# Stuff to do on module import\r\n\r\nwarnings.simplefilter('always', RankWarning)\r\n",
"\r\n\r\n\r\n\r\n\r\nfrom caffe2.python import core\r\nfrom hypothesis import given\r\nimport caffe2.python.hypothesis_test_util as hu\r\nimport numpy as np\r\n\r\n\r\nclass SparseItemwiseDropoutWithReplacementTest(hu.HypothesisTestCase):\r\n @given(**hu.gcs_cpu_only)\r\n def test_no_dropout(self, gc, dc):\r\n X = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.int64)\r\n Lengths = np.array([2, 2, 2, 2, 2]).astype(np.int32)\r\n replacement_value = -1\r\n self.ws.create_blob(\"X\").feed(X)\r\n self.ws.create_blob(\"Lengths\").feed(Lengths)\r\n sparse_dropout_op = core.CreateOperator(\r\n \"SparseItemwiseDropoutWithReplacement\", [\"X\", \"Lengths\"], [\"Y\", \"LY\"],\r\n ratio=0.0, replacement_value=replacement_value)\r\n self.ws.run(sparse_dropout_op)\r\n Y = self.ws.blobs[\"Y\"].fetch()\r\n OutputLengths = self.ws.blobs[\"LY\"].fetch()\r\n self.assertListEqual(X.tolist(), Y.tolist(),\r\n \"Values should stay unchanged\")\r\n self.assertListEqual(Lengths.tolist(), OutputLengths.tolist(),\r\n \"Lengths should stay unchanged.\")\r\n\r\n @given(**hu.gcs_cpu_only)\r\n def test_all_dropout(self, gc, dc):\r\n X = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.int64)\r\n Lengths = np.array([2, 2, 2, 2, 2]).astype(np.int32)\r\n replacement_value = -1\r\n self.ws.create_blob(\"X\").feed(X)\r\n self.ws.create_blob(\"Lengths\").feed(Lengths)\r\n sparse_dropout_op = core.CreateOperator(\r\n \"SparseItemwiseDropoutWithReplacement\", [\"X\", \"Lengths\"], [\"Y\", \"LY\"],\r\n ratio=1.0, replacement_value=replacement_value)\r\n self.ws.run(sparse_dropout_op)\r\n y = self.ws.blobs[\"Y\"].fetch()\r\n lengths = self.ws.blobs[\"LY\"].fetch()\r\n for elem in y:\r\n self.assertEqual(elem, replacement_value, \"Expected all \\\r\n negative elements when dropout ratio is 1.\")\r\n for length in lengths:\r\n self.assertEqual(length, 2)\r\n self.assertEqual(sum(lengths), len(y))\r\n\r\n @given(**hu.gcs_cpu_only)\r\n def test_all_dropout_empty_input(self, gc, dc):\r\n X = np.array([]).astype(np.int64)\r\n Lengths = np.array([0]).astype(np.int32)\r\n replacement_value = -1\r\n self.ws.create_blob(\"X\").feed(X)\r\n self.ws.create_blob(\"Lengths\").feed(Lengths)\r\n sparse_dropout_op = core.CreateOperator(\r\n \"SparseItemwiseDropoutWithReplacement\", [\"X\", \"Lengths\"], [\"Y\", \"LY\"],\r\n ratio=1.0, replacement_value=replacement_value)\r\n self.ws.run(sparse_dropout_op)\r\n y = self.ws.blobs[\"Y\"].fetch()\r\n lengths = self.ws.blobs[\"LY\"].fetch()\r\n self.assertEqual(len(y), 0, \"Expected no dropout value\")\r\n self.assertEqual(len(lengths), 1, \"Expected single element \\\r\n in lengths array\")\r\n self.assertEqual(lengths[0], 0, \"Expected 0 as sole length\")\r\n self.assertEqual(sum(lengths), len(y))\r\n",
"from torch.fx.graph_module import GraphModule\r\nfrom typing import Any, Callable, Dict, List, Tuple, Type\r\nimport torch\r\nimport torch.nn as nn\r\n\r\nfrom torch.fx._compatibility import compatibility\r\n\r\n\r\n# Matching method matches the attribute name of current version to the attribute name of `target_version`\r\n@compatibility(is_backward_compatible=False)\r\ndef default_matching(name: str, target_version: int) -> str:\r\n \"\"\"Default matching method\r\n \"\"\"\r\n return name\r\n\r\n# This dict maps the nn.Module class name to the attribute name list that we want to fetch for lowering.\r\n# The first integer in the tuple is the version number of the nn.Module class when we create the parameter list.\r\n# If there's a version mismatch then it means the parameter names in the book might be mismatched with nn.Module.\r\nmodule_fetch_book: Dict[Type, Tuple[int, List[str], Callable[[str, int], str]]] = {\r\n torch.nn.modules.linear.Linear: (1, [\"weight\", \"bias\"], default_matching),\r\n torch.nn.modules.conv.Conv2d: (\r\n 1, [\"weight\", \"bias\", \"kernel_size\", \"stride\", \"padding\", \"dilation\", \"groups\", \"padding_mode\"], default_matching\r\n ),\r\n torch.nn.modules.batchnorm.BatchNorm2d: (2, [\"weight\", \"bias\", \"running_mean\", \"running_var\", \"eps\"], default_matching),\r\n torch.nn.modules.pooling.AdaptiveAvgPool2d: (1, [], default_matching),\r\n torch.nn.modules.pooling.MaxPool2d: (\r\n 1, [\"kernel_size\", \"stride\", \"padding\", \"dilation\", \"return_indices\", \"ceil_mode\"], default_matching\r\n ),\r\n torch.nn.modules.activation.ReLU: (1, [\"inplace\"], default_matching),\r\n}\r\n\r\n@compatibility(is_backward_compatible=False)\r\ndef extract_attrs_for_lowering(mod: nn.Module) -> Dict[str, Any]:\r\n \"\"\"If `mod` is in `module_fetch_book`, fetch the mod's attributes that in the `module_fetch_book`\r\n after checking module's version is compatible with the `module_fetch_book`.\r\n \"\"\"\r\n attrs_for_lowering: Dict[str, Any] = {}\r\n attrs_for_lowering[\"name\"] = torch.typename(mod)\r\n\r\n if type(mod) in module_fetch_book:\r\n version, param_to_fetch, matching_method = module_fetch_book[type(mod)]\r\n if version < mod._version:\r\n raise RuntimeError(f\"Fetcher version {version} try to fetch {torch.typename(mod)} version {mod._version}, \"\r\n \"please upgrade the module_fetch_book, open an issue and @842974287 \"\r\n \"or report a bug to AIACC team directly.\")\r\n for attr in param_to_fetch:\r\n attrs_for_lowering[attr] = getattr(mod, matching_method(attr, mod._version))\r\n else:\r\n raise RuntimeError(f\"{torch.typename(mod)} is not in the module_fetch_book yet, \"\r\n \"please add it to the module_fetch_book, open an issue and @842974287 \"\r\n \"or report a bug to AIACC team directly.\")\r\n return attrs_for_lowering\r\n\r\n@compatibility(is_backward_compatible=False)\r\ndef lift_lowering_attrs_to_nodes(fx_module: GraphModule) -> None:\r\n \"\"\"Recursively traverse all `fx_module` nodes and fetch the module's attributes if the node is a leaf module.\r\n \"\"\"\r\n submodules = dict(fx_module.named_modules())\r\n\r\n for node in fx_module.graph.nodes:\r\n if node.op == \"call_module\":\r\n if isinstance(submodules[node.target], GraphModule):\r\n lift_lowering_attrs_to_nodes(submodules[node.target])\r\n else:\r\n node.attrs_for_lowering = extract_attrs_for_lowering(submodules[node.target])\r\n",
"\r\n\r\n\r\n\r\n\r\nimport unittest\r\nimport hypothesis.strategies as st\r\nfrom hypothesis import given, settings\r\nimport numpy as np\r\nfrom caffe2.python import core, workspace\r\nimport caffe2.python.hypothesis_test_util as hu\r\nimport caffe2.python.ideep_test_util as mu\r\n\r\[email protected](not workspace.C.use_mkldnn, \"No MKLDNN support.\")\r\nclass TransposeTest(hu.HypothesisTestCase):\r\n @given(\r\n X=hu.tensor(min_dim=1, max_dim=5, dtype=np.float32), use_axes=st.booleans(), **mu.gcs)\r\n @settings(deadline=None, max_examples=50)\r\n def test_transpose(self, X, use_axes, gc, dc):\r\n ndim = len(X.shape)\r\n axes = np.arange(ndim)\r\n np.random.shuffle(axes)\r\n\r\n if use_axes:\r\n op = core.CreateOperator(\r\n \"Transpose\", [\"X\"], [\"Y\"], axes=axes, device_option=gc)\r\n else:\r\n op = core.CreateOperator(\r\n \"Transpose\", [\"X\"], [\"Y\"], device_option=gc)\r\n\r\n def transpose_ref(X):\r\n if use_axes:\r\n return [np.transpose(X, axes=axes)]\r\n else:\r\n return [np.transpose(X)]\r\n\r\n self.assertReferenceChecks(gc, op, [X], transpose_ref)\r\n self.assertDeviceChecks(dc, op, [X], [0])\r\n self.assertGradientChecks(gc, op, [X], 0, [0])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()\r\n",
"\r\n\r\n\r\n\r\n\r\nfrom caffe2.python import core\r\nimport caffe2.python.hypothesis_test_util as hu\r\nimport caffe2.python.serialized_test.serialized_test_util as serial\r\n\r\nimport hypothesis.strategies as st\r\nfrom hypothesis import given, settings\r\nimport numpy as np\r\n\r\n\r\nclass TestFindOperator(serial.SerializedTestCase):\r\n\r\n @given(n=st.sampled_from([1, 4, 8, 31, 79, 150]),\r\n idxsize=st.sampled_from([2, 4, 8, 1000, 5000]),\r\n **hu.gcs)\r\n @settings(deadline=10000)\r\n def test_find(self, n, idxsize, gc, dc):\r\n maxval = 10\r\n\r\n def findop(idx, X):\r\n res = []\r\n for j in list(X.flatten()):\r\n i = np.where(idx == j)[0]\r\n if len(i) == 0:\r\n res.append(-1)\r\n else:\r\n res.append(i[-1])\r\n\r\n print(\"Idx: {} X: {}\".format(idx, X))\r\n print(\"Res: {}\".format(res))\r\n return [np.array(res).astype(np.int32)]\r\n\r\n X = (np.random.rand(n) * maxval).astype(np.int32)\r\n idx = (np.random.rand(idxsize) * maxval).astype(np.int32)\r\n\r\n op = core.CreateOperator(\r\n \"Find\",\r\n [\"idx\", \"X\"],\r\n [\"y\"],\r\n )\r\n\r\n self.assertReferenceChecks(\r\n device_option=gc,\r\n op=op,\r\n inputs=[idx, X],\r\n reference=findop,\r\n )\r\n",
"#!/usr/bin/env python3\r\n\r\nimport caffe2.python.hypothesis_test_util as hu\r\nimport hypothesis.strategies as st\r\nimport numpy as np\r\nimport numpy.testing as npt\r\nfrom caffe2.python import core, workspace\r\nfrom hypothesis import given\r\n\r\n\r\nclass TestUnsafeCoalesceOp(hu.HypothesisTestCase):\r\n @given(\r\n n=st.integers(1, 5),\r\n shape=st.lists(st.integers(0, 5), min_size=1, max_size=3),\r\n **hu.gcs\r\n )\r\n def test_unsafe_coalesce_op(self, n, shape, dc, gc):\r\n workspace.ResetWorkspace()\r\n test_inputs = [(100 * np.random.random(shape)).astype(np.float32) for _ in range(n)]\r\n test_input_blobs = [\"x_{}\".format(i) for i in range(n)]\r\n\r\n coalesce_op = core.CreateOperator(\r\n \"UnsafeCoalesce\",\r\n test_input_blobs,\r\n test_input_blobs + [\"shared_memory_blob\"],\r\n device_option=gc,\r\n )\r\n\r\n def reference_func(*args):\r\n self.assertEquals(len(args), n)\r\n return list(args) + [np.concatenate([x.flatten() for x in args])]\r\n\r\n self.assertReferenceChecks(gc, coalesce_op, test_inputs, reference_func)\r\n\r\n @given(\r\n n=st.integers(1, 5),\r\n shape=st.lists(st.integers(1, 5), min_size=1, max_size=3),\r\n seed=st.integers(0, 65535),\r\n **hu.gcs\r\n )\r\n def test_unsafe_coalesce_op_blob_sharing(self, n, shape, seed, dc, gc):\r\n workspace.ResetWorkspace()\r\n # Can make debugging of the test more predictable\r\n np.random.seed(seed)\r\n test_inputs = [(np.random.random(shape)).astype(np.float32) for _ in range(n)]\r\n test_input_blobs = [\"x_{}\".format(i) for i in range(n)]\r\n\r\n coalesce_op = core.CreateOperator(\r\n \"UnsafeCoalesce\",\r\n test_input_blobs,\r\n test_input_blobs + [\"shared_memory_blob\"],\r\n device_option=gc,\r\n )\r\n for name, value in zip(test_input_blobs, test_inputs):\r\n workspace.FeedBlob(name, value, device_option=gc)\r\n\r\n workspace.RunOperatorOnce(coalesce_op)\r\n blob_value = workspace.blobs[\"shared_memory_blob\"]\r\n npt.assert_almost_equal(\r\n blob_value,\r\n np.concatenate([x.flatten() for x in test_inputs]),\r\n decimal=4\r\n )\r\n # np.random generates values in range [0, 1), so -2 is outside of range\r\n blob_value.fill(-2.0)\r\n self.assertTrue((blob_value != workspace.blobs[\"shared_memory_blob\"]).all())\r\n workspace.FeedBlob(\"shared_memory_blob\", blob_value, device_option=gc)\r\n\r\n # All blobs preserved shape, but got overwritted to -2\r\n for name, value in zip(test_input_blobs, test_inputs):\r\n self.assertEqual(value.shape, workspace.blobs[name].shape)\r\n self.assertTrue((value != workspace.blobs[name]).all())\r\n self.assertTrue((workspace.blobs[name] == -2).all())\r\n\r\n # It should be OK to reuse operator as long as it's blob shapes are not changing\r\n workspace.RunOperatorOnce(coalesce_op)\r\n",
"\r\n\r\n\r\n\r\n\r\nfrom caffe2.python import core\r\nfrom collections import defaultdict, Counter\r\nfrom hypothesis import given, settings\r\nimport caffe2.python.hypothesis_test_util as hu\r\nimport caffe2.python.serialized_test.serialized_test_util as serial\r\nimport hypothesis.strategies as st\r\nimport numpy as np\r\n\r\nimport unittest\r\n\r\nDEFAULT_BEAM_WIDTH = 10\r\nDEFAULT_PRUNE_THRESHOLD = 0.001\r\n\r\n\r\nclass TestCTCBeamSearchDecoderOp(serial.SerializedTestCase):\r\n @given(\r\n batch=st.sampled_from([1, 2, 4]),\r\n max_time=st.sampled_from([1, 8, 64]),\r\n alphabet_size=st.sampled_from([1, 2, 32, 128, 512]),\r\n beam_width=st.sampled_from([1, 2, 16, None]),\r\n num_candidates=st.sampled_from([1, 2]),\r\n **hu.gcs_cpu_only\r\n )\r\n @settings(deadline=None, max_examples=30)\r\n def test_ctc_beam_search_decoder(\r\n self, batch, max_time, alphabet_size, beam_width, num_candidates, gc, dc\r\n ):\r\n if not beam_width:\r\n beam_width = DEFAULT_BEAM_WIDTH\r\n op_seq_len = core.CreateOperator('CTCBeamSearchDecoder',\r\n ['INPUTS', 'SEQ_LEN'],\r\n ['OUTPUT_LEN', 'VALUES', 'OUTPUT_PROB'],\r\n num_candidates=num_candidates)\r\n\r\n op_no_seq_len = core.CreateOperator('CTCBeamSearchDecoder',\r\n ['INPUTS'],\r\n ['OUTPUT_LEN', 'VALUES', 'OUTPUT_PROB'],\r\n num_candidates=num_candidates)\r\n else:\r\n num_candidates = min(num_candidates, beam_width)\r\n op_seq_len = core.CreateOperator('CTCBeamSearchDecoder',\r\n ['INPUTS', 'SEQ_LEN'],\r\n ['OUTPUT_LEN', 'VALUES', 'OUTPUT_PROB'],\r\n beam_width=beam_width,\r\n num_candidates=num_candidates)\r\n\r\n op_no_seq_len = core.CreateOperator('CTCBeamSearchDecoder',\r\n ['INPUTS'],\r\n ['OUTPUT_LEN', 'VALUES', 'OUTPUT_PROB'],\r\n beam_width=beam_width,\r\n num_candidates=num_candidates)\r\n\r\n def input_generater():\r\n inputs = np.random.rand(max_time, batch, alphabet_size)\\\r\n .astype(np.float32)\r\n seq_len = np.random.randint(1, max_time + 1, size=batch)\\\r\n .astype(np.int32)\r\n return inputs, seq_len\r\n\r\n def ref_ctc_decoder(inputs, seq_len):\r\n output_len = np.zeros(batch * num_candidates, dtype=np.int32)\r\n output_prob = np.zeros(batch * num_candidates, dtype=np.float32)\r\n val = np.array([]).astype(np.int32)\r\n\r\n for i in range(batch):\r\n Pb, Pnb = defaultdict(Counter), defaultdict(Counter)\r\n Pb[0][()] = 1\r\n Pnb[0][()] = 0\r\n A_prev = [()]\r\n ctc = inputs[:, i, :]\r\n ctc = np.vstack((np.zeros(alphabet_size), ctc))\r\n len_i = seq_len[i] if seq_len is not None else max_time\r\n\r\n for t in range(1, len_i + 1):\r\n pruned_alphabet = np.where(ctc[t] > DEFAULT_PRUNE_THRESHOLD)[0]\r\n for l in A_prev:\r\n for c in pruned_alphabet:\r\n if c == 0:\r\n Pb[t][l] += ctc[t][c] * (Pb[t - 1][l] + Pnb[t - 1][l])\r\n else:\r\n l_plus = l + (c,)\r\n if len(l) > 0 and c == l[-1]:\r\n Pnb[t][l_plus] += ctc[t][c] * Pb[t - 1][l]\r\n Pnb[t][l] += ctc[t][c] * Pnb[t - 1][l]\r\n else:\r\n Pnb[t][l_plus] += \\\r\n ctc[t][c] * (Pb[t - 1][l] + Pnb[t - 1][l])\r\n\r\n if l_plus not in A_prev:\r\n Pb[t][l_plus] += \\\r\n ctc[t][0] * \\\r\n (Pb[t - 1][l_plus] + Pnb[t - 1][l_plus])\r\n Pnb[t][l_plus] += ctc[t][c] * Pnb[t - 1][l_plus]\r\n\r\n A_next = Pb[t] + Pnb[t]\r\n A_prev = sorted(A_next, key=A_next.get, reverse=True)\r\n A_prev = A_prev[:beam_width]\r\n\r\n candidates = A_prev[:num_candidates]\r\n index = 0\r\n for candidate in candidates:\r\n val = np.hstack((val, candidate))\r\n output_len[i * num_candidates + index] = len(candidate)\r\n output_prob[i * num_candidates + index] = Pb[t][candidate] + Pnb[t][candidate]\r\n index += 
1\r\n\r\n return [output_len, val, output_prob]\r\n\r\n def ref_ctc_decoder_max_time(inputs):\r\n return ref_ctc_decoder(inputs, None)\r\n\r\n inputs, seq_len = input_generater()\r\n\r\n self.assertReferenceChecks(\r\n device_option=gc,\r\n op=op_seq_len,\r\n inputs=[inputs, seq_len],\r\n reference=ref_ctc_decoder,\r\n )\r\n\r\n self.assertReferenceChecks(\r\n device_option=gc,\r\n op=op_no_seq_len,\r\n inputs=[inputs],\r\n reference=ref_ctc_decoder_max_time,\r\n )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import random\r\n random.seed(2603)\r\n unittest.main()\r\n"
] | [
[
"numpy.random.rand"
],
[
"numpy.testing.assert_array_equal",
"numpy.random.rand"
],
[
"numpy.random.rand"
],
[
"torch.fx.operator_schemas.normalize_function",
"torch.fx.operator_schemas.normalize_module",
"torch.typename"
],
[
"numpy.array"
],
[
"numpy.array"
],
[
"numpy.testing.assert_equal",
"numpy.random.seed",
"numpy.min",
"numpy.max",
"numpy.random.rand"
],
[
"numpy.ones"
],
[
"numpy.random.seed",
"numpy.stack",
"numpy.full",
"numpy.testing.assert_array_equal",
"numpy.random.randn",
"numpy.sum"
],
[
"torch.distributed.PrefixStore"
],
[
"torch.distributions.transforms.ExpTransform",
"torch.full_like",
"torch.distributions.uniform.Uniform",
"torch.finfo",
"torch.distributions.utils.broadcast_all",
"torch.ones_like",
"torch.distributions.transforms.AffineTransform"
],
[
"torch.is_tensor",
"torch.cat"
],
[
"numpy.distutils.misc_util.make_temp_file",
"numpy.distutils.log.debug",
"numpy.distutils.misc_util.is_sequence",
"numpy.distutils.log.warn",
"numpy.distutils.log.info"
],
[
"numpy.maximum",
"numpy.minimum",
"numpy.abs",
"numpy.arange",
"numpy.random.rand",
"numpy.floor",
"numpy.array"
],
[
"numpy.true_divide",
"numpy.can_cast",
"numpy.compat.pickle.loads",
"numpy.minimum",
"numpy.datetime_as_string",
"numpy.isnat",
"numpy.subtract.accumulate",
"numpy.dtype",
"numpy.zeros_like",
"numpy.iinfo",
"numpy.busday_offset",
"numpy.busday_count",
"numpy.negative",
"numpy.bool_",
"numpy.divide",
"numpy.testing.assert_equal",
"numpy.datetime_data",
"numpy.ones_like",
"numpy.greater",
"numpy.testing.suppress_warnings",
"numpy.arange",
"numpy.empty_like",
"numpy.less",
"numpy.subtract",
"numpy.compat.pickle.dumps",
"numpy.greater_equal",
"numpy.less_equal",
"numpy.bytes_",
"numpy.zeros",
"numpy.testing.assert_raises_regex",
"numpy.busdaycalendar",
"numpy.isnan",
"numpy.timedelta64",
"numpy.int64",
"numpy.testing.assert_raises",
"numpy.equal",
"numpy.fmax",
"numpy.subtract.reduce",
"numpy.subtract.reduceat",
"numpy.argsort",
"numpy.not_equal",
"numpy.array",
"numpy.testing.assert_warns",
"numpy.positive",
"numpy.fmin",
"numpy.maximum.reduce",
"numpy.absolute",
"numpy.maximum",
"numpy.isfinite",
"numpy.datetime64",
"numpy.testing.assert_array_equal",
"numpy.ones",
"numpy.is_busday",
"numpy.sign",
"numpy.add",
"numpy.isinf",
"numpy.empty"
],
[
"torch.distributions.utils.probs_to_logits",
"torch._C._get_tracing_state",
"torch.Size",
"torch.distributions.utils.logits_to_probs",
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.no_grad",
"torch.rand",
"torch.finfo",
"torch.distributions.utils.broadcast_all"
],
[
"torch.ao.quantization.utils.get_combined_dict"
],
[
"numpy.linalg.eigvals",
"numpy.core.numeric.array",
"numpy.lib.type_check.real",
"numpy.lib.type_check.imag",
"numpy.core.numeric.allclose",
"numpy.core.numeric.asanyarray",
"numpy.core.numeric.logical_or.accumulate",
"numpy.core.numeric.zeros_like",
"numpy.core.numeric.arange",
"numpy.lib.twodim_base.vander",
"numpy.core.numeric.concatenate",
"numpy.core.numeric.zeros",
"numpy.core.array",
"numpy.core.overrides.set_module",
"numpy.core.dot",
"numpy.core.finfo",
"numpy.linalg.lstsq",
"numpy.core.abs",
"numpy.core.numeric.ravel",
"numpy.core.numeric.convolve",
"numpy.lib.type_check.iscomplex",
"numpy.core.atleast_1d",
"numpy.lib.type_check.mintypecode",
"numpy.lib.function_base.trim_zeros",
"numpy.core.isscalar",
"numpy.core.numeric.asarray",
"numpy.core.numeric.outer",
"numpy.core.numeric.sort",
"numpy.core.ones",
"numpy.core.numeric.ones"
],
[
"numpy.array"
],
[
"torch.fx._compatibility.compatibility",
"torch.typename"
],
[
"numpy.arange",
"numpy.random.shuffle",
"numpy.transpose"
],
[
"numpy.array",
"numpy.where",
"numpy.random.rand"
],
[
"numpy.random.random",
"numpy.random.seed"
],
[
"numpy.hstack",
"numpy.random.rand",
"numpy.array",
"numpy.zeros",
"numpy.where",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.24",
"1.22",
"1.23"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.11",
"1.10",
"1.12",
"1.19",
"1.13",
"1.16",
"1.9",
"1.18",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LothairKizardjian/EfficientNeuralSearch | [
"45dd9c052fb74f9bb56efd9b914761dafb1a7ac9"
] | [
"enas/src/chess/data_utils.py"
] | [
"import os\nimport sys\nimport _pickle as pickle\nimport numpy as np\nimport tensorflow as tf\nimport chess.pgn\nimport pgn_tensors_utils\nimport bz2\n\ndef read_data(data_path, num_valids=20000):\n print(\"-\" * 80)\n print(\"Reading data\")\n\n nb_games = 200\n #nb_games = sys.maxsize\n boards, labels, results = {}, {}, {}\n\n train_files = [\n \"pgn_games/chess_games_2000.pgn\",\n \"pgn_games/chess_games_2001.pgn\",\n \"pgn_games/chess_games_2002.pgn\",\n \"pgn_games/chess_games_2003.pgn\",\n \"pgn_games/chess_games_2004.pgn\"\n ]\n test_file = [ \n \"pgn_games/chess_games_2005.pgn\"\n ]\n boards[\"train\"], labels[\"train\"], results[\"train\"] = load_data(data_path, train_files, nb_games)\n\n num_valids = int(len(boards[\"train\"])*0.1)\n \n if num_valids:\n boards[\"valid\"] = boards[\"train\"][-num_valids:]\n labels[\"valid\"] = labels[\"train\"][-num_valids:]\n results[\"valid\"]= results[\"train\"][-num_valids:]\n\n boards[\"train\"] = boards[\"train\"][:-num_valids]\n labels[\"train\"] = labels[\"train\"][:-num_valids]\n results[\"train\"]= results[\"train\"][:-num_valids]\n else:\n boards[\"valid\"], labels[\"valid\"], results[\"valid\"] = None, None, None\n\n boards[\"test\"], labels[\"test\"], results[\"test\"] = load_data(data_path, test_file, nb_games)\n\n return boards, results\n\ndef load_pgn_from_bz2(bz2_path):\n bz2_file_exist = os.path.isfile(bz2_path)\n if bz2_file_exist == False:\n print('File {} not found'.format(bz2_path))\n return 0\n with open(bz2_path, 'rb') as source, open(bz2_path.replace('.bz2',''), 'wb') as dest:\n dest.write(bz2.decompress(source.read()))\n return 1\n \ndef load_games_from_pgn_path(pgn_path,game_nb):\n pgn_file_exist = os.path.isfile(pgn_path)\n if pgn_file_exist == False:\n if(load_pgn_from_bz2(pgn_path+'.bz2')==0): \n print('File {} not found'.format(pgn_path))\n return 0\n pgn = open(pgn_path)\n return load_games_from_pgn(pgn,game_nb)\n\ndef load_games_from_pgn(pgn,game_nb):\n name = pgn.name.split('/')[3].replace('.pgn','')\n print('Loading games for pgn {} ...'.format(name))\n games = []\n game = chess.pgn.read_game(pgn)\n counter = 0\n \n while game != None and counter < game_nb:\n games.append(game)\n game = chess.pgn.read_game(pgn)\n counter+=1\n \n print(\"{} games loaded\".format(counter))\n return games\n \n\ndef _load_data(data_path,pgn_path,game_nb):\n '''\n Load the tensors, the according labels and store them in a .npy file\n '''\n suffixe = pgn_path.split('/') \n suffixe = suffixe[1].replace('.pgn','')\n tensors_file_exist = os.path.isfile(data_path+'/tensors/tensors_numpy_{}games_{}.npy'.format(game_nb,suffixe))\n labels_files_exist = os.path.isfile(data_path+'/tensors/labels_numpy_{}games_{}.npy'.format(game_nb,suffixe))\n\n \n print(\"Loading data for {}\".format(suffixe))\n \n if tensors_file_exist == False or labels_files_exist == False: \n full_name = os.path.join(data_path,pgn_path)\n games = load_games_from_pgn_path(full_name,game_nb)\n print(\"loading tensors and according labels ...\")\n tensors,labels,results = pgn_tensors_utils.tensors_labels_from_games(games)\n np.save(data_path+'/tensors/labels_numpy_{}games_{}.npy'.format(game_nb,suffixe), labels)\n np.save(data_path+'/tensors/tensors_numpy_{}games_{}.npy'.format(game_nb,suffixe), tensors)\n np.save(data_path+'/tensors/results_numpy_{}games_{}.npy'.format(game_nb,suffixe), results)\n else:\n tensors = np.load(data_path+'/tensors/tensors_numpy_{}games_{}.npy'.format(game_nb,suffixe))\n labels = 
np.load(data_path+'/tensors/labels_numpy_{}games_{}.npy'.format(game_nb,suffixe)) \n results = np.load(data_path+'/tensors/results_numpy_{}games_{}.npy'.format(game_nb,suffixe))\n return tensors,labels,results\n\ndef load_data(data_path,paths,game_nb):\n print(\"Loading data ...\")\n tensors = []\n labels = []\n results = []\n for pgn_path in paths:\n t,l,r = _load_data(data_path,pgn_path,game_nb)\n for row in t:\n tensors.append(row)\n for row in l:\n labels.append(row)\n for row in r:\n results.append(row)\n \n tensors = np.asarray(tensors)\n labels = np.asarray(labels)\n results = np.asarray(results)\n \n tensors = np.concatenate(tensors, axis=0)\n tensors = np.reshape(tensors, [-1, 7, 8, 8])\n tensors = np.transpose(tensors, [0, 2, 3, 1])\n \n return tensors,labels,results\n\ndef select_random_examples(tensors,labels,nb_ex):\n if len(tensors) != len(labels) :\n raise('Tensors and labels have different length')\n else:\n samples = np.random.choice(len(tensors), size=nb_ex, replace=False)\n x = tensors[samples]\n y = labels[samples]\n return x,y\n"
] | [
[
"numpy.asarray",
"numpy.concatenate",
"numpy.reshape",
"numpy.transpose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NSLS-II-ISS/isstools | [
"54102529384f2c76633ca0393d637225a4104f93",
"54102529384f2c76633ca0393d637225a4104f93"
] | [
"isstools/widgets/widget_camera.py",
"isstools/widgets/widget_johann_tools.py"
] | [
"import datetime\nfrom timeit import default_timer as timer\n\nimport numpy as np\nimport pkg_resources\nfrom PyQt5 import uic, QtWidgets, QtCore\nfrom PyQt5.QtCore import QThread, QSettings\n\nfrom matplotlib.backends.backend_qt5agg import (\n FigureCanvasQTAgg as FigureCanvas,\n NavigationToolbar2QT as NavigationToolbar)\nfrom matplotlib.figure import Figure\nimport matplotlib.patches as patches\nimport time\n\nimport bluesky.plan_stubs as bps\nfrom xas.xray import generate_energy_grid\n\nfrom isstools.dialogs.BasicDialogs import question_message_box, message_box\nfrom isstools.elements.figure_update import update_figure\nfrom isstools.elements.parameter_handler import parse_plan_parameters, return_parameters_from_widget\nfrom isstools.widgets import widget_energy_selector\nfrom isstools.elements.batch_motion import SamplePositioner\nimport time as ttime\nfrom isstools.widgets import widget_sample_positioner\nfrom isstools.widgets import widget_sample_registry\n# from isstools.process_callbacks.callback import run_router\n\n\nui_path = pkg_resources.resource_filename('isstools', 'ui/ui_camera.ui')\n\nclass UICamera(*uic.loadUiType(ui_path)):\n def __init__(self,\n camera_dict={},\n sample_stage = {},\n sample_positioner=None,\n RE = None,\n parent_gui = None,\n sample_registry=None,\n *args, **kwargs):\n\n super().__init__(*args, **kwargs)\n self.setupUi(self)\n # beamline controls\n self.camera_dict = camera_dict\n self.sample_stage = sample_stage\n self.RE = RE\n self.parent = parent_gui\n\n # figure management\n self.addCanvas()\n self.cid = self.canvas_c1.mpl_connect('button_press_event', self.set_hcursor)\n self.cid = self.canvas_c2.mpl_connect('button_press_event', self.set_vcursor)\n self.cid = self.canvas_qr.mpl_connect('button_press_event', self.set_qr_roi)\n self.h_vc = None\n self.h_hc = None\n\n self.qr_roi_patch = None\n self.qr_hlines = None\n self.qr_vlines = None\n\n # taking images management\n self.push_show_image.clicked.connect(self.show_image)\n self.timer_track_camera = QtCore.QTimer(self)\n self.timer_track_camera.setInterval(1000)\n self.timer_track_camera.timeout.connect(self.track_camera)\n\n # stage positioning management\n self.push_stage_up.clicked.connect(self.stage_up)\n self.push_stage_down.clicked.connect(self.stage_down)\n self.push_stage_left.clicked.connect(self.stage_left)\n self.push_stage_right.clicked.connect(self.stage_right)\n self.push_update_stage_parking.clicked.connect(self.update_stage_parking)\n self.push_park_stage.clicked.connect(self.park_stage)\n self.push_update_sample_parking.clicked.connect(self.update_sample_parking)\n\n self.sample_registry=sample_registry\n\n self.sample_positioner = sample_positioner\n self.settings = parent_gui.settings\n self.widget_sample_positioner = widget_sample_positioner.UISamplePositioner(parent=self,\n settings=self.settings,\n RE=RE,\n sample_positioner=sample_positioner)\n self.layout_sample_positioner.addWidget(self.widget_sample_positioner)\n\n self.widget_sample_registry = widget_sample_registry.UISampleRegistry(parent=self,\n settings=self.settings,\n RE=RE,\n sample_registry=sample_registry)\n self.layout_sample_registry.addWidget(self.widget_sample_registry)\n\n # persistence management\n\n # stage_park_x = self.settings.value('stage_park_x', defaultValue=0, type=float)\n # stage_park_y = self.settings.value('stage_park_y', defaultValue=0, type=float)\n\n stage_park_x = sample_positioner.stage_park_x\n stage_park_y = sample_positioner.stage_park_y\n self.spinBox_stage_x.setValue(stage_park_x)\n 
self.spinBox_stage_y.setValue(stage_park_y)\n\n # sample_park_x = self.settings.value('sample_park_x', defaultValue=0, type=float)\n # sample_park_y = self.settings.value('sample_park_y', defaultValue=0, type=float)\n sample_park_x = sample_positioner.stage_park_x + sample_positioner.delta_first_holder_x\n sample_park_y = sample_positioner.stage_park_y + sample_positioner.delta_first_holder_y\n self.spinBox_sample_x.setValue(sample_park_x)\n self.spinBox_sample_y.setValue(sample_park_y)\n\n self.beam_x_position_on_camera = self.settings.value('beam_x_position_on_camera', defaultValue=250)\n self.beam_y_position_on_camera = self.settings.value('beam_y_position_on_camera', defaultValue=250)\n\n\n x1 = self.settings.value('qr_roi_x1', defaultValue=0, type=int)\n x2 = self.settings.value('qr_roi_x2', defaultValue=0, type=int)\n y1 = self.settings.value('qr_roi_y1', defaultValue=0, type=int)\n y2 = self.settings.value('qr_roi_y2', defaultValue=0, type=int)\n self.qr_roi = [(x1, y1), [x2, y2]]\n\n # sample positioner handle\n\n\n # get pictures on the GUI upon opening\n self.show_image()\n #self.timer_track_camera.start()\n\n # self.pushButton_sreg_get_start.clicked.connect(self.set_start_sreg_points)\n # self.pushButton_sreg_get_end.clicked.connect(self.set_end_sreg_points)\n # self.pushButton_sreg_initialize.clicked.connect(self.sreg_initialize)\n # self.pushButton_sreg_reset.clicked.connect(self.sreg_reset)\n # self.pushButton_sreg_move_to_beg.clicked.connect(self.sreg_move_to_beginning)\n # self.pushButton_sreg_move_to_next.clicked.connect(self.sreg_move_to_next)\n # self.pushButton_sreg_move_to_unexposed.clicked.connect(self.sreg_move_to_unexposed)\n # self.pushButton_sreg_save.clicked.connect(self.sreg_save_to_file)\n # self.pushButton_sreg_load.clicked.connect(self.sreg_load_file)\n # def _save_sample_index_settings(self):\n # self.settings.setValue('index_stack', self.spinBox_index_stack.value())\n # self.settings.setValue('index_holder', self.spinBox_index_holder.value())\n # self.settings.setValue('index_sample', self.spinBox_index_sample.value())\n\n\n\n def addCanvas(self):\n self.figure_c1 = Figure()\n self.figure_c1.set_facecolor(color='#FcF9F6')\n self.canvas_c1 = FigureCanvas(self.figure_c1)\n self.figure_c1.ax = self.figure_c1.add_subplot(111)\n\n self.figure_c1.tight_layout()\n self.toolbar_c1 = NavigationToolbar(self.canvas_c1, self, coordinates=True)\n self.plot_camera1.addWidget(self.toolbar_c1)\n self.plot_camera1.addWidget(self.canvas_c1)\n self.canvas_c1.draw_idle()\n\n self.figure_c2 = Figure()\n self.figure_c2.set_facecolor(color='#FcF9F6')\n self.canvas_c2 = FigureCanvas(self.figure_c2)\n self.figure_c2.ax = self.figure_c2.add_subplot(111)\n\n self.figure_c2.tight_layout()\n self.toolbar_c2 = NavigationToolbar(self.canvas_c2, self, coordinates=True)\n self.plot_camera2.addWidget(self.toolbar_c2)\n self.plot_camera2.addWidget(self.canvas_c2)\n self.canvas_c2.draw_idle()\n\n self.figure_qr = Figure()\n self.figure_qr.set_facecolor(color='#FcF9F6')\n self.canvas_qr = FigureCanvas(self.figure_qr)\n self.figure_qr.ax = self.figure_qr.add_subplot(111)\n\n self.figure_qr.tight_layout()\n self.toolbar_qr = NavigationToolbar(self.canvas_qr, self, coordinates=True)\n self.plot_camera_qr.addWidget(self.toolbar_qr)\n self.plot_camera_qr.addWidget(self.canvas_qr)\n self.canvas_qr.draw_idle()\n\n\n # def show_image(self, camera_):\n\n\n def show_image(self):\n if self.push_track.isChecked():\n self.timer_track_camera.start()\n else:\n self.timer_track_camera.singleShot(0, 
self.track_camera)\n\n def track_camera(self):\n init_time = ttime.time()\n camera1 = self.camera_dict['camera_sample1']\n camera2 = self.camera_dict['camera_sample2']\n camera_qr = self.camera_dict['camera_sample4']\n image1 = camera1.image.image\n image2 = camera2.image.image\n image_qr = camera_qr.image.image\n vmin1, vmax1 = np.percentile(image1, 5), np.percentile(image1, 90)\n vmin2, vmax2 = np.percentile(image2, 5), np.percentile(image2, 90)\n vminqr, vmaxqr = np.percentile(image_qr, 5), np.percentile(image_qr, 90)\n print(f'Got images from PV {ttime.time()-init_time}')\n self.figure_c1.ax.imshow(image1, cmap='gray', vmin=vmin1, vmax=vmax1)\n self.figure_c2.ax.imshow(image2, cmap='gray', vmin=vmin2, vmax=vmax2)\n self.figure_qr.ax.imshow(image_qr, cmap='gray', origin='lower', vmin=vminqr, vmax=vmaxqr)\n print(f'Imshow {ttime.time() - init_time}')\n # beam position from previous session\n self._set_vcursor()\n self._set_hcursor()\n self.plot_qr_roi()\n # pretty cross\n # self.set_qr_cursor()\n\n self.canvas_c1.draw_idle()\n self.canvas_c2.draw_idle()\n self.canvas_qr.draw_idle()\n print(f'Done with images {ttime.time() - init_time}')\n\n\n def stage_up(self):\n v_step = self.spinBox_ver_step.value()\n self.RE(bps.mvr(self.sample_stage.y, v_step))\n self.show_image()\n\n def stage_down(self):\n v_step = self.spinBox_ver_step.value()\n self.RE(bps.mvr(self.sample_stage.y, -v_step))\n self.show_image()\n\n def stage_right(self):\n h_step = self.spinBox_hor_step.value()\n self.RE(bps.mvr(self.sample_stage.x, -h_step))\n self.show_image()\n\n def stage_left(self):\n h_step = self.spinBox_hor_step.value()\n self.RE(bps.mvr(self.sample_stage.x, h_step))\n self.show_image()\n\n\n def park_stage(self):\n # stage_x = self.spinBox_zero_x_rbk.value()\n # stage_y = self.spinBox_zero_y_rbk.value()\n # self.RE(bps.mv(self.sample_stage.x, stage_x))\n # self.RE(bps.mv(self.sample_stage.y, stage_y))\n self.sample_positioner.goto_park()\n self.show_image()\n\n\n def update_stage_parking(self):\n\n ret = question_message_box(self, 'Stage Parking Update',\n ('Are you sure you want to update stage parking position?\\n' +\n 'You may need to recalibrate the stage/sample positioning'))\n if ret:\n stage_park_x = self.sample_stage.x.read()[self.sample_stage.x.name]['value']\n stage_park_y = self.sample_stage.y.read()[self.sample_stage.y.name]['value']\n\n self.spinBox_stage_x.setValue(stage_park_x)\n self.spinBox_stage_y.setValue(stage_park_y)\n\n self.settings.setValue('stage_park_x', stage_park_x)\n self.settings.setValue('stage_park_y', stage_park_y)\n\n sample_park_x = self.spinBox_sample_x.value()\n sample_park_y = self.spinBox_sample_y.value()\n\n self.sample_positioner = SamplePositioner(self.RE,\n self.sample_stage,\n stage_park_x,\n stage_park_y,\n delta_first_holder_x=sample_park_x - stage_park_x,\n delta_first_holder_y=sample_park_y - stage_park_y)\n\n def update_sample_parking(self):\n\n ret = question_message_box(self, 'Sample Parking Update',\n ('Are you sure you want to update sample parking position?\\n' +\n 'You may need to recalibrate the stage/sample positioning'))\n if ret:\n sample_park_x = self.sample_stage.x.read()[self.sample_stage.x.name]['value']\n sample_park_y = self.sample_stage.y.read()[self.sample_stage.y.name]['value']\n\n self.spinBox_sample_x.setValue(sample_park_x)\n self.spinBox_sample_y.setValue(sample_park_y)\n\n self.settings.setValue('sample_park_x', sample_park_x)\n self.settings.setValue('sample_park_y', sample_park_y)\n\n stage_park_x = self.spinBox_stage_x.value()\n 
stage_park_y = self.spinBox_stage_y.value()\n\n self.sample_positioner = SamplePositioner(self.RE,\n self.sample_stage,\n stage_park_x,\n stage_park_y,\n delta_first_holder_x=sample_park_x - stage_park_x,\n delta_first_holder_y=sample_park_y - stage_park_y)\n\n # def zero_stage(self):\n # camera_qr = self.camera_dict['camera_sample4']\n # image_qr = camera_qr.image.image\n # qr_codes = pzDecode(image_qr)\n # if qr_codes:\n # for qr_code in qr_codes:\n # qr_text = qr_code.data.decode('utf8')\n # if qr_text == '0 position':\n # # self.label_qrcode.setText(qr_text)\n #\n # # print('qr code center:',\n # # qr_code.rect.left + qr_code.rect.width/2,\n # # qr_code.rect.top + qr_code.rect.height/2)\n # # print('qr code should be moved by these pixels:',\n # # qr_code.rect.left + qr_code.rect.width/2 - self.spinBox_zero_x.value(),\n # # qr_code.rect.top + qr_code.rect.height/2 - self.spinBox_zero_y.value())\n #\n # delta_x, delta_y = shift_stage_to_zero( qr_code.rect.left + qr_code.rect.width/2,\n # qr_code.rect.top + qr_code.rect.height/2,\n # self.spinBox_zero_x.value(),\n # self.spinBox_zero_y.value())\n # print('moving the giant_xy stage by (', delta_x, ',', delta_y, ')')\n # self.RE(bps.mvr(self.sample_stage.x, delta_x))\n # self.RE(bps.mvr(self.sample_stage.y, delta_y))\n # self.show_image()\n #\n # self.sample_x_zero_pos = self.sample_stage.x.read()[self.sample_stage.x.name]['value']\n # self.sample_y_zero_pos = self.sample_stage.y.read()[self.sample_stage.y.name]['value']\n #\n # self.spinBox_zero_x_rbk.setValue(self.sample_x_zero_pos)\n # self.spinBox_zero_y_rbk.setValue(self.sample_y_zero_pos)\n #\n # # need to change the (delta_first_holder_x, delta_first_holder_y) upon update\n # self.sample_positioner = SamplePositioner(self.sample_x_zero_pos,\n # self.sample_y_zero_pos,\n # 10.0, # delta_first_holder_x\n # 10.0, # delta_first_holder_y\n # self.RE,\n # self.sample_stage)\n #\n # self.settings.setValue('sample_stage_zero_x_pix', self.spinBox_zero_x.value())\n # self.settings.setValue('sample_stage_zero_y_pix', self.spinBox_zero_y.value())\n # self.settings.setValue('sample_stage_zero_x_rbk', self.spinBox_zero_x_rbk.value())\n # self.settings.setValue('sample_stage_zero_y_rbk', self.spinBox_zero_y_rbk.value())\n\n\n\n\n def move_to_sample(self):\n self._save_sample_index_settings()\n index_stack = self.spinBox_index_stack.value()\n index_holder = self.spinBox_index_holder.value()\n index_sample = self.spinBox_index_sample.value()\n self.sample_positioner.goto_sample(index_stack, index_holder, index_sample)\n self.RE(bps.sleep(0.1))\n self.show_image()\n\n\n def set_qr_roi(self, event):\n if event.button == 3:\n x, y = int(event.xdata), int(event.ydata)\n if self.qr_roi is None:\n self.qr_roi = [(x, y)]\n else:\n if len(self.qr_roi) == 1:\n self.qr_roi.append((x, y))\n self.settings.setValue('qr_roi_x1', self.qr_roi[0][0])\n self.settings.setValue('qr_roi_x2', self.qr_roi[1][0])\n self.settings.setValue('qr_roi_y1', self.qr_roi[0][1])\n self.settings.setValue('qr_roi_y2', self.qr_roi[1][1])\n\n elif len(self.qr_roi) == 2:\n self.qr_roi = [(x, y)]\n self.show_image()\n\n\n\n def plot_qr_roi(self):\n if self.qr_vlines:\n self.qr_vlines.remove()\n if self.qr_hlines:\n self.qr_hlines.remove()\n try:\n if self.qr_roi_patch:\n self.qr_roi_patch.remove()\n except ValueError:\n pass\n\n if self.qr_roi:\n xlim = self.figure_qr.ax.get_xlim()\n ylim = self.figure_qr.ax.get_ylim()\n xs = [i[0] for i in self.qr_roi]\n ys = [i[1] for i in self.qr_roi]\n self.qr_vlines = self.figure_qr.ax.vlines(xs, 
ylim[0], ylim[1], linestyles='--', colors='r', linewidths=0.5)\n self.qr_hlines = self.figure_qr.ax.hlines(ys, xlim[0], xlim[1], linestyles='--', colors='r', linewidths=0.5)\n if len(self.qr_roi) == 2:\n x1, x2 = self.qr_roi[0][0], self.qr_roi[1][0]\n y1, y2 = self.qr_roi[0][1], self.qr_roi[1][1]\n rect = patches.Rectangle((min(x1, x2), min(y1, y2)), abs(x1-x2), abs(y1-y2), linewidth=1, edgecolor='r', facecolor='none')\n\n self.qr_roi_patch = self.figure_qr.ax.add_patch(rect)\n\n\n\n\n def set_vcursor(self, event):\n # wrapper for separation of event and xdata\n if event.button == 3:\n self.beam_x_position_on_camera = event.xdata\n self.settings.setValue('beam_x_position_on_camera', self.beam_x_position_on_camera)\n self._set_vcursor()\n\n def _set_vcursor(self):\n xdata = self.beam_x_position_on_camera\n if self.h_vc:\n self.h_vc.remove()\n y1, y2 = self.figure_c2.ax.get_ylim()\n self.h_vc = self.figure_c2.ax.vlines(xdata, y1,y2, color = 'green' )\n self.canvas_c2.draw_idle()\n\n\n def set_hcursor(self, event):\n # wrapper for separation of event and ydata\n if event.button == 3:\n self.beam_y_position_on_camera = event.ydata\n self.settings.setValue('beam_y_position_on_camera', self.beam_y_position_on_camera)\n self._set_hcursor()\n\n\n def _set_hcursor(self):\n ydata = self.beam_y_position_on_camera\n if self.h_hc:\n self.h_hc.remove()\n x1, x2 = self.figure_c1.ax.get_xlim()\n self.h_hc = self.figure_c1.ax.hlines(ydata, x1, x2, color='green')\n self.canvas_c1.draw_idle()\n\n\n\n\n\n\n # def set_qr_cursor(self):\n # color = [0.0, 0.7, 0.0]\n # y_lo, y_hi = self.figure_qr.ax.get_xlim()\n # x_lo, x_hi = self.figure_qr.ax.get_ylim()\n # if self.qr_vc:\n # self.qr_vc.remove()\n # if self.qr_hc:\n # self.qr_hc.remove()\n #\n # self.qr_vc = self.figure_qr.ax.vlines(self.spinBox_zero_x.value(), x_lo, x_hi, colors=color, linewidths=0.5)\n # self.qr_hc = self.figure_qr.ax.hlines(self.spinBox_zero_y.value(), y_lo, y_hi, colors=color, linewidths=0.5)\n # self.figure_qr.ax.set_xlim(y_low, y_high)\n # self.figure_qr.ax.set_ylim(x_low, x_high)\n\n\n\n\n\n\n",
"import json\nimport pkg_resources\nfrom PyQt5 import uic, QtWidgets, QtCore\nfrom isstools.widgets import widget_emission_energy_selector\nimport bluesky.plan_stubs as bps\nfrom xas.spectrometer import Crystal\nimport pandas as pd\nui_path = pkg_resources.resource_filename('isstools', 'ui/ui_johann_spectrometer.ui')\nfrom isstools.elements.figure_update import update_figure_with_colorbar, update_figure, setup_figure\n# from isstools.dialogs import (MoveMotorDialog)\nfrom xas.spectrometer import analyze_elastic_scan\nimport os\nfrom isstools.dialogs.BasicDialogs import message_box\nimport numpy as np\nfrom xas.spectrometer import analyze_many_elastic_scans\nfrom xas.fitting import Nominal2ActualConverter\nfrom bluesky.callbacks import LivePlot\n\n\nclass UIJohannTools(*uic.loadUiType(ui_path)):\n def __init__(self, parent=None,\n db=None,\n RE=None,\n motor_dictionary=None,\n detector_dictionary=None,\n aux_plan_funcs=None,\n service_plan_funcs=None,\n embedded_run_scan_func=None,\n figure_proc=None,\n canvas_proc=None,\n toolbar_proc=None,\n *args, **kwargs):\n\n super().__init__(*args, **kwargs)\n self.setupUi(self)\n self.parent = parent\n self.motor_dictionary = motor_dictionary\n self.detector_dictionary = detector_dictionary\n self.aux_plan_funcs = aux_plan_funcs\n self.service_plan_funcs = service_plan_funcs\n self.RE = RE\n self.db = db\n\n self.motor_emission = self.motor_dictionary['motor_emission']['object']\n\n self.figure_proc = figure_proc,\n self.canvas_proc = canvas_proc,\n self.toolbar_proc = toolbar_proc\n\n self._run_any_scan = embedded_run_scan_func\n\n self.settings = parent.parent.settings\n self._cur_alignment_motor = None\n\n self.widget_emission_energy = widget_emission_energy_selector.UIEmissionEnergySelector(parent=self)\n self.layout_emission_energy.addWidget(self.widget_emission_energy)\n\n self.push_update_crystal_parking.clicked.connect(self.update_crystal_parking)\n self.push_park_crystal.clicked.connect(self.park_crystal)\n self.push_set_default_soft_limits.clicked.connect(self.set_default_soft_limits)\n self.push_compute_crystal_position.clicked.connect(self.compute_crystal_position)\n self.push_move_crystal.clicked.connect(self.move_crystal)\n self.comboBox_johann_tweak_motor.currentIndexChanged.connect(self.update_tweak_motor)\n\n self.push_tweak_down.clicked.connect(self.tweak_down)\n self.push_tweak_up.clicked.connect(self.tweak_up)\n\n for chan in self.detector_dictionary['Pilatus 100k']['channels']:\n self.comboBox_pilatus_channels.addItem(chan)\n\n self.comboBox_pilatus_channels.setCurrentIndex(self.settings.value('johann_alignment_pilatus_channel', defaultValue=0, type=int))\n self.comboBox_pilatus_channels.currentIndexChanged.connect(self.update_pilatus_channel_selection)\n\n self.push_scan_crystal_y.clicked.connect(self.scan_crystal_y)\n self.push_scan_energy.clicked.connect(self.scan_energy)\n\n self.spinBox_crystal_park_x.setValue(self.settings.value('johann_crystal_park_x', defaultValue=0, type=float))\n self.spinBox_crystal_park_y.setValue(self.settings.value('johann_crystal_park_y', defaultValue=0, type=float))\n\n self.spinBox_bragg_angle_nom.setValue(self.settings.value('johann_bragg_angle_nom', defaultValue=0))\n self.spinBox_crystal_nom_x.setValue(self.settings.value('johann_crystal_x_nom', defaultValue=0))\n self.spinBox_crystal_nom_y.setValue(self.settings.value('johann_crystal_y_nom', defaultValue=0))\n self.spinBox_det_nom_y.setValue(self.settings.value('johann_det_nom_y', defaultValue=0))\n 
self.spinBox_crystal_stage_nom_x.setValue(self.settings.value('crystal_stage_nom_x', defaultValue=0))\n self.spinBox_crystal_stage_nom_y.setValue(self.settings.value('crystal_stage_nom_y', defaultValue=0))\n\n self.update_tweak_motor()\n\n _tweak_motor_list = [self.comboBox_johann_tweak_motor.itemText(i)\n for i in range(self.comboBox_johann_tweak_motor.count())]\n self._alignment_data = pd.DataFrame(columns=_tweak_motor_list + ['fwhm', 'ecen', 'uid'])\n self._calibration_data = pd.DataFrame(columns=['energy_nom', 'energy_act', 'fwhm', 'uid'])\n\n\n self.edit_reg_E.setText(self.settings.value('johann_registration_energy', defaultValue='9000'))\n self.edit_reg_E_lo.setText(self.settings.value('johann_registration_energy_lo', defaultValue='8900'))\n self.edit_reg_E_hi.setText(self.settings.value('johann_registration_energy_hi', defaultValue='9100'))\n self.edit_reg_E.textChanged.connect(self.update_soft_energy_limits)\n self.edit_reg_E_lo.textChanged.connect(self.update_settings_reg_E_lo)\n self.edit_reg_E_hi.textChanged.connect(self.update_settings_reg_E_hi)\n\n self.lineEdit_current_spectrometer_file.setText(self.settings.value('johann_registration_file_str', defaultValue=''))\n self.lineEdit_current_spectrometer_file.textChanged.connect(self.update_settings_current_spectrometer_file)\n\n self.align_motor_dict = {'Crystal X' : 'auxxy_x',\n 'Bender' : 'bender',\n 'Crystal Z' : 'usermotor1'}\n\n self.push_initialize_emission_motor.clicked.connect(self.initialize_emission_motor)\n self.push_save_emission_motor.clicked.connect(self.save_emission_motor)\n self.push_select_config_file.clicked.connect(self.select_config_file)\n self.push_load_emission_motor.clicked.connect(self.load_emission_motor)\n self.push_calibrate_energy.clicked.connect(self.calibrate_energy)\n self.push_save_calibration.clicked.connect(self.save_energy_calibration)\n self.push_select_calibration.clicked.connect(self.select_energy_calibration_file)\n self.push_load_calibration.clicked.connect(self.load_energy_calibration)\n\n self.edit_E_calib_min.textChanged.connect(self.update_settings_calib_E_min)\n\n\n\n\n self.lineEdit_current_calibration_file.setText(self.settings.value('johann_calibration_file_str', defaultValue=''))\n self.lineEdit_current_calibration_file.textChanged.connect(self.update_settings_current_calibration_file)\n\n self._update_crystal_info()\n\n\n\n def update_crystal_parking(self):\n park_x = self.motor_dictionary['auxxy_x']['object'].user_readback.get()\n park_y = self.motor_dictionary['auxxy_y']['object'].user_readback.get()\n self.spinBox_crystal_park_x.setValue(park_x)\n self.spinBox_crystal_park_y.setValue(park_y)\n self.settings.setValue('johann_crystal_park_x', park_x)\n self.settings.setValue('johann_crystal_park_y', park_y)\n\n\n def park_crystal(self):\n x = self.spinBox_crystal_park_x.value()\n y = self.spinBox_crystal_park_y.value()\n\n self.RE(bps.mv(self.motor_dictionary['auxxy_x']['object'], x))\n self.RE(bps.mv(self.motor_dictionary['auxxy_y']['object'], y))\n\n\n def set_default_soft_limits(self, *, dx_hi=50, dx_lo=50, dy_hi=50, dy_lo=50):\n motor_x = self.motor_dictionary['auxxy_x']['object']\n motor_y = self.motor_dictionary['auxxy_y']['object']\n\n x_cur = motor_x.user_readback.get()\n y_cur = motor_y.user_readback.get()\n\n motor_x.high_limit_travel.put(x_cur + dx_hi)\n motor_x.low_limit_travel.put(x_cur - dx_lo)\n\n motor_y.high_limit_travel.put(y_cur + dy_hi)\n motor_y.low_limit_travel.put(y_cur - dy_lo)\n\n\n def compute_crystal_position(self):\n energy = 
float(self.widget_emission_energy.edit_E.text())\n self._update_crystal_info()\n R = float(self.widget_emission_energy.edit_crystal_R.text())\n cr = Crystal(R, 100, self._hkl, self._kind)\n cr.place_E(energy)\n bragg_angle = cr.ba_deg\n cr_x = cr.x\n cr_y = cr.y\n det_y = cr.d_y\n cr_x_stage = self.spinBox_crystal_park_x.value() + (R - cr_x)\n cr_y_stage = self.spinBox_crystal_park_y.value() + cr_y\n\n self.spinBox_bragg_angle_nom.setValue(bragg_angle)\n self.spinBox_crystal_nom_x.setValue(cr_x)\n self.spinBox_crystal_nom_y.setValue(cr_y)\n self.spinBox_det_nom_y.setValue(det_y)\n\n self.spinBox_crystal_stage_nom_x.setValue(cr_x_stage)\n self.spinBox_crystal_stage_nom_y.setValue(cr_y_stage)\n\n self.settings.setValue('johann_bragg_angle_nom', bragg_angle)\n self.settings.setValue('johann_crystal_x_nom', cr_x)\n self.settings.setValue('johann_crystal_y_nom', cr_y)\n self.settings.setValue('johann_det_nom_y', det_y)\n\n self.settings.setValue('crystal_stage_nom_x', cr_x_stage)\n self.settings.setValue('crystal_stage_nom_y', cr_y_stage)\n\n def _update_crystal_info(self):\n self._R = float(self.widget_emission_energy.edit_crystal_R.text())\n self._kind = self.widget_emission_energy.comboBox_crystal_kind.currentText()\n _reflection = self.widget_emission_energy.lineEdit_reflection.text()\n self._hkl = [int(i) for i in _reflection[1:-1].split(',')]\n\n\n def move_crystal(self):\n motor_x = self.motor_dictionary['auxxy_x']['object']\n motor_y = self.motor_dictionary['auxxy_y']['object']\n\n x = self.spinBox_crystal_stage_nom_x.value()\n y = self.spinBox_crystal_stage_nom_y.value()\n\n self.RE(bps.mv(motor_x, x))\n self.RE(bps.mv(motor_y, y))\n\n def update_tweak_motor(self):\n value = self.comboBox_johann_tweak_motor.currentText()\n if value == 'Crystal X':\n motor = self.motor_dictionary['auxxy_x']['object']\n step_size = 2.5\n elif value == 'Bender':\n motor = self.motor_dictionary['bender']['object']\n step_size = 5\n elif value == 'Crystal Z':\n motor = self.motor_dictionary['usermotor1']['object']\n step_size = 2.5\n\n self.doubleSpinBox_tweak_motor_step.setValue(step_size)\n pos = motor.user_readback.get()\n self.doubleSpinBox_tweak_motor_pos.setValue(pos)\n self._cur_alignment_motor = motor\n\n def update_pilatus_channel_selection(self):\n idx = self.comboBox_pilatus_channels.currentIndex()\n self.settings.setValue('johann_alignment_pilatus_channel', idx)\n\n def tweak_up(self):\n self._tweak(1)\n\n def tweak_down(self):\n self._tweak(-1)\n\n def _tweak(self, direction):\n motor = self._cur_alignment_motor\n step = self.doubleSpinBox_tweak_motor_step.value()\n self.RE(bps.mvr(motor, direction * step))\n pos = motor.user_readback.get()\n self.doubleSpinBox_tweak_motor_pos.setValue(pos)\n\n def scan_crystal_y(self):\n detector = self.detector_dictionary['Pilatus 100k']['device']\n channel = self.comboBox_pilatus_channels.currentText()\n motor = self.motor_dictionary['auxxy_y']['object']\n scan_range = self.doubleSpinBox_range_crystal_y.value()\n scan_step = self.doubleSpinBox_step_crystal_y.value()\n self._run_any_scan(detector, channel, motor, scan_range, scan_step)\n\n\n def scan_energy(self):\n detector = self.detector_dictionary['Pilatus 100k']['device']\n channel = self.comboBox_pilatus_channels.currentText()\n motor = self.motor_dictionary['hhm_energy']['object']\n scan_range = self.doubleSpinBox_range_energy.value()\n scan_step = self.doubleSpinBox_step_energy.value()\n uids = self._run_any_scan(detector, channel, motor, scan_range, scan_step)\n 
self.analyze_resolution_scan(uids)\n\n\n\n def analyze_resolution_scan(self, uids):\n uid = uids[0]\n Ecen, fwhm, I_cor, I_fit, I_fit_raw, E = analyze_elastic_scan(self.db, uid)\n print(f'Peak width: {fwhm}\\n Estimated resolution: {np.sqrt(fwhm**2 - (1.3e-4 * Ecen)**2)}')\n data_dict = {}\n for k in self.align_motor_dict.keys():\n proper_key = self.align_motor_dict[k]\n motor = self.motor_dictionary[proper_key]['object']\n data_dict[k] = motor.user_readback.get()\n data_dict['fwhm'] = fwhm\n data_dict['ecen'] = Ecen\n data_dict['uid'] = uid\n\n self._alignment_data = self._alignment_data.append(data_dict, ignore_index=True)\n\n self.parent.update_scan_figure_for_energy_scan(E, I_fit_raw)\n key = self.comboBox_johann_tweak_motor.currentText()\n self.parent.update_proc_figure(key)\n\n def update_soft_energy_limits(self, *, dE_lo=50, dE_hi=50):\n current_value = float(self.edit_reg_E.text())\n e_lo = current_value - dE_lo\n e_hi = current_value + dE_hi\n self.edit_reg_E_lo.setText(str(e_lo))\n self.edit_reg_E_hi.setText(str(e_hi))\n self.settings.setValue('johann_registration_energy', str(current_value))\n\n\n def update_settings_reg_E_lo(self):\n e_lo = self.edit_reg_E_lo.text()\n self.settings.setValue('johann_registration_energy_lo', str(e_lo))\n\n def update_settings_reg_E_hi(self):\n e_hi = self.edit_reg_E_hi.text()\n self.settings.setValue('johann_registration_energy_hi', str(e_hi))\n\n\n def update_settings_current_spectrometer_file(self):\n value = self.lineEdit_current_spectrometer_file.text()\n self.settings.setValue('johann_registration_file_str', value)\n\n def _initialize_emission_motor(self, registration_energy, R, kind, hkl, cr_x0=None, cr_y0=None, det_y0=None, energy_limits=None):\n self.motor_emission.define_motor_coordinates(registration_energy, R, kind, hkl,\n cr_x0=cr_x0, cr_y0=cr_y0, det_y0=det_y0, energy_limits=energy_limits)\n self.parent.parent.widget_info_beamline.push_set_emission_energy.setEnabled(1)\n\n def initialize_emission_motor(self):\n registration_energy = float(self.edit_reg_E.text())\n R = self._R\n kind = self._kind\n hkl = self._hkl\n\n energy_limits_lo = float(self.edit_reg_E_lo.text())\n energy_limits_hi = float(self.edit_reg_E_hi.text())\n energy_limits = (energy_limits_lo, energy_limits_hi)\n\n self._initialize_emission_motor(registration_energy, R, kind, hkl, energy_limits=energy_limits)\n print('Successfully initialized the emission motor')\n\n def save_emission_motor(self):\n user_folder_path = (self.motor_dictionary['motor_emission']['object'].spectrometer_root_path +\n f\"/{self.RE.md['year']}/{self.RE.md['cycle']}/{self.RE.md['PROPOSAL']}\")\n filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save spectrometer motor config...', user_folder_path, '*.jcfg',\n options=QtWidgets.QFileDialog.DontConfirmOverwrite)[0]\n if not filename.endswith('.jcfg'):\n filename = filename + '.jcfg'\n print(filename)\n\n spectrometer_dict = {}\n\n spectrometer_dict['registration_energy'] = self.motor_emission.energy0\n spectrometer_dict['R'] = self.motor_emission.crystal.R\n spectrometer_dict['kind'] = self.motor_emission.crystal.kind\n spectrometer_dict['hkl'] = self.motor_emission.crystal.hkl\n spectrometer_dict['cr_x0'] = self.motor_emission.cr_x0\n spectrometer_dict['cr_y0'] = self.motor_emission.cr_y0\n spectrometer_dict['det_y0'] = self.motor_emission.det_y0\n spectrometer_dict['energy_limits_lo'] = self.motor_emission.energy.limits[0]\n spectrometer_dict['energy_limits_hi'] = self.motor_emission.energy.limits[1]\n\n with open(filename, 'w') as f:\n 
f.write(json.dumps(spectrometer_dict))\n print('Successfully saved the spectrometer config')\n self.lineEdit_current_spectrometer_file.setText(filename)\n # self.settings.setValue('johann_registration_file_str', filename)\n\n def select_config_file(self):\n user_folder_path = (self.motor_emission.spectrometer_root_path +\n f\"/{self.RE.md['year']}/{self.RE.md['cycle']}/{self.RE.md['PROPOSAL']}\")\n filename = QtWidgets.QFileDialog.getOpenFileName(directory=user_folder_path,\n filter='*.jcfg', parent=self)[0]\n self.lineEdit_current_spectrometer_file.setText(filename)\n # self.settings.setValue('johann_registration_file_str', filename)\n\n def load_emission_motor(self):\n filename = self.lineEdit_current_spectrometer_file.text()\n print(filename)\n if filename:\n with open(filename, 'r') as f:\n spectrometer_dict = json.loads(f.read())\n energy_limits = (spectrometer_dict['energy_limits_lo'], spectrometer_dict['energy_limits_hi'])\n self._initialize_emission_motor(spectrometer_dict['registration_energy'],\n spectrometer_dict['R'],\n spectrometer_dict['kind'],\n spectrometer_dict['hkl'],\n cr_x0=spectrometer_dict['cr_x0'],\n cr_y0=spectrometer_dict['cr_y0'],\n det_y0=spectrometer_dict['det_y0'],\n energy_limits=energy_limits)\n\n print('Successfully loaded the spectrometer config')\n\n def update_settings_calib_E_min(self):\n value = self.edit_E_calib_min.text()\n self.settings.setValue('johann_calibration_energy_min', str(value))\n\n def update_settings_calib_E_max(self):\n value = self.edit_E_calib_max.text()\n self.settings.setValue('johann_calibration_energy_max', str(value))\n\n def update_settings_calib_E_step(self):\n value = self.edit_E_calib_step.text()\n self.settings.setValue('johann_calibration_energy_step', str(value))\n\n\n def calibrate_energy(self):\n # update the data\n self._calibration_data = pd.DataFrame(columns=['energy_nom', 'energy_act', 'resolution', 'uid'])\n\n e_min = float(self.edit_E_calib_min.text())\n e_max = float(self.edit_E_calib_max.text())\n e_step = float(self.edit_E_calib_step.text())\n if e_min>e_max:\n message_box('Incorrect energy range','Calibration energy min should be less than max')\n return\n if (e_max - e_min) < e_step:\n message_box('Incorrect energy range','energy step size bigger than range')\n return\n\n energies = np.arange(e_min, e_max + e_step, e_step)\n each_scan_range = self.doubleSpinBox_range_energy.value()\n each_scan_step = self.doubleSpinBox_step_energy.value()\n\n plan = self.service_plan_funcs['johann_calibration_scan_plan'](energies, DE=each_scan_range, dE=each_scan_step)\n channel = self.comboBox_pilatus_channels.currentText()\n motor = self.motor_dictionary['hhm_energy']['object']\n uids = self.RE(plan, LivePlot(channel, motor.name, ax=self.parent.figure_scan.ax))\n\n energy_converter, energies_act, resolutions, I_fit_raws = analyze_many_elastic_scans(self.db, uids, energies, short_output=False)\n self.motor_emission.append_energy_converter(energy_converter)\n\n for each_energy_nom, each_energy_act, each_resolution, each_uid in zip(energies, energies_act, resolutions, uids):\n data_dict = {'energy_nom' : each_energy_nom,\n 'energy_act' : each_energy_act,\n 'resolution' : each_resolution,\n 'uid' : each_uid}\n self._calibration_data = self._calibration_data.append(data_dict, ignore_index=True)\n self.parent.update_proc_figure('calibration')\n\n\n\n def save_energy_calibration(self):\n user_folder_path = (self.motor_dictionary['motor_emission']['object'].spectrometer_root_path +\n 
f\"/{self.RE.md['year']}/{self.RE.md['cycle']}/{self.RE.md['PROPOSAL']}\")\n filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save spectrometer motor config...', user_folder_path, '*.jcalib',\n options=QtWidgets.QFileDialog.DontConfirmOverwrite)[0]\n if not filename.endswith('.jcalib'):\n filename = filename + '.jcalib'\n print(filename)\n\n self._calibration_data.to_json(filename)\n print('Successfully saved the spectrometer calibration')\n\n def select_energy_calibration_file(self):\n user_folder_path = (self.motor_emission.spectrometer_root_path +\n f\"/{self.RE.md['year']}/{self.RE.md['cycle']}/{self.RE.md['PROPOSAL']}\")\n filename = QtWidgets.QFileDialog.getOpenFileName(directory=user_folder_path,\n filter='*.jcalib', parent=self)[0]\n self.lineEdit_current_calibration_file.setText(filename)\n\n def update_settings_current_calibration_file(self):\n value = self.lineEdit_current_calibration_file.text()\n self.settings.setValue('johann_calibration_file_str', value)\n\n def load_energy_calibration(self):\n filename = self.lineEdit_current_calibration_file.text()\n self._calibration_data = pd.read_json(filename)\n energies_nom = self._calibration_data['energy_nom'].values\n energies_act = self._calibration_data['energy_act'].values\n energy_converter = Nominal2ActualConverter(energies_nom, energies_act)\n self.motor_emission.append_energy_converter(energy_converter)\n print('Successfully loaded the spectrometer calibration')\n\n\n\n\n\n\n\n\n\n\n\n"
] | [
[
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"numpy.percentile",
"matplotlib.figure.Figure"
],
[
"numpy.arange",
"numpy.sqrt",
"pandas.read_json",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
hlin09/metrics | [
"cceced613f4323a1f5124099a969f2cf32a80d7e"
] | [
"tests/classification/test_auc.py"
] | [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom collections import namedtuple\nfrom functools import partial\n\nimport numpy as np\nimport pytest\nfrom sklearn.metrics import auc as _sk_auc\nfrom torch import tensor\n\nfrom tests.helpers import seed_all\nfrom tests.helpers.testers import NUM_BATCHES, MetricTester\nfrom torchmetrics.classification.auc import AUC\nfrom torchmetrics.functional import auc\n\nseed_all(42)\n\n\ndef sk_auc(x, y, reorder=False):\n x = x.flatten()\n y = y.flatten()\n if reorder:\n idx = np.argsort(x, kind='stable')\n x = x[idx]\n y = y[idx]\n return _sk_auc(x, y)\n\n\nInput = namedtuple('Input', [\"x\", \"y\"])\n\n_examples = []\n# generate already ordered samples, sorted in both directions\nfor batch_size in (8, 4049):\n for i in range(4):\n x = np.random.rand((NUM_BATCHES * batch_size))\n y = np.random.rand((NUM_BATCHES * batch_size))\n idx = np.argsort(x, kind='stable')\n x = x[idx] if i % 2 == 0 else x[idx[::-1]]\n y = y[idx] if i % 2 == 0 else x[idx[::-1]]\n x = x.reshape(NUM_BATCHES, batch_size)\n y = y.reshape(NUM_BATCHES, batch_size)\n _examples.append(Input(x=tensor(x), y=tensor(y)))\n\n\[email protected](\"x, y\", _examples)\nclass TestAUC(MetricTester):\n\n @pytest.mark.parametrize(\"ddp\", [False])\n @pytest.mark.parametrize(\"dist_sync_on_step\", [True, False])\n def test_auc(self, x, y, ddp, dist_sync_on_step):\n self.run_class_metric_test(\n ddp=ddp,\n preds=x,\n target=y,\n metric_class=AUC,\n sk_metric=sk_auc,\n dist_sync_on_step=dist_sync_on_step,\n )\n\n @pytest.mark.parametrize(\"reorder\", [True, False])\n def test_auc_functional(self, x, y, reorder):\n self.run_functional_metric_test(\n x, y, metric_functional=auc, sk_metric=partial(sk_auc, reorder=reorder), metric_args={\"reorder\": reorder}\n )\n\n\[email protected](['x', 'y', 'expected'], [\n pytest.param([0, 1], [0, 1], 0.5),\n pytest.param([1, 0], [0, 1], 0.5),\n pytest.param([1, 0, 0], [0, 1, 1], 0.5),\n pytest.param([0, 1], [1, 1], 1),\n pytest.param([0, 0.5, 1], [0, 0.5, 1], 0.5),\n])\ndef test_auc(x, y, expected):\n # Test Area Under Curve (AUC) computation\n assert auc(tensor(x), tensor(y), reorder=True) == expected\n"
] | [
[
"numpy.argsort",
"sklearn.metrics.auc",
"numpy.random.rand",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
PEtab-dev/petab_test_suite | [
"1ff2d0c895dec90bd519e72ec226acb7573b54c9"
] | [
"petabtests/cases/0015/0015.py"
] | [
"from petabtests import *\nfrom petab.C import *\nimport petab\n\nimport pandas as pd\n\n\ntest_id = 15\n\n# problem --------------------------------------------------------------------\n\nmodel = DEFAULT_SBML_FILE\n\ncondition_df = pd.DataFrame(data={\n CONDITION_ID: ['c0'],\n}).set_index([CONDITION_ID])\n\nmeasurement_df = pd.DataFrame(data={\n OBSERVABLE_ID: ['obs_a', 'obs_a'],\n SIMULATION_CONDITION_ID: ['c0', 'c0'],\n TIME: [0, 10],\n MEASUREMENT: [0.7, 0.1],\n NOISE_PARAMETERS: ['noise', 'noise']\n})\n\nobservable_df = pd.DataFrame(data={\n OBSERVABLE_ID: ['obs_a'],\n OBSERVABLE_FORMULA: ['A'],\n NOISE_FORMULA: ['noiseParameter1_obs_a']\n}).set_index([OBSERVABLE_ID])\n\nparameter_df = pd.DataFrame(data={\n PARAMETER_ID: ['a0', 'b0', 'k1', 'k2', 'noise'],\n PARAMETER_SCALE: [LIN] * 5,\n LOWER_BOUND: [0] * 5,\n UPPER_BOUND: [10] * 5,\n NOMINAL_VALUE: [1, 0, 0.8, 0.6, 5],\n ESTIMATE: [1] * 5,\n}).set_index(PARAMETER_ID)\n\n\n# solutions ------------------------------------------------------------------\n\nsimulation_df = measurement_df.copy(deep=True).rename(\n columns={MEASUREMENT: SIMULATION})\nsimulation_df[SIMULATION] = [analytical_a(t, 1, 0, 0.8, 0.6)\n for t in simulation_df[TIME]]\n\nchi2 = petab.calculate_chi2(\n measurement_df, simulation_df, observable_df, parameter_df)\n\nllh = petab.calculate_llh(\n measurement_df, simulation_df, observable_df, parameter_df)\nprint(llh)\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
shonaka/pytorch_challenge | [
"6e7d357e1ec1f01687664714efa2f04e64014f94"
] | [
"main.py"
] | [
"\"\"\"\nMain file for PyTorch Challenge Final Project\n\"\"\"\nfrom __future__ import print_function, division\nimport json\nimport yaml\nimport time\nimport torch\nimport optuna\nfrom torchvision import transforms\nfrom pathlib import Path\nfrom torchsummary import summary\nfrom torchvision import models\n# Custom functions and classes\nfrom trainer.trainer import train_and_eval, objective\nfrom utils.arg_yaml import get_args, get_parser\nfrom utils.util import download_data, check_dir_and_create\nfrom utils.logfun import set_logger, timer\nfrom utils.visualization import fig_loss_acc\nfrom data_loader.data_loaders import create_dataloader\nfrom model.model import SimpleCNN, Pretrained\n# Just for debugging purpose. You could delete this later.\nimport pdb\n\n\nif __name__ == '__main__':\n # Get the default and specified arguments\n args = get_args(get_parser(\"config.yaml\"))\n\n # Defining a name for the model to be saved\n header = args.trial + '_' + args.model_type\n model_name = header + '.pth.tar'\n\n # Specifying some paths\n DATA_DIR = Path(\"data\")\n RESULTS_DIR = Path(\"results\") / header\n LOG_DIR = RESULTS_DIR / \"logs\"\n FIG_DIR = RESULTS_DIR / \"figures\"\n # Just checking if the directory exists, if not creating\n check_dir_and_create(str(DATA_DIR))\n check_dir_and_create(str(RESULTS_DIR))\n check_dir_and_create(str(LOG_DIR))\n check_dir_and_create(str(FIG_DIR))\n\n # Custom function for logging\n log = set_logger(str(LOG_DIR), args.log_name)\n\n # Using a custom function to download the data\n download_data(data_dir=DATA_DIR, data_name=args.file_name, zip_name=args.zip_name, url=args.url)\n\n # Use GPU if available\n torch_gpu = torch.cuda.is_available()\n\n # Directories to training and validation\n directories = {x: DATA_DIR / args.file_name / x for x in ['train', 'valid']}\n # If you were to use transfer learning on pre-trained network that was trained on\n # ImageNet, you need to specifically use the following normalization parameters\n # https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html\n if args.model_type == 'simplecnn':\n normalization = transforms.Normalize(\n mean=[0.5, 0.5, 0.5],\n std=[0.5, 0.5, 0.5]\n )\n else:\n normalization = transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]\n )\n d_size, d_loaders = create_dataloader(normalization, directories, batch_size=args.batch_size)\n\n # Get some other parameters\n num_classes = len(d_loaders['train'].dataset.classes)\n\n # Logging some information\n log.info(\"PyTorch version: {}\".format(torch.__version__))\n log.info(\"Model using: {}\".format(args.model_type))\n log.info(\"Hyperparameter optimization: {}\".format(args.optuna_tune))\n log.info(\"Batch size: {}\".format(args.batch_size))\n log.info(\"Using GPU: {}\".format(str(torch_gpu)))\n log.info(\"Number of training samples: {}\".format(len(d_loaders['train'].dataset.samples)))\n log.info(\"Number of classes: {}\".format(num_classes))\n log.info(\"Dimensions of an image: {}\".format(str(next(iter(d_loaders['train']))[0].shape)))\n\n # Loading labels provided by Udacity\n # https://github.com/udacity/pytorch_challenge\n with open('cat_to_name.json', 'r') as f:\n cat_to_name = json.load(f)\n\n # Building a model\n params = {\n 'nc': num_classes,\n }\n\n # Define the model\n if args.model_type == 'simplecnn':\n model = SimpleCNN(args, params)\n else:\n model = Pretrained(args, params)\n # Make sure to put the model into GPU\n model.cuda() if torch_gpu else model.cpu()\n # Using multiple GPUs\n model = 
torch.nn.DataParallel(model)\n\n # Good for checking the architecture\n summary(model, input_size=(3, 224, 224), batch_size=args.batch_size)\n\n # A function to perform training and validation\n log.info(\"Start Training\")\n start = time.time()\n if args.optuna_tune:\n # Running hyperparameter optimization using optuna\n # n_warmup_steps = 10 to at least run 10 epochs before deciding to prune\n study = optuna.create_study(pruner=optuna.pruners.MedianPruner(n_warmup_steps=10))\n # use lambda if you want to pass more agruments other than the \"trial\"\n study.optimize(lambda trial: objective(trial, params, d_size, d_loaders, torch_gpu, args),\n n_trials=args.optuna_trials)\n # after the optimization, this is how you get the best parameters\n best_params = study.best_params\n # The best error_rate\n best_error = study.best_value\n log.info(\"Best params are: {}\".format(str(best_params)))\n log.info(\"Best error_rate is: {:.4f}\".format(best_error))\n # now running with the best parameters for saving results purposes\n # TODO: optimize this part a little, I shouldn't have to run twice\n # TODO: make parsing of the best parameters automatic as well\n args.optim_lr = float(best_params['lr'])\n args.optim_type = str(best_params['optimizer'])\n args.optim_amsgrad = bool(best_params['amsgrad'])\n args.optim_weight_decay = float(best_params['weight_decay'])\n log.info(\"Final testing with the best parameters\")\n t_loss, t_acc, v_loss, v_acc = train_and_eval(model,\n d_size,\n d_loaders,\n torch_gpu,\n log,\n args)\n else:\n t_loss, t_acc, v_loss, v_acc = train_and_eval(model,\n d_size,\n d_loaders,\n torch_gpu,\n log,\n args)\n end = time.time()\n log.info(\"Finsihed Training\")\n hours, mins, seconds = timer(start, end)\n log.info(\"Training and testing took: {:0>2} Hours {:0>2} minutes {:05.2f} seconds\".format(int(hours), int(mins), seconds))\n\n # Save the model\n torch.save(model.state_dict(), str(RESULTS_DIR / model_name))\n\n # Log the results and save the figures\n fig_loss_acc(t_loss, v_loss, \"loss\", FIG_DIR)\n fig_loss_acc(t_acc, v_acc, \"acc\", FIG_DIR)\n\n # Log the parameters and results\n dict_params = vars(args)\n dict_params['final_train_loss'] = round(t_loss[-1], 4)\n dict_params['final_train_acc'] = round(t_acc[-1], 4)\n dict_params['final_valid_loss'] = round(v_loss[-1], 4)\n dict_params['final_valid_acc'] = round(v_acc[-1], 4)\n print(type(dict_params))\n print(dict_params)\n with open(str(RESULTS_DIR / \"results.yaml\"), 'w') as output_file:\n yaml.dump(dict_params, output_file, default_flow_style=False)\n with open(str(RESULTS_DIR / \"results.json\"), 'w') as output_file:\n json.dump(dict_params, output_file)\n"
] | [
[
"torch.nn.DataParallel",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
PeterouZh/SemiNAS | [
"41e044b49bc7fbc3cfb832ebef988fb690024b2e",
"41e044b49bc7fbc3cfb832ebef988fb690024b2e"
] | [
"tts/tasks.py",
"tts/test.py"
] | [
"import os\nimport sys\nimport json\nimport logging\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nmatplotlib.use('Agg')\nimport torch\nfrom torch import nn\nimport torch.optim\nimport torch.utils.data\nimport torch.nn.functional as F\nimport utils\nimport logging\nfrom text_encoder import TokenTextEncoder\nfrom preprocessor import _process_utterance\nimport audio\nimport model, model_ws\n\n\ndef collate(samples, pad_idx, eos_idx):\n if len(samples) == 0:\n return {}\n\n id = torch.LongTensor([s['id'] for s in samples])\n utt_id = torch.LongTensor([s['utt_id'] for s in samples]) if samples[0]['utt_id'] is not None else None\n text = [s['text'] for s in samples] if samples[0]['text'] is not None else None\n src_tokens = utils.collate_tokens([s['source'] for s in samples], pad_idx)\n target = utils.collate_mels([s['target'] for s in samples], pad_idx) if samples[0]['target'] is not None else None\n prev_output_mels = utils.collate_mels([s['target'] for s in samples], pad_idx, shift_right=True) if target is not None else None\n # sort by descending source length\n src_lengths = torch.LongTensor([s['source'].numel() for s in samples])\n target_lengths = torch.LongTensor([s['target'].shape[0] for s in samples]) if target is not None else None\n if target is not None and target_lengths is not None:\n target_lengths, sort_order = target_lengths.sort(descending=True)\n target = target.index_select(0, sort_order)\n prev_output_mels = prev_output_mels.index_select(0, sort_order)\n src_tokens = src_tokens.index_select(0, sort_order)\n src_lengths = src_lengths.index_select(0, sort_order)\n else:\n src_lengths, sort_order = src_lengths.sort(descending=True) \n src_tokens = src_tokens.index_select(0, sort_order)\n id = id.index_select(0, sort_order)\n utt_id = utt_id.index_select(0, sort_order) if utt_id is not None else None\n text = [text[i] for i in sort_order] if text is not None else None\n ntokens = sum(len(s['source']) for s in samples)\n nmels = sum(len(s['target']) for s in samples) if target is not None else None\n \n batch = {\n 'id': id,\n 'utt_id': utt_id,\n 'nsamples': len(samples),\n 'ntokens': ntokens,\n 'nmels': nmels,\n 'text': text,\n 'src_tokens': src_tokens,\n 'src_lengths': src_lengths,\n 'targets': target,\n 'target_lengths': target_lengths,\n 'prev_output_mels': prev_output_mels\n }\n return batch\n\n\nclass LJSpeechRawDataset(torch.utils.data.Dataset):\n def __init__(self, data_dir, model_hparams, phone_encoder, utt_ids=None, shuffle=False):\n super().__init__()\n self.model_hparams = model_hparams\n self.phone_encoder = phone_encoder\n self.shuffle = shuffle\n\n self.utt_ids = None\n self.texts = None\n self.phones = None\n self.mels = None\n self.sizes = None \n self.read_data(data_dir, utt_ids)\n\n def produce_result(self, utt_id, text, phone, mel):\n phones = phone.split(\" \")\n phones += ['|']\n phones = ' '.join(phones)\n try:\n utt_id = int(utt_id)\n phone_encoded = torch.LongTensor(self.phone_encoder.encode(phones) + [self.phone_encoder.eos()])\n mel = torch.Tensor(mel.T) #(T*80)\n except Exception as e:\n logging.info('{} {}'.format(e, text))\n return None\n\n return utt_id, text, phone_encoded, mel\n\n def read_data(self, data_dir, utt_ids=None):\n data_df = pd.read_csv(os.path.join(data_dir, 'metadata_phone.csv'))\n self.utt_ids = []\n self.texts = []\n self.phones = []\n self.mels = []\n self.sizes = [] \n if utt_ids is None:\n for idx, r in data_df.iterrows():\n utt_id, text, phone, mel = self.produce_result(idx, r['txt2'], r['phone2'], 
self.process_wav(data_dir, r['wav'])[1])\n self.utt_ids.append(utt_id)\n self.texts.append(text)\n self.phones.append(phone)\n self.mels.append(mel)\n self.sizes.append(len(mel))\n else:\n for utt_id in utt_ids:\n r = data_df.iloc[utt_id]\n utt_id, text, phone, mel = self.produce_result(utt_id, r['txt2'], r['phone2'], self.process_wav(data_dir, r['wav'])[1])\n self.utt_ids.append(utt_id)\n self.texts.append(text)\n self.phones.append(phone)\n self.mels.append(mel)\n self.sizes.append(len(mel))\n \n def __getitem__(self, index):\n sample = {\"id\": index, \n \"utt_id\": self.utt_ids[index] if self.utt_ids is not None else None,\n \"text\": self.texts[index] if self.texts is not None else None,\n \"source\": self.phones[index] if self.phones is not None else None,\n \"target\": self.mels[index] if self.mels is not None else None}\n return sample\n\n def __len__(self):\n return len(self.sizes)\n\n def collater(self, samples):\n return collate(\n samples, pad_idx=self.phone_encoder.pad(), eos_idx=self.phone_encoder.eos()\n )\n\n def num_tokens(self, index):\n return self.size(index)\n\n def size(self, index):\n \"\"\"Return an example's size as a float or tuple. This value is used when\n filtering a dataset with ``--max-positions``.\"\"\"\n return min(self.sizes[index], self.model_hparams.max_sample_size)\n\n def ordered_indices(self):\n \"\"\"Return an ordered list of indices. Batches will be constructed based\n on this order.\"\"\"\n if self.shuffle:\n indices = np.random.permutation(len(self))\n else:\n indices = np.arange(len(self))\n return indices[np.argsort(np.array(self.sizes)[indices], kind='mergesort')]\n\n\nclass LJSpeechDataset(LJSpeechRawDataset):\n def __init__(self, data_dir, prefix, model_hparams, phone_encoder, shuffle=False):\n super().__init__(data_dir, model_hparams, phone_encoder, prefix, shuffle)\n\n def read_data(self, data_dir, prefix):\n if os.path.exists(os.path.join(data_dir, '{}.idx'.format(prefix))):\n with open(os.path.join(data_dir, '{}.idx'.format(prefix)), 'r') as f:\n self.utt_ids = [int(line) for line in f.readlines()]\n if os.path.exists(os.path.join(data_dir, '{}.text'.format(prefix))):\n with open(os.path.join(data_dir, '{}.text'.format(prefix)), 'r') as f:\n self.texts = [line.strip() for line in f.readlines()]\n if os.path.exists(os.path.join(data_dir, '{}.phone'.format(prefix))):\n with open(os.path.join(data_dir, '{}.phone'.format(prefix)), 'r') as f:\n self.phones = [torch.LongTensor(list(map(int, line.strip().split()))) for line in f.readlines()]\n if os.path.exists(os.path.join(data_dir, '{}.mel'.format(prefix))):\n with open(os.path.join(data_dir, '{}.mel'.format(prefix)), 'rb') as f:\n mels = np.load(f, allow_pickle=True)\n self.mels = [torch.Tensor(mel) for mel in mels]\n\n if self.mels:\n self.sizes = [len(mel) for mel in self.mels]\n else:\n self.sizes =[len(text) for text in self.texts]\n \n\ndef set_ljspeech_hparams(model_hparams):\n model_hparams.max_sample_size = 1500\n model_hparams.symbol_size = None\n model_hparams.save_npz = False\n model_hparams.audio_num_mel_bins = 80\n model_hparams.audio_sample_rate = 22050\n model_hparams.num_freq = 513 # (= n_fft / 2 + 1) only used when adding linear spectrograms post processing network\n model_hparams.hop_size = 256 # For 22050Hz, 275 ~= 12.5 ms (0.0125 * sample_rate)\n model_hparams.win_size = 1024 # For 22050Hz, 1100 ~= 50 ms (If None, win_size = n_fft) (0.05 * sample_rate)\n model_hparams.fmin = 0 # Set this to 55 if your speaker is male! if female, 95 should help taking off noise. 
(To test depending on dataset. Pitch info: male~[65, 260], female~[100, 525])\n model_hparams.fmax = 8000 # To be increased/reduced depending on data.\n model_hparams.n_fft = 1024 # Extra window size is filled with 0 paddings to match this parameter\n model_hparams.min_level_db = -100\n model_hparams.ref_level_db = 20\n # Griffin Lim\n model_hparams.power = 1.5 # Only used in G&L inversion, usually values between 1.2 and 1.5 are a good choice.\n model_hparams.magnitude_power = 1 # The power of the spectrogram magnitude (1. for energy, 2. for power)\n model_hparams.griffin_lim_iters = 60 # Number of G&L iterations, typically 30 is enough but we use 60 to ensure convergence.\n\n # #M-AILABS (and other datasets) trim params (there parameters are usually correct for any data, but definitely must be tuned for specific speakers)\n model_hparams.trim_fft_size = 512\n model_hparams.trim_hop_size = 128\n model_hparams.trim_top_db = 23\n model_hparams.frame_shift_ms = None # Can replace hop_size parameter. (Recommended: 12.5)\n model_hparams.use_lws = False\n model_hparams.silence_threshold = 2 # silence threshold used for sound trimming for wavenet preprocessing\n model_hparams.trim_silence = True # Whether to clip silence in Audio (at beginning and end of audio only, not the middle)\n model_hparams.vocoder = 'gl'\n model_hparams.preemphasize = False # whether to apply filter\n model_hparams.preemphasis = 0.97 # filter coefficient.\n\n\nclass RSQRTSchedule(object):\n def __init__(self, args, optimizer):\n super().__init__()\n self.optimizer = optimizer\n self.constant_lr = args.lr \n self.warmup_updates = args.warmup_updates\n self.hidden_size = args.hidden_size\n self.lr = args.lr\n for param_group in optimizer.param_groups:\n param_group['lr'] = self.lr\n\n def step(self, num_updates):\n constant_lr = self.constant_lr\n warmup = min(num_updates / self.warmup_updates, 1.0)\n rsqrt_decay = max(self.warmup_updates, num_updates)**-0.5\n rsqrt_hidden = self.hidden_size**-0.5\n self.lr = constant_lr * warmup * rsqrt_decay * rsqrt_hidden\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = self.lr\n return self.lr\n\n def get_lr(self):\n return self.optimizer.param_groups[0]['lr']\n\n\nclass LJSpeechTask(object):\n\n def __init__(self, args):\n #set_ljspeech_hparams(args)\n self.args = args\n self.ws = getattr(args, 'ws', False)\n self.arch = getattr(args, 'arch', None)\n self.data_dir = args.data_dir\n self.output_dir = args.output_dir\n self.max_tokens = args.max_tokens\n self.max_sentences = args.max_sentences\n self.max_eval_tokens = args.max_eval_tokens if getattr(args, 'max_eval_tokens', None) is not None else self.max_tokens\n self.max_eval_sentences = args.max_eval_sentences if getattr(args, 'max_eval_sentences', None) is not None else self.max_sentences\n if isinstance(self.arch, str):\n self.arch = list(map(int, self.arch.strip().split()))\n if self.arch is not None:\n self.num_heads = utils.get_num_heads(self.arch[args.enc_layers:])\n \n def setup_task(self, model_state_dict=None, optimizer_state_dict=None, ws=False):\n self.phone_encoder = self.build_phone_encoder(self.data_dir)\n self.train_dataset, self.valid_dataset, self.test_dataset = self.build_dataset(self.data_dir, self.args.raw_data)\n self.train_queue = self.build_queue(self.train_dataset, True, self.max_tokens, self.max_sentences)\n self.valid_queue = self.build_queue(self.valid_dataset, False, self.max_eval_tokens, self.max_eval_sentences)\n self.test_queue = self.build_queue(self.test_dataset, False, 
self.max_eval_tokens, self.max_eval_sentences)\n self.model = self.build_model(arch=self.arch, model_state_dict=model_state_dict, ws=self.ws | ws)\n self.optimizer = self.build_optimizer(optimizer_state_dict=optimizer_state_dict)\n self.scheduler = self.build_scheduler()\n self.padding_idx = self.phone_encoder.pad()\n self.eos_idx = self.phone_encoder.eos()\n self.seg_idx = self.phone_encoder.seg()\n #if torch.cuda.device_count() > 1:\n # torch.distributed.init_process_group(backend='nccl')\n\n def setup_search_task(self):\n self.phone_encoder = self.build_phone_encoder(self.data_dir)\n self.train_dataset, self.valid_dataset, self.test_dataset = self.build_dataset(self.data_dir, self.args.raw_data)\n self.train_queue = self.build_queue(self.train_dataset, True, self.max_tokens, self.max_sentences)\n self.valid_queue = self.build_queue(self.valid_dataset, False, self.max_eval_tokens, self.max_eval_sentences)\n self.test_queue = self.build_queue(self.test_dataset, False, self.max_eval_tokens, self.max_eval_sentences)\n self.padding_idx = self.phone_encoder.pad()\n self.eos_idx = self.phone_encoder.eos()\n self.seg_idx = self.phone_encoder.seg()\n\n def build_model(self, arch=None, model_state_dict=None, ws=False):\n if arch is None:\n arch = self.arch\n assert (arch is not None) ^ ws, 'arch and ws are mutual'\n if ws:\n _model = model_ws.NASNetwork(self.args, self.phone_encoder)\n else:\n _model = model.NASNetwork(self.args, arch, self.phone_encoder)\n if model_state_dict is not None:\n _model.load_state_dict(model_state_dict)\n if torch.cuda.is_available():\n if torch.cuda.device_count() > 1:\n _model = nn.DataParallel(_model)\n _model = _model.cuda()\n return _model\n\n def build_optimizer(self, model=None, optimizer_state_dict=None):\n if model is None:\n model = self.model\n optimizer = torch.optim.AdamW(\n model.parameters(),\n lr=self.args.lr,\n betas=(self.args.optimizer_adam_beta1, self.args.optimizer_adam_beta2),\n weight_decay=self.args.weight_decay)\n if optimizer_state_dict is not None:\n optimizer.load_state_dict(optimizer_state_dict)\n return optimizer\n\n def build_scheduler(self, optimizer=None):\n if optimizer is None:\n optimizer = self.optimizer\n return RSQRTSchedule(self.args, optimizer)\n\n def build_phone_encoder(self, data_dir):\n phone_list_file = os.path.join(data_dir, 'phone_set.json')\n phone_list = json.load(open(phone_list_file))\n return TokenTextEncoder(None, vocab_list=phone_list)\n\n def split_train_test_set(self, data_dir, test_num=500):\n data_file_name = 'metadata_phone.csv'\n data_df = pd.read_csv(os.path.join(data_dir, data_file_name))\n total_num = len(data_df.index)\n train_uttids = [i for i in np.arange(0, total_num)]\n test_uttids = []\n for _ in range(test_num):\n random_index = int(np.random.randint(0, len(train_uttids)))\n test_uttids.append(train_uttids[random_index])\n del train_uttids[random_index]\n\n logging.info(\">>test {}\".format(len(test_uttids)))\n logging.info(\">>train {}\".format(len(train_uttids)))\n logging.info(\">>total {}\".format(len(list(set(test_uttids + train_uttids)))))\n return train_uttids, test_uttids\n\n def build_dataset(self, data_dir, raw_data):\n if raw_data:\n train_utt_ids, test_utt_ids = self.split_train_test_set(data_dir)\n train_dataset = LJSpeechRawDataset(data_dir, self.args, self.phone_encoder, utt_ids=train_utt_ids, shuffle=True)\n test_dataset = LJSpeechRawDataset(data_dir, self.args, self.phone_encoder, utt_ids=test_utt_ids, shuffle=False)\n valid_dataset = test_dataset\n else:\n train_dataset = 
LJSpeechDataset(data_dir, 'train', self.args, self.phone_encoder, shuffle=True)\n valid_dataset = LJSpeechDataset(data_dir, 'valid', self.args, self.phone_encoder, shuffle=False)\n test_dataset = LJSpeechDataset(data_dir, 'test', self.args, self.phone_encoder, shuffle=False)\n return train_dataset, valid_dataset, test_dataset\n\n def build_queue(self, dataset, shuffle, max_tokens=None, max_sentences=None, required_batch_size_multiple=8):\n def shuffle_batches(batches):\n np.random.shuffle(batches)\n return batches\n\n if max_tokens is not None:\n max_tokens *= torch.cuda.device_count()\n if max_sentences is not None:\n max_sentences *= torch.cuda.device_count()\n indices = dataset.ordered_indices()\n batch_sampler = utils.batch_by_size(\n indices, dataset.num_tokens, max_tokens=max_tokens, max_sentences=max_sentences,\n required_batch_size_multiple=required_batch_size_multiple,\n )\n if shuffle:\n batches = shuffle_batches(list(batch_sampler))\n else:\n batches = batch_sampler\n return torch.utils.data.DataLoader(dataset, collate_fn=dataset.collater, batch_sampler=batches, num_workers=8)\n\n def remove_padding(self, x, hit_eos=None):\n if x is None:\n return None\n if len(x.shape) == 1: # wav\n hit_eos = np.sum(~hit_eos)\n hit_eos = (hit_eos - 1) * self.args.hop_size + self.args.win_size\n return x[:hit_eos]\n if x.shape[1] > 1: # mel\n if len(x.shape) == 3:\n x = x[:, :, :1]\n if hit_eos is not None:\n return x[~hit_eos]\n else:\n return x[np.abs(x).sum(1).reshape(-1) != 0]\n else: # text\n if len(np.where(x <= 1)[0]) > 0:\n x = x[:np.where(x <= 1)[0][0]]\n return x\n\n def weights_nonzero_speech(self, target):\n # target : B x T x mel\n # Assign weight 1.0 to all labels except for padding (id=0).\n dim = target.size(-1)\n return target.abs().sum(-1, keepdim=True).ne(0).float().repeat(1, 1, dim)\n\n def make_stop_target(self, target):\n # target : B x T x mel\n seq_mask = target.abs().sum(-1).ne(0).float()\n seq_length = seq_mask.sum(1)\n mask_r = 1 - utils.sequence_mask(seq_length - 1, target.size(1)).float()\n return seq_mask, mask_r\n\n def weighted_cross_entropy_with_logits(self, targets, logits, pos_weight=1):\n x = logits\n z = targets\n q = pos_weight\n l = 1 + (q - 1) * z\n return (1 - z) * x + l * (torch.log(1 + torch.exp(-x.abs())) + F.relu(-x))\n\n def loss(self, decoder_output, target):\n # decoder_output : B x T x (mel+1)\n # target : B x T x mel\n if target is None:\n return {\n 'decoder loss': decoder_output.new(1).fill_(0)[0],\n 'stop loss': decoder_output.new(1).fill_(0)[0],\n }\n\n predicted_mel = decoder_output[:, :, :self.args.audio_num_mel_bins]\n predicted_stop = decoder_output[:, :, -1]\n seq_mask, stop_mask = self.make_stop_target(target)\n\n decoder_loss = F.mse_loss(predicted_mel, target, reduction='none')\n weights = self.weights_nonzero_speech(target)\n decoder_loss = (decoder_loss * weights).sum() / weights.sum()\n stop_loss = (self.weighted_cross_entropy_with_logits(stop_mask, predicted_stop, self.args.stop_token_weight) * seq_mask).sum()\n stop_loss = stop_loss / (seq_mask.sum() + target.size(0) * (self.args.stop_token_weight - 1))\n \n return {\n 'decoder loss': decoder_loss,\n 'stop loss': stop_loss,\n }\n\n def train(self, model=None, optimizer=None, scheduler=None, epoch=1, num_updates=0, arch_pool=None, arch_pool_prob=None):\n if model is None:\n model = self.model\n if optimizer is None:\n optimizer = self.optimizer\n if scheduler is None:\n scheduler = self.scheduler\n \n decoder_loss_obj = utils.AvgrageMeter()\n stop_loss_obj = utils.AvgrageMeter()\n 
loss_obj = utils.AvgrageMeter()\n model.train()\n for step, sample in enumerate(self.train_queue):\n num_updates += 1\n scheduler.step(num_updates)\n input = utils.move_to_cuda(sample['src_tokens'])\n prev_output_mels = utils.move_to_cuda(sample['prev_output_mels'])\n target = utils.move_to_cuda(sample['targets'])\n optimizer.zero_grad()\n\n if arch_pool is not None:# ws train\n arch = utils.sample_arch(arch_pool, arch_pool_prob)\n output, _ = model(input, prev_output_mels, target, arch)\n else:# normal train\n output, _ = model(input, prev_output_mels, target)\n loss_output = self.loss(output, target)\n decoder_loss = loss_output['decoder loss']\n stop_loss = loss_output['stop loss']\n total_loss = decoder_loss + stop_loss\n total_loss.backward()\n nn.utils.clip_grad_norm_(model.parameters(), self.args.clip_grad_norm)\n optimizer.step()\n\n n = sample['nmels']\n decoder_loss_obj.update(decoder_loss.data, n)\n stop_loss_obj.update(stop_loss.data, n)\n loss_obj.update(total_loss.data, n)\n\n if (step+1) % self.args.log_interval == 0:\n lr = scheduler.get_lr()\n log_output = 'train {}@{} global step {} lr {:.6f} decoder loss {:.6f} stop loss {:.6f} total loss {:.6f}'.format(\n epoch, step+1, num_updates, lr, decoder_loss_obj.avg, stop_loss_obj.avg, loss_obj.avg)\n if arch_pool is not None:\n log_output = 'arch {}\\n'.format(arch) + log_output\n logging.info(log_output)\n\n return decoder_loss_obj.avg, stop_loss_obj.avg, loss_obj.avg, num_updates\n\n def valid(self, model=None):\n if model is None:\n model = self.model\n\n decoder_loss_obj = utils.AvgrageMeter()\n stop_loss_obj = utils.AvgrageMeter()\n loss_obj = utils.AvgrageMeter()\n fr_obj = utils.AvgrageMeter()\n pcr_obj = utils.AvgrageMeter()\n dfr_obj = utils.AvgrageMeter()\n model.eval()\n with torch.no_grad():\n for step, sample in enumerate(self.test_queue):\n input = utils.move_to_cuda(sample['src_tokens'])\n prev_output_mels = utils.move_to_cuda(sample['prev_output_mels'])\n target = utils.move_to_cuda(sample['targets'])\n if target is not None:\n output, attn_logits = model(input, prev_output_mels, target)\n else:\n bsz = input.size(0)\n max_input_len = input.size(1)\n if type(model) is nn.DataParallel:\n model = model.module\n decode_length = self.estimate_decode_length(max_input_len)\n encoder_outputs = model.forward_encoder(input)\n encoder_out = encoder_outputs['encoder_out']\n encoder_padding_mask = encoder_outputs['encoder_padding_mask']\n decoder_input = input.new(bsz, decode_length+1, self.args.audio_num_mel_bins).fill_(self.padding_idx).float()\n output = input.new(bsz, 0, self.args.audio_num_mel_bins+1).fill_(self.padding_idx).float()\n hit_eos = input.new(bsz, 1).fill_(0).bool()\n stop_logits = input.new(bsz, 0).fill_(0).float()\n encdec_attn_logits = []\n num_heads = self.num_heads\n for i in range(self.args.dec_layers):\n encdec_attn_logits.append(input.new(bsz, num_heads[i], 0, max_input_len).fill_(0).float())\n incremental_state = {}\n for step in range(decode_length):\n decoder_output, attn_logits = model.forward_decoder(decoder_input[:, :step+1], encoder_out, encoder_padding_mask, incremental_state=incremental_state)\n next_mel = decoder_output[:, -1:, :self.args.audio_num_mel_bins]\n stop_logit = decoder_output[:, -1:, -1]\n stop_logits = torch.cat((stop_logits, stop_logit), dim=1)\n output = torch.cat((output, decoder_output), dim=1)\n for i in range(self.args.dec_layers):\n encdec_attn_logits[i] = torch.cat((encdec_attn_logits[i], attn_logits[i]), dim=2)\n decoder_input[:, step+1] = next_mel[:, -1]\n 
attn_logits = encdec_attn_logits\n this_hit_eos = hit_eos[:, -1:]\n this_hit_eos |= torch.sigmoid(stop_logit) > 0.5\n hit_eos = torch.cat((hit_eos, this_hit_eos), dim=1)\n\n\n loss_output = self.loss(output, target)\n decoder_loss = loss_output['decoder loss']\n stop_loss = loss_output['stop loss']\n total_loss = decoder_loss + stop_loss\n \n n = sample['nmels'] if sample['nmels'] is not None else sample['nsamples']\n decoder_loss_obj.update(decoder_loss.data, n)\n stop_loss_obj.update(stop_loss.data, n)\n loss_obj.update(total_loss.data, n)\n \n encdec_attn = utils.select_attn(attn_logits)\n \n src_lengths = utils.move_to_cuda(sample['src_lengths']) #- 1 # exclude eos\n if target is not None:\n target_lengths = utils.move_to_cuda(sample['target_lengths'])\n target_padding_mask = target.abs().sum(-1).eq(self.padding_idx)\n else:\n hit_eos = hit_eos[:, 1:]\n target_lengths = (1.0 - hit_eos.float()).sum(dim=-1)\n target_padding_mask = output[:, :, :self.args.audio_num_mel_bins].abs().sum(-1).eq(self.padding_idx)\n src_padding_mask = input.eq(self.padding_idx)# | input.eq(self.eos_idx) # also exclude eos\n src_seg_mask = input.eq(self.seg_idx)\n focus_rate = utils.get_focus_rate(encdec_attn, src_padding_mask, target_padding_mask).mean()\n phone_coverage_rate = utils.get_phone_coverage_rate(encdec_attn, src_padding_mask, src_seg_mask, target_padding_mask).mean()\n attn_ks = src_lengths.float() / target_lengths.float()\n diagonal_focus_rate = utils.get_diagonal_focus_rate(encdec_attn, attn_ks, target_lengths, src_padding_mask, target_padding_mask).mean()\n\n fr_obj.update(focus_rate.data, sample['nsamples'])\n pcr_obj.update(phone_coverage_rate.data, sample['nsamples'])\n dfr_obj.update(diagonal_focus_rate.data, sample['nsamples'])\n\n return decoder_loss_obj.avg, stop_loss_obj.avg, loss_obj.avg, fr_obj.avg, pcr_obj.avg, dfr_obj.avg\n\n def infer_batch(self, model, sample):\n if model is None:\n model = self.model\n \n model.eval()\n if type(model) is nn.DataParallel:\n model = model.module\n with torch.no_grad():\n input = utils.move_to_cuda(sample['src_tokens'])\n prev_output_mels = utils.move_to_cuda(sample['prev_output_mels'])\n target = utils.move_to_cuda(sample['targets'])\n bsz = input.size(0)\n max_input_len = input.size(1)\n\n if self.args.gta:\n decode_length = target.size(1)\n else:\n decode_length = self.estimate_decode_length(max_input_len)\n \n encoder_outputs = model.forward_encoder(input)\n encoder_out = encoder_outputs['encoder_out']\n encoder_padding_mask = encoder_outputs['encoder_padding_mask']\n\n hit_eos = input.new(bsz, 1).fill_(0).bool()\n stop_logits = input.new(bsz, 0).fill_(0).float()\n stage = 0\n decoder_input = input.new(bsz, decode_length+1, self.args.audio_num_mel_bins).fill_(self.padding_idx).float()\n decoded_mel = input.new(bsz, 0, self.args.audio_num_mel_bins).fill_(self.padding_idx).float()\n encdec_attn_logits = []\n\n for i in range(self.args.dec_layers):\n encdec_attn_logits.append(input.new(bsz, self.num_heads[i], 0, max_input_len).fill_(0).float())\n #encdec_attn_logits = input.new(bsz, self.args.dec_layers, 0, max_input_len).fill_(0).float()\n attn_pos = input.new(bsz).fill_(0).int()\n use_masks = []\n for i in range(self.args.dec_layers):\n use_masks.append(input.new(self.num_heads[i]).fill_(0).float())\n #use_masks = input.new(self.args.dec_layers*2).fill_(0).float()\n \n incremental_state = {}\n step = 0\n if self.args.attn_constraint:\n for i, layer in enumerate(model.decoder.layers):\n enc_dec_attn_constraint_mask = input.new(bsz, 
self.num_heads[i], max_input_len).fill_(0).int()\n layer.set_buffer('enc_dec_attn_constraint_mask', enc_dec_attn_constraint_mask, incremental_state)\n while True:\n #import pdb; pdb.set_trace()\n if self.is_finished(step, decode_length, hit_eos, stage):\n break\n \n if self.args.gta:\n decoder_input[:, step] = prev_output_mels[:, step]\n\n decoder_output, attn_logits = model.forward_decoder(decoder_input[:, :step+1], encoder_out, encoder_padding_mask, incremental_state=incremental_state)\n next_mel = decoder_output[:, -1:, :self.args.audio_num_mel_bins]\n stop_logit = decoder_output[:, -1:, -1]\n stop_logits = torch.cat((stop_logits, stop_logit), dim=1)\n decoded_mel = torch.cat((decoded_mel, next_mel), dim=1)\n for i in range(self.args.dec_layers):\n encdec_attn_logits[i] = torch.cat((encdec_attn_logits[i], attn_logits[i]), dim=2)\n step += 1\n\n this_hit_eos = hit_eos[:, -1:]\n if self.args.attn_constraint:\n this_hit_eos |= (attn_pos[:, None] >= (encoder_padding_mask < 1.0).float().sum(dim=-1, keepdim=True).int() - 5) & (torch.sigmoid(stop_logit) > 0.5)\n else:\n this_hit_eos |= torch.sigmoid(stop_logit) > 0.5\n hit_eos = torch.cat((hit_eos, this_hit_eos), dim=1)\n\n \n if not self.args.gta:\n decoder_input[:, step] = next_mel[:, -1]\n\n if self.args.attn_constraint:\n stage_change_step = 50\n all_prev_weights = []\n for i in range(self.args.dec_layers):\n all_prev_weights.append(torch.softmax(encdec_attn_logits[i], dim=-1)) # bsz x head x L x L_kv\n\n # if the stage should change\n next_stage = (step == stage_change_step) | (step >= decode_length)\n if not self.args.gta:\n next_stage |= (hit_eos[:, -1].sum() == hit_eos.size(0)).cpu().numpy()\n next_stage &= (stage == 0)\n\n # choose the diagonal attention\n if next_stage:#TODO\n use_masks = []\n for i in range(self.args.dec_layers):\n use_mask = (all_prev_weights[i][:, :, :step].max(dim=-1).values.mean(dim=(0, 2)) > 0.6).float() # [head]\n use_masks.append(use_mask)\n attn_pos = input.new(bsz).fill_(0).int()\n\n # reseet when the stage changes\n for layer in model.decoder.layers:\n layer.clear_buffer(input, encoder_out, encoder_padding_mask, incremental_state)\n \n encdec_attn_logits = []\n for i in range(self.args.dec_layers):\n encdec_attn_logits.append(input.new(bsz, self.num_heads[i], 0, max_input_len).fill_(0).float())\n decoded_mel = input.new(bsz, 0, self.args.audio_num_mel_bins).fill_(0).float()\n decoder_input = input.new(bsz, decode_length+1, self.args.audio_num_mel_bins).fill_(0).float()\n hit_eos = input.new(bsz, 1).fill_(0).bool()\n stage = stage + 1\n step = 0\n\n prev_weights_mask1 = utils.sequence_mask(torch.max(attn_pos - 1, attn_pos.new(attn_pos.size()).fill_(0)).float(), encdec_attn_logits[0].size(-1)).float() # bsz x L_kv\n prev_weights_mask2 = 1.0 - utils.sequence_mask(attn_pos.float() + 4, encdec_attn_logits[0].size(-1)).float() # bsz x L_kv\n enc_dec_attn_constraint_masks = []\n for i in range(self.args.dec_layers):\n mask = (prev_weights_mask1 + prev_weights_mask2)[:, None, :] * use_masks[i][None, :, None] # bsz x head x L_kv\n enc_dec_attn_constraint_masks.append(mask)\n #enc_dec_attn_constraint_masks = (prev_weights_mask1 + prev_weights_mask2)[:, None, None, :] * use_masks[None, :, None, None] # bsz x (n_layers x head) x 1 x L_kv\n\n for i, layer in enumerate(model.decoder.layers):\n enc_dec_attn_constraint_mask = enc_dec_attn_constraint_masks[i]\n layer.set_buffer('enc_dec_attn_constraint_mask', enc_dec_attn_constraint_mask, incremental_state)\n\n def should_move_on():\n prev_weights = []\n for i in 
range(self.args.dec_layers):\n prev_weight = (all_prev_weights[i] * use_masks[i][None, :, None, None]).sum(dim=1)\n prev_weights.append(prev_weight)\n prev_weights = sum(prev_weights) / sum([mask.sum() for mask in use_masks])\n #prev_weights = (prev_weights * use_masks[None, :, None, None]).sum(dim=1) / use_masks.sum()\n move_on = (prev_weights[:, -3:].mean(dim=1).gather(1, attn_pos[:, None].long())).squeeze() < 0.7\n move_on &= torch.argmax(prev_weights[:, -1], -1) > attn_pos.long()\n return attn_pos + move_on.int()\n \n if step > 3 and stage == 1:\n attn_pos = should_move_on()\n\n #size = encdec_attn_logits.size()\n #encdec_attn_logits = encdec_attn_logits.view(size[0], size[1]*size[2], size[3], size[4])\n encdec_attn = utils.select_attn(encdec_attn_logits)\n\n src_lengths = utils.move_to_cuda(sample['src_lengths']) - 1 # exclude eos\n target_lengths = (1.0 - hit_eos[:, 1:].float()).sum(dim=-1) + 1\n src_padding_mask = input.eq(self.padding_idx) | input.eq(self.eos_idx) # also exclude eos\n src_seg_mask = input.eq(self.seg_idx)\n target_padding_mask = decoded_mel.abs().sum(-1).eq(self.padding_idx)\n focus_rate = utils.get_focus_rate(encdec_attn, src_padding_mask, target_padding_mask)\n phone_coverage_rate = utils.get_phone_coverage_rate(encdec_attn, src_padding_mask, src_seg_mask, target_padding_mask)\n attn_ks = src_lengths.float() / target_lengths.float()\n diagonal_focus_rate = utils.get_diagonal_focus_rate(encdec_attn, attn_ks, target_lengths, src_padding_mask, target_padding_mask)\n\n return decoded_mel, encdec_attn.unsqueeze(1), hit_eos, stop_logits, focus_rate, phone_coverage_rate, diagonal_focus_rate\n\n def is_finished(self, step, decode_length, hit_eos, stage):\n finished = step >= decode_length\n if not self.args.gta:\n finished |= (hit_eos[:, -1].sum() == hit_eos.size(0)).cpu().numpy()\n if self.args.attn_constraint:\n finished &= stage != 0\n return finished\n\n def infer(self, model=None, split='test'):\n if model is None:\n model = self.model\n if split == 'train':\n queue = self.train_queue\n elif split == 'valid':\n queue = self.valid_queue\n else:\n assert split == 'test'\n queue = self.test_queue\n\n nsamples_finished = 0\n for batch, sample in enumerate(queue):\n logging.info('inferring batch {} with {} samples'.format(batch+1, sample['nsamples']))\n decoded_mel, encdec_attn, hit_eos, _, focus_rate, phone_coverage_rate, diagonal_focus_rate = self.infer_batch(model, sample)\n\n hit_eos = hit_eos[:, 1:]\n outputs = decoded_mel\n predict_lengths = (1.0 - hit_eos.float()).sum(dim=-1)\n outputs *= (1.0 - hit_eos.float())[:, :, None]\n\n sample['outputs'] = outputs\n sample['predict_mels'] = decoded_mel\n sample['predict_lengths'] = predict_lengths\n sample['encdec_attn'] = encdec_attn\n sample['focus_rate'] = focus_rate\n sample['phone_coverage_rate'] = phone_coverage_rate\n sample['diagonal_focus_rate'] = diagonal_focus_rate\n self.after_infer(sample)\n nsamples_finished += sample['nsamples']\n \n def valid_for_search(self, model=None, gta=False, arch_pool=None, layer_norm_training=False):\n if model is None:\n model = self.model\n if arch_pool is not None:\n loss_res, fr_res, pcr_res, dfr_res = [], [], [] ,[]\n for arch in arch_pool:\n loss_obj = utils.AvgrageMeter()\n fr_obj = utils.AvgrageMeter()\n pcr_obj = utils.AvgrageMeter()\n dfr_obj = utils.AvgrageMeter()\n for batch, sample in enumerate(self.valid_queue):\n ret = self.valid_for_search_batch(model, sample, gta, arch, layer_norm_training)\n loss_obj.update(ret['loss'], ret['nsamples'])\n 
fr_obj.update(ret['focus_rate'], ret['nsamples'])\n pcr_obj.update(ret['phone_coverage_rate'], ret['nsamples'])\n dfr_obj.update(ret['diagonal_focus_rate'], ret['nsamples'])\n loss_res.append(loss_obj.avg)\n fr_res.append(fr_obj.avg)\n pcr_res.append(pcr_obj.avg)\n dfr_res.append(dfr_obj.avg)\n return fr_res, pcr_res, dfr_res, loss_res\n \n loss_obj = utils.AvgrageMeter()\n fr_obj = utils.AvgrageMeter()\n pcr_obj = utils.AvgrageMeter()\n dfr_obj = utils.AvgrageMeter()\n for batch, sample in enumerate(self.valid_queue):\n ret = self.valid_for_search_batch(model, sample, gta)\n loss_obj.update(ret['loss'], ret['nsamples'])\n fr_obj.update(ret['focus_rate'], ret['nsamples'])\n pcr_obj.update(ret['phone_coverage_rate'], ret['nsamples'])\n dfr_obj.update(ret['diagonal_focus_rate'], ret['nsamples'])\n return fr_obj.avg, pcr_obj.avg, dfr_obj.avg, loss_obj.avg\n\n def valid_for_search_batch(self, model, sample, gta=False, arch=None, layer_norm_training=False):\n if model is None:\n model = self.model\n model.eval()\n\n with torch.no_grad():\n input = utils.move_to_cuda(sample['src_tokens'])\n prev_output_mels = utils.move_to_cuda(sample['prev_output_mels'])\n target = utils.move_to_cuda(sample['targets'])\n bsz = input.size(0)\n max_input_len = input.size(1)\n\n if gta:\n output, attn_logits = model(input, prev_output_mels, target, arch=arch, layer_norm_training=layer_norm_training)\n encdec_attn_logits = attn_logits\n else:\n if type(model) is nn.DataParallel:\n model = model.module\n decode_length = target.size(1) if target is not None else self.estimate_decode_length(input.size(1))\n encoder_outputs = model.forward_encoder(input, arch=arch, layer_norm_training=layer_norm_training)\n encoder_out = encoder_outputs['encoder_out']\n encoder_padding_mask = encoder_outputs['encoder_padding_mask']\n decoder_input = input.new(bsz, decode_length+1, self.args.audio_num_mel_bins).fill_(self.padding_idx).float()\n output = input.new(bsz, 0, self.args.audio_num_mel_bins+1).fill_(self.padding_idx).float()\n hit_eos = input.new(bsz, 1).fill_(0).bool()\n stop_logits = input.new(bsz, 0).fill_(0).float()\n encdec_attn_logits = []\n if arch is not None: # in ws mode, arch is provided at run\n num_heads = utils.get_num_heads(arch[self.args.enc_layers:])\n else: # in general mode, arch is defined at the begining\n num_heads = self.num_heads\n for i in range(self.args.dec_layers):\n encdec_attn_logits.append(input.new(bsz, num_heads[i], 0, max_input_len).fill_(0).float())\n incremental_state = {}\n for step in range(decode_length):\n decoder_output, attn_logits = model.forward_decoder(decoder_input[:, :step+1], encoder_out, encoder_padding_mask, incremental_state=incremental_state, arch=arch, layer_norm_training=layer_norm_training)\n next_mel = decoder_output[:, -1:, :self.args.audio_num_mel_bins]\n stop_logit = decoder_output[:, -1:, -1]\n stop_logits = torch.cat((stop_logits, stop_logit), dim=1)\n output = torch.cat((output, decoder_output), dim=1)\n for i in range(self.args.dec_layers):\n encdec_attn_logits[i] = torch.cat((encdec_attn_logits[i], attn_logits[i]), dim=2)\n decoder_input[:, step+1] = next_mel[:, -1]\n this_hit_eos = hit_eos[:, -1:]\n this_hit_eos |= torch.sigmoid(stop_logit) > 0.5\n hit_eos = torch.cat((hit_eos, this_hit_eos), dim=1)\n \n loss_output = self.loss(output, target)\n decoder_loss = loss_output['decoder loss']\n stop_loss = loss_output['stop loss']\n total_loss = decoder_loss + stop_loss\n encdec_attn = utils.select_attn(encdec_attn_logits)\n\n src_lengths = 
utils.move_to_cuda(sample['src_lengths']) - 1 # exclude eos\n if target is not None:\n target_lengths = utils.move_to_cuda(sample['target_lengths'])\n target_padding_mask = target.abs().sum(-1).eq(self.padding_idx)\n else:\n hit_eos = hit_eos[:, 1:]\n target_lengths = (1.0 - hit_eos.float()).sum(dim=-1)\n target_padding_mask = output[:, :, :self.args.audio_num_mel_bins].abs().sum(-1).eq(self.padding_idx)\n src_padding_mask = input.eq(self.padding_idx) | input.eq(self.eos_idx) # also exclude eos\n src_seg_mask = input.eq(self.seg_idx)\n focus_rate = utils.get_focus_rate(encdec_attn, src_padding_mask, target_padding_mask)\n phone_coverage_rate = utils.get_phone_coverage_rate(encdec_attn, src_padding_mask, src_seg_mask, target_padding_mask)\n attn_ks = src_lengths.float() / target_lengths.float()\n diagonal_focus_rate = utils.get_diagonal_focus_rate(encdec_attn, attn_ks, target_lengths, src_padding_mask, target_padding_mask)\n\n ret = {\n 'focus_rate': focus_rate.mean().data,\n 'phone_coverage_rate': phone_coverage_rate.mean().data,\n 'diagonal_focus_rate': diagonal_focus_rate.mean().data,\n 'loss': total_loss.data,\n 'nsamples': sample['nsamples'],\n }\n return ret\n\n def estimate_decode_length(self, input_length):\n return input_length * 5 + 100\n\n def after_infer(self, predictions):\n predictions = utils.unpack_dict_to_list(predictions)\n for num_predictions, prediction in enumerate(predictions):\n for k, v in prediction.items():\n if type(v) is torch.Tensor:\n prediction[k] = v.cpu().numpy()\n\n utt_id = prediction.get('utt_id', None)\n text = prediction.get('text', None)\n src_tokens = prediction.get('src_tokens')\n src_lengths = prediction.get('src_lengths')\n targets = prediction.get(\"targets\", None)\n outputs = prediction[\"outputs\"]\n focus_rate = prediction['focus_rate']\n phone_coverage_rate = prediction['phone_coverage_rate']\n diagonal_focus_rate = prediction['diagonal_focus_rate']\n decoded_inputs_txt = self.phone_encoder.decode(src_tokens, strip_eos=True, strip_padding=True)\n out_wav = audio.inv_mel_spectrogram(outputs.T, self.args)\n prediction['out_wav'] = out_wav\n\n if prediction.get('hit_eos') is None:\n assert prediction.get('predict_lengths') is not None\n prediction['hit_eos'] = np.arange(outputs.shape[0]) >= prediction['predict_lengths']\n \n targets = self.remove_padding(targets) if targets is not None else None # speech\n outputs = self.remove_padding(outputs, prediction.get('hit_eos'))\n if out_wav is not None:\n outputs = self.remove_padding(out_wav, prediction.get('hit_eos'))\n\n prediction['predict_mels'] = self.remove_padding(prediction['predict_mels'], prediction.get('hit_eos'))\n\n if 'encdec_attn' in prediction:\n encdec_attn = prediction['encdec_attn']\n encdec_attn = encdec_attn[encdec_attn.max(-1).sum(-1).argmax(-1)]\n prediction['encdec_attn'] = self.remove_padding(encdec_attn, prediction.get('hit_eos'))\n prediction['encdec_attn'] = prediction['encdec_attn'].T[:src_lengths]\n\n if hasattr(self.args, 'checkpoint_path') and self.args.checkpoint_path is not None:\n steps = os.path.split(self.args.checkpoint_path)[-1].strip().split('checkpoint')[1].split('.pt')[0]\n output_dir = os.path.join(self.args.output_dir, f'generated_{steps}')\n else:\n output_dir = os.path.join(self.args.output_dir, f'generated')\n os.makedirs(output_dir, exist_ok=True)\n\n def log_audio(outputs, prefix, alignment=None, mels=None, decoded_txt=None):\n if len(outputs.shape) == 1:\n wav_out = outputs\n else:\n mel = outputs.reshape(-1, self.args.audio_num_mel_bins)\n wav_out = 
audio.inv_mel_spectrogram(mel.T, self.args)\n wav_out = audio.inv_preemphasis(wav_out, self.args.preemphasis, self.args.preemphasize)\n #audio.save_wav(wav_out, os.path.join(output_dir, '[W][{}][{}]{}.wav'.format(prefix, utt_id, text.replace(':', '%3A') if text is not None else '')),\n # self.args.audio_sample_rate)\n audio.save_wav(wav_out, os.path.join(output_dir, '[W][{}][{}].wav'.format(prefix, utt_id)),\n self.args.audio_sample_rate)\n #audio.plot_spec(mels.reshape(-1, 80).T,\n # os.path.join(output_dir, '[P][{}][{}]{}.png'.format(prefix, utt_id, text.replace(':', '%3A') if text is not None else '')))\n audio.plot_spec(mels.reshape(-1, 80).T,\n os.path.join(output_dir, '[P][{}][{}].png'.format(prefix, utt_id)))\n if alignment is not None:\n import matplotlib.pyplot as plt\n fig, ax = plt.subplots(figsize=(12, 16))\n im = ax.imshow(alignment, aspect='auto', origin='lower',\n interpolation='none')\n decoded_txt = decoded_txt.split(\" \")\n ax.set_yticks(np.arange(len(decoded_txt)))\n ax.set_yticklabels(list(decoded_txt), fontsize=6)\n\n fig.colorbar(im, ax=ax)\n #fig.savefig(os.path.join(output_dir, '[A][{}][{}]{}.png'.format(prefix, utt_id, text.replace(':', '%3A') if text is not None else '')), format='png')\n fig.savefig(os.path.join(output_dir, '[A][{}][{}].png'.format(prefix, utt_id)), format='png')\n plt.close()\n\n #with open(os.path.join(output_dir, '[A][{}][{}]{}.txt'.format(prefix, utt_id, text.replace(':', '%3A') if text is not None else '')), 'w') as f:\n with open(os.path.join(output_dir, '[A][{}][{}].txt'.format(prefix, utt_id)), 'w') as f:\n f.write('fr %.6f pcr %.6f dfr %.6f' % (focus_rate, phone_coverage_rate, diagonal_focus_rate))\n\n log_audio(outputs, 'pred', prediction.get('encdec_attn', None), prediction[\"predict_mels\"], decoded_inputs_txt)\n logging.info('pred_outputs.shape: {}'.format(prediction['predict_mels'].shape))\n if targets is not None:\n log_audio(targets, 'gt', None, targets[:, :self.args.audio_num_mel_bins], decoded_inputs_txt)\n\n logging.info(\">>> {}\".format(num_predictions+1))\n\n\nclass LJSpeechTaskMB(LJSpeechTask):\n\n def __init__(self, args):\n self.args = args\n self.ws = getattr(args, 'ws', False)\n self.arch = getattr(args, 'arch', None)\n self.data_dir = args.data_dir\n self.output_dir = args.output_dir\n self.max_tokens = args.max_tokens\n self.max_sentences = args.max_sentences\n self.max_eval_tokens = args.max_eval_tokens if getattr(args, 'max_eval_tokens', None) is not None else self.max_tokens\n self.max_eval_sentences = args.max_eval_sentences if getattr(args, 'max_eval_sentences', None) is not None else self.max_sentences\n if isinstance(self.arch, str):\n self.arch = list(map(int, self.arch.strip().split()))\n if self.arch is not None:\n self.num_heads = utils.get_num_heads(self.arch[args.enc_layers*2:])\n \n def setup_task(self, model_state_dict=None, optimizer_state_dict=None, ws=False):\n self.phone_encoder = self.build_phone_encoder(self.data_dir)\n self.train_dataset, self.valid_dataset, self.test_dataset = self.build_dataset(self.data_dir, self.args.raw_data)\n self.train_queue = self.build_queue(self.train_dataset, True, self.max_tokens, self.max_sentences)\n self.valid_queue = self.build_queue(self.valid_dataset, False, self.max_eval_tokens, self.max_eval_sentences)\n self.test_queue = self.build_queue(self.test_dataset, False, self.max_eval_tokens, self.max_eval_sentences)\n self.model = self.build_model(arch=self.arch, model_state_dict=model_state_dict, ws=self.ws | ws)\n self.optimizer = 
self.build_optimizer(optimizer_state_dict=optimizer_state_dict)\n self.scheduler = self.build_scheduler()\n self.padding_idx = self.phone_encoder.pad()\n self.eos_idx = self.phone_encoder.eos()\n self.seg_idx = self.phone_encoder.seg()\n #if torch.cuda.device_count() > 1:\n # torch.distributed.init_process_group(backend='nccl')\n\n def build_model(self, arch=None, model_state_dict=None, ws=False):\n if arch is None:\n arch = self.arch\n assert (arch is not None) ^ ws, 'arch and ws are mutual'\n if ws:\n _model = model_ws.NASNetwork(self.args, self.phone_encoder)\n else:\n _model = model.NASNetwork(self.args, arch, self.phone_encoder)\n if model_state_dict is not None:\n _model.load_state_dict(model_state_dict)\n if torch.cuda.is_available():\n if torch.cuda.device_count() > 1:\n _model = nn.DataParallel(_model)\n _model = _model.cuda()\n return _model\n\n def infer_batch(self, model, sample):\n if model is None:\n model = self.model\n \n model.eval()\n if type(model) is nn.DataParallel:\n model = model.module\n with torch.no_grad():\n input = utils.move_to_cuda(sample['src_tokens'])\n prev_output_mels = utils.move_to_cuda(sample['prev_output_mels'])\n target = utils.move_to_cuda(sample['targets'])\n bsz = input.size(0)\n max_input_len = input.size(1)\n\n if self.args.gta:\n decode_length = target.size(1)\n else:\n decode_length = self.estimate_decode_length(max_input_len)\n \n encoder_outputs = model.forward_encoder(input)\n encoder_out = encoder_outputs['encoder_out']\n encoder_padding_mask = encoder_outputs['encoder_padding_mask']\n\n hit_eos = input.new(bsz, 1).fill_(0).bool()\n stop_logits = input.new(bsz, 0).fill_(0).float()\n stage = 0\n decoder_input = input.new(bsz, decode_length+1, self.args.audio_num_mel_bins).fill_(self.padding_idx).float()\n decoded_mel = input.new(bsz, 0, self.args.audio_num_mel_bins).fill_(self.padding_idx).float()\n encdec_attn_logits = []\n\n for i in range(self.args.dec_layers):\n encdec_attn_logits.append(input.new(bsz, self.num_heads[2*i], 0, max_input_len).fill_(0).float())\n encdec_attn_logits.append(input.new(bsz, self.num_heads[2*i+1], 0, max_input_len).fill_(0).float())\n #encdec_attn_logits = input.new(bsz, self.args.dec_layers, 0, max_input_len).fill_(0).float()\n attn_pos = input.new(bsz).fill_(0).int()\n use_masks = []\n for i in range(self.args.dec_layers):\n use_masks.append(input.new(self.num_heads[2*i]).fill_(0).float())\n use_masks.append(input.new(self.num_heads[2*i+1]).fill_(0).float())\n #use_masks = input.new(self.args.dec_layers*2).fill_(0).float()\n \n incremental_state = {}\n step = 0\n if self.args.attn_constraint:\n for i, layer in enumerate(model.decoder.layers):\n enc_dec_attn_constraint_mask = input.new(bsz, self.num_heads[2*i], max_input_len).fill_(0).int()\n layer.set_left_buffer('enc_dec_attn_constraint_mask', enc_dec_attn_constraint_mask, incremental_state)\n enc_dec_attn_constraint_mask = input.new(bsz, self.num_heads[2*i+1], max_input_len).fill_(0).int()\n layer.set_right_buffer('enc_dec_attn_constraint_mask', enc_dec_attn_constraint_mask, incremental_state)\n while True:\n #import pdb; pdb.set_trace()\n if self.is_finished(step, decode_length, hit_eos, stage):\n break\n \n if self.args.gta:\n decoder_input[:, step] = prev_output_mels[:, step]\n\n decoder_output, attn_logits = model.forward_decoder(decoder_input[:, :step+1], encoder_out, encoder_padding_mask, incremental_state=incremental_state)\n next_mel = decoder_output[:, -1:, :self.args.audio_num_mel_bins]\n stop_logit = decoder_output[:, -1:, -1]\n stop_logits = 
torch.cat((stop_logits, stop_logit), dim=1)\n decoded_mel = torch.cat((decoded_mel, next_mel), dim=1)\n for i in range(self.args.dec_layers):\n encdec_attn_logits[2*i] = torch.cat((encdec_attn_logits[2*i], attn_logits[2*i]), dim=2)\n encdec_attn_logits[2*i+1] = torch.cat((encdec_attn_logits[2*i+1], attn_logits[2*i+1]), dim=2)\n step += 1\n\n this_hit_eos = hit_eos[:, -1:]\n if self.args.attn_constraint:\n this_hit_eos |= (attn_pos[:, None] >= (encoder_padding_mask < 1.0).float().sum(dim=-1, keepdim=True).int() - 5) & (torch.sigmoid(stop_logit) > 0.5)\n else:\n this_hit_eos |= torch.sigmoid(stop_logit) > 0.5\n hit_eos = torch.cat((hit_eos, this_hit_eos), dim=1)\n\n \n if not self.args.gta:\n decoder_input[:, step] = next_mel[:, -1]\n\n if self.args.attn_constraint:\n stage_change_step = 50\n all_prev_weights = []\n for i in range(self.args.dec_layers):\n all_prev_weights.append(torch.softmax(encdec_attn_logits[2*i], dim=-1)) # bsz x head x L x L_kv\n all_prev_weights.append(torch.softmax(encdec_attn_logits[2*i+1], dim=-1))\n\n # if the stage should change\n next_stage = (step == stage_change_step) | (step >= decode_length)\n if not self.args.gta:\n next_stage |= (hit_eos[:, -1].sum() == hit_eos.size(0)).cpu().numpy()\n next_stage &= (stage == 0)\n\n # choose the diagonal attention\n if next_stage:#TODO\n use_masks = []\n for i in range(self.args.dec_layers):\n use_mask = (all_prev_weights[2*i][:, :, :step].max(dim=-1).values.mean(dim=(0, 2)) > 0.6).float() # [head]\n use_masks.append(use_mask)\n use_mask = (all_prev_weights[2*i+1][:, :, :step].max(dim=-1).values.mean(dim=(0, 2)) > 0.6).float() # [head]\n use_masks.append(use_mask)\n attn_pos = input.new(bsz).fill_(0).int()\n\n # reset when the stage changes\n for i, layer in enumerate(model.decoder.layers):\n layer.clear_left_buffer(input, encoder_out, encoder_padding_mask, incremental_state)\n layer.clear_right_buffer(input, encoder_out, encoder_padding_mask, incremental_state)\n \n encdec_attn_logits = []\n for i in range(self.args.dec_layers):\n encdec_attn_logits.append(input.new(bsz, self.num_heads[2*i], 0, max_input_len).fill_(0).float())\n encdec_attn_logits.append(input.new(bsz, self.num_heads[2*i+1], 0, max_input_len).fill_(0).float())\n decoded_mel = input.new(bsz, 0, self.args.audio_num_mel_bins).fill_(0).float()\n decoder_input = input.new(bsz, decode_length+1, self.args.audio_num_mel_bins).fill_(0).float()\n hit_eos = input.new(bsz, 1).fill_(0).bool()\n stage = stage + 1\n step = 0\n\n prev_weights_mask1 = utils.sequence_mask(torch.max(attn_pos - 1, attn_pos.new(attn_pos.size()).fill_(0)).float(), encdec_attn_logits[0].size(-1)).float() # bsz x L_kv\n prev_weights_mask2 = 1.0 - utils.sequence_mask(attn_pos.float() + 4, encdec_attn_logits[0].size(-1)).float() # bsz x L_kv\n enc_dec_attn_constraint_masks = []\n for i in range(self.args.dec_layers):\n mask = (prev_weights_mask1 + prev_weights_mask2)[:, None, :] * use_masks[2*i][None, :, None] # bsz x head x L_kv\n enc_dec_attn_constraint_masks.append(mask)\n mask = (prev_weights_mask1 + prev_weights_mask2)[:, None, :] * use_masks[2*i+1][None, :, None] # bsz x head x L_kv\n enc_dec_attn_constraint_masks.append(mask)\n #enc_dec_attn_constraint_masks = (prev_weights_mask1 + prev_weights_mask2)[:, None, None, :] * use_masks[None, :, None, None] # bsz x (n_layers x head) x 1 x L_kv\n\n for i, layer in enumerate(model.decoder.layers):\n enc_dec_attn_constraint_mask = enc_dec_attn_constraint_masks[2*i]\n layer.set_left_buffer('enc_dec_attn_constraint_mask', enc_dec_attn_constraint_mask, 
incremental_state)\n enc_dec_attn_constraint_mask = enc_dec_attn_constraint_masks[2*i+1]\n layer.set_right_buffer('enc_dec_attn_constraint_mask', enc_dec_attn_constraint_mask, incremental_state)\n\n def should_move_on():\n prev_weights = []\n for i in range(self.args.dec_layers):\n prev_weight = (all_prev_weights[2*i] * use_masks[2*i][None, :, None, None]).sum(dim=1)\n prev_weights.append(prev_weight)\n prev_weight = (all_prev_weights[2*i+1] * use_masks[2*i+1][None, :, None, None]).sum(dim=1)\n prev_weights.append(prev_weight)\n prev_weights = sum(prev_weights) / sum([mask.sum() for mask in use_masks])\n #prev_weights = (prev_weights * use_masks[None, :, None, None]).sum(dim=1) / use_masks.sum()\n move_on = (prev_weights[:, -3:].mean(dim=1).gather(1, attn_pos[:, None].long())).squeeze() < 0.7\n move_on &= torch.argmax(prev_weights[:, -1], -1) > attn_pos.long()\n return attn_pos + move_on.int()\n \n if step > 3 and stage == 1:\n attn_pos = should_move_on()\n\n #size = encdec_attn_logits.size()\n #encdec_attn_logits = encdec_attn_logits.view(size[0], size[1]*size[2], size[3], size[4])\n encdec_attn = utils.select_attn(encdec_attn_logits)\n\n src_lengths = utils.move_to_cuda(sample['src_lengths']) - 1 # exclude eos\n target_lengths = (1.0 - hit_eos[:, 1:].float()).sum(dim=-1) + 1\n src_padding_mask = input.eq(self.padding_idx) | input.eq(self.eos_idx) # also exclude eos\n src_seg_mask = input.eq(self.seg_idx)\n target_padding_mask = decoded_mel.abs().sum(-1).eq(self.padding_idx)\n focus_rate = utils.get_focus_rate(encdec_attn, src_padding_mask, target_padding_mask)\n phone_coverage_rate = utils.get_phone_coverage_rate(encdec_attn, src_padding_mask, src_seg_mask, target_padding_mask)\n attn_ks = src_lengths.float() / target_lengths.float()\n diagonal_focus_rate = utils.get_diagonal_focus_rate(encdec_attn, attn_ks, target_lengths, src_padding_mask, target_padding_mask)\n\n return decoded_mel, encdec_attn.unsqueeze(1), hit_eos, stop_logits, focus_rate, phone_coverage_rate, diagonal_focus_rate\n \n def valid_for_search_batch(self, model, sample, gta=False, arch=None, layer_norm_training=False):\n if model is None:\n model = self.model\n model.eval()\n if type(model) is nn.DataParallel:\n model = model.module\n\n with torch.no_grad():\n input = utils.move_to_cuda(sample['src_tokens'])\n prev_output_mels = utils.move_to_cuda(sample['prev_output_mels'])\n target = utils.move_to_cuda(sample['targets'])\n bsz = input.size(0)\n max_input_len = input.size(1)\n\n if gta:\n output, attn_logits = model(input, prev_output_mels, target, arch=arch, layer_norm_training=layer_norm_training)\n encdec_attn_logits = attn_logits\n else:\n decode_length = target.size(1) if target is not None else self.estimate_decode_length(input.size(1))\n encoder_outputs = model.forward_encoder(input, arch=arch, layer_norm_training=layer_norm_training)\n encoder_out = encoder_outputs['encoder_out']\n encoder_padding_mask = encoder_outputs['encoder_padding_mask']\n decoder_input = input.new(bsz, decode_length+1, self.args.audio_num_mel_bins).fill_(self.padding_idx).float()\n output = input.new(bsz, 0, self.args.audio_num_mel_bins+1).fill_(self.padding_idx).float()\n hit_eos = input.new(bsz, 1).fill_(0).bool()\n stop_logits = input.new(bsz, 0).fill_(0).float()\n encdec_attn_logits = []\n if arch is not None: # in ws mode, arch is provided at run\n num_heads = utils.get_num_heads(arch[2*self.args.enc_layers:])\n else: # in general mode, arch is defined at the begining\n num_heads = self.num_heads\n for i in 
range(self.args.dec_layers):\n encdec_attn_logits.append(input.new(bsz, num_heads[2*i], 0, max_input_len).fill_(0).float())\n encdec_attn_logits.append(input.new(bsz, num_heads[2*i+1], 0, max_input_len).fill_(0).float())\n incremental_state = {}\n for step in range(decode_length):\n decoder_output, attn_logits = model.forward_decoder(decoder_input[:, :step+1], encoder_out, encoder_padding_mask, incremental_state=incremental_state, arch=arch, layer_norm_training=layer_norm_training)\n next_mel = decoder_output[:, -1:, :self.args.audio_num_mel_bins]\n stop_logit = decoder_output[:, -1:, -1]\n stop_logits = torch.cat((stop_logits, stop_logit), dim=1)\n output = torch.cat((output, decoder_output), dim=1)\n for i in range(self.args.dec_layers):\n encdec_attn_logits[2*i] = torch.cat((encdec_attn_logits[2*i], attn_logits[2*i]), dim=2)\n encdec_attn_logits[2*i+1] = torch.cat((encdec_attn_logits[2*i+1], attn_logits[2*i+1]), dim=2)\n decoder_input[:, step+1] = next_mel[:, -1]\n this_hit_eos = hit_eos[:, -1:]\n this_hit_eos |= torch.sigmoid(stop_logit) > 0.5\n hit_eos = torch.cat((hit_eos, this_hit_eos), dim=1)\n \n loss_output = self.loss(output, target)\n decoder_loss = loss_output['decoder loss']\n stop_loss = loss_output['stop loss']\n total_loss = decoder_loss + stop_loss\n encdec_attn = utils.select_attn(encdec_attn_logits)\n\n src_lengths = utils.move_to_cuda(sample['src_lengths']) - 1 # exclude eos\n src_lengths = utils.move_to_cuda(sample['src_lengths']) - 1 # exclude eos\n if target is not None:\n target_lengths = utils.move_to_cuda(sample['target_lengths'])\n target_padding_mask = target.abs().sum(-1).eq(self.padding_idx)\n else:\n hit_eos = hit_eos[:, 1:]\n target_lengths = (1.0 - hit_eos.float()).sum(dim=-1)\n target_padding_mask = output[:, :, :self.args.audio_num_mel_bins].abs().sum(-1).eq(self.padding_idx)\n src_padding_mask = input.eq(self.padding_idx) | input.eq(self.eos_idx) # also exclude eos\n src_seg_mask = input.eq(self.seg_idx)\n focus_rate = utils.get_focus_rate(encdec_attn, src_padding_mask, target_padding_mask)\n phone_coverage_rate = utils.get_phone_coverage_rate(encdec_attn, src_padding_mask, src_seg_mask, target_padding_mask)\n attn_ks = src_lengths.float() / target_lengths.float()\n diagonal_focus_rate = utils.get_diagonal_focus_rate(encdec_attn, attn_ks, target_lengths, src_padding_mask, target_padding_mask)\n\n ret = {\n 'focus_rate': focus_rate.mean().data,\n 'phone_coverage_rate': phone_coverage_rate.mean().data,\n 'diagonal_focus_rate': diagonal_focus_rate.mean().data,\n 'loss': total_loss.data,\n 'nsamples': sample['nsamples'],\n }\n return ret\n",
"import os\nimport sys\nimport glob\nimport time\nimport copy\nimport random\nimport numpy as np\nimport torch\nimport utils\nimport logging\nimport argparse\nimport torch.backends.cudnn as cudnn\nimport tasks\n\nparser = argparse.ArgumentParser(description='TTS')\n\n# Basic model parameters.\nparser.add_argument('--debug', action='store_true')\nparser.add_argument('--data_dir', type=str, default='data/LJSpeech-1.1')\nparser.add_argument('--raw_data', action='store_true')\nparser.add_argument('--split', type=str, default='test', choices=['train', 'valid', 'test'])\nparser.add_argument('--n', type=int, default=None)\nparser.add_argument('--output_dir', type=str, default='outputs')\nparser.add_argument('--checkpoint_path', type=str, default=None)\nparser.add_argument('--max_tokens', type=int, default=31250)\nparser.add_argument('--max_sentences', type=int, default=None)\nparser.add_argument('--gta', action='store_true')\nparser.add_argument('--attn_constraint', action='store_true')\nparser.add_argument('--seed', type=int, default=1)\nargs = parser.parse_args()\n\n\nlog_format = '%(asctime)s %(message)s'\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO,\n format=log_format, datefmt='%m/%d %I:%M:%S %p')\n\n\ndef main(args):\n if not torch.cuda.is_available():\n logging.info('No gpu device available')\n sys.exit(1)\n \n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed)\n cudnn.enabled = True\n cudnn.benchmark = True\n cudnn.deterministic = True\n\n tasks.set_ljspeech_hparams(args)\n logging.info(\"args = %s\", args)\n \n saved_args, model_state_dict, epoch, global_step, optimizer_state_dict, best_valid_loss = utils.load(args.checkpoint_path)\n if any([saved_args, model_state_dict, epoch, global_step, optimizer_state_dict]):\n logging.info('Found exist checkpoint with epoch %d and updates %d', epoch, global_step)\n\n if saved_args is not None:\n saved_args.__dict__.update(args.__dict__)\n args = saved_args\n task = tasks.LJSpeechTask(args)\n task.setup_task(model_state_dict, optimizer_state_dict)\n task.infer(split=args.split, n=args.n)\n \n\nif __name__ == '__main__':\n main(args)"
] | [
[
"torch.cat",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cuda.is_available",
"numpy.where",
"torch.softmax",
"numpy.arange",
"torch.nn.functional.relu",
"matplotlib.pyplot.close",
"numpy.load",
"torch.LongTensor",
"torch.sigmoid",
"torch.nn.functional.mse_loss",
"torch.cuda.device_count",
"numpy.array",
"numpy.sum",
"numpy.abs",
"torch.Tensor",
"matplotlib.use",
"matplotlib.pyplot.subplots",
"numpy.random.shuffle",
"torch.nn.DataParallel",
"torch.argmax"
],
[
"torch.cuda.manual_seed",
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.cuda.manual_seed_all"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
elletech/practice_manim | [
"83671e9e801490ce84100da3a684e369860fda37",
"83671e9e801490ce84100da3a684e369860fda37"
] | [
"manim/utils/hashing.py",
"tests/utils/GraphicalUnitTester.py"
] | [
"\"\"\"Utilities for scene caching.\"\"\"\n\nimport json\nimport zlib\nimport inspect\nimport copy\nimport numpy as np\nfrom types import ModuleType, MappingProxyType, FunctionType, MethodType\nfrom time import perf_counter\n\nfrom .. import logger\n\nALREADY_PROCESSED_ID = {}\n\n\nclass CustomEncoder(json.JSONEncoder):\n def default(self, obj):\n \"\"\"\n This method is used to serialize objects to JSON format.\n\n If obj is a function, then it will return a dict with two keys : 'code', for the code source, and 'nonlocals' for all nonlocalsvalues. (including nonlocals functions, that will be serialized as this is recursive.)\n if obj is a np.darray, it converts it into a list.\n if obj is an object with __dict__ attribute, it returns its __dict__.\n Else, will let the JSONEncoder do the stuff, and throw an error if the type is not suitable for JSONEncoder.\n\n Parameters\n ----------\n obj : Any\n Arbitrary object to convert\n\n Returns\n -------\n Any\n Python object that JSON encoder will recognize\n\n \"\"\"\n if not (isinstance(obj, ModuleType)) and isinstance(\n obj, (MethodType, FunctionType)\n ):\n cvars = inspect.getclosurevars(obj)\n cvardict = {**copy.copy(cvars.globals), **copy.copy(cvars.nonlocals)}\n for i in list(cvardict):\n # NOTE : All module types objects are removed, because otherwise it throws ValueError: Circular reference detected if not. TODO\n if isinstance(cvardict[i], ModuleType):\n del cvardict[i]\n try:\n code = inspect.getsource(obj)\n except OSError:\n # This happens when rendering videos included in the documentation\n # within doctests and should be replaced by a solution avoiding\n # hash collision (due to the same, empty, code strings) at some point.\n # See https://github.com/ManimCommunity/manim/pull/402.\n code = \"\"\n return self._check_iterable({\"code\": code, \"nonlocals\": cvardict})\n elif isinstance(obj, np.ndarray):\n if obj.size > 1000:\n obj = np.resize(obj, (100, 100))\n return f\"TRUNCATED ARRAY: {repr(obj)}\"\n # We return the repr and not a list to avoid the JsonEncoder to iterate over it.\n return repr(obj)\n elif hasattr(obj, \"__dict__\"):\n temp = getattr(obj, \"__dict__\")\n # MappingProxy is scene-caching nightmare. It contains all of the object methods and attributes. 
We skip it as the mechanism will at some point process the object, but instancied\n # Indeed, there is certainly no case where scene-caching will recieve only a non instancied object, as this is never used in the library or encouraged to be used user-side.\n if isinstance(temp, MappingProxyType):\n return \"MappingProxy\"\n return self._check_iterable(temp)\n elif isinstance(obj, np.uint8):\n return int(obj)\n\n return f\"Unsupported type for serializing -> {str(type(obj))}\"\n\n def _handle_already_processed(self, obj):\n \"\"\"Handle if an object has been already processed by checking the id of the object.\n\n This prevents the mechanism to handle an object several times, and is used to prevent any circular reference.\n\n Parameters\n ----------\n obj : Any\n The obj to check.\n\n Returns\n -------\n Any\n \"already_processed\" string if it has been processed, otherwise obj.\n \"\"\"\n global ALREADY_PROCESSED_ID\n if id(obj) in ALREADY_PROCESSED_ID:\n return \"already_processed\"\n if not isinstance(obj, (str, int, bool, float)):\n ALREADY_PROCESSED_ID[id(obj)] = obj\n return obj\n\n def _check_iterable(self, iterable):\n \"\"\"Check for circular reference at each iterable that will go through the JSONEncoder, as well as key of the wrong format.\n\n If a key with a bad format is found (i.e not a int, string, or float), it gets replaced byt its hash using the same process implemented here.\n If a circular reference is found within the iterable, it will be replaced by the string \"already processed\".\n\n Parameters\n ----------\n iterable : Iterable[Any]\n The iterable to check.\n \"\"\"\n\n def _key_to_hash(key):\n return zlib.crc32(json.dumps(key, cls=CustomEncoder).encode())\n\n def _iter_check_list(lst):\n # We have to make a copy, as we don't want to touch to the original list\n # A deepcopy isn't necessary as it is already recursive.\n lst_copy = copy.copy(lst)\n if isinstance(lst, tuple):\n # NOTE: Sometimes a tuple can pass through this function. 
As a tuple\n # is immutable, we convert it to a list to be able to modify it.\n # It's ok as it is a copy.\n lst_copy = list(lst_copy)\n for i, el in enumerate(lst):\n if not isinstance(lst, tuple):\n lst_copy[i] = self._handle_already_processed(\n el\n ) # ISSUE here, because of copy.\n if isinstance(el, (list, tuple)):\n lst_copy[i] = _iter_check_list(el)\n elif isinstance(el, dict):\n lst_copy[i] = _iter_check_dict(el)\n return lst_copy\n\n def _iter_check_dict(dct):\n # We have to make a copy, as we don't want to touch to the original dict\n # A deepcopy isn't necessary as it is already recursive.\n dct_copy = copy.copy(dct)\n for k, v in dct.items():\n dct_copy[k] = self._handle_already_processed(v)\n # We check if the k is of the right format (supporter by Json)\n if not isinstance(k, (str, int, float, bool)) and k is not None:\n k_new = _key_to_hash(k)\n # We delete the value coupled with the old key, as the value is now coupled with the new key.\n dct_copy[k_new] = dct_copy[k]\n del dct_copy[k]\n else:\n k_new = k\n if isinstance(v, dict):\n dct_copy[k_new] = _iter_check_dict(v)\n elif isinstance(v, (list, tuple)):\n dct_copy[k_new] = _iter_check_list(v)\n return dct_copy\n\n if isinstance(iterable, (list, tuple)):\n return _iter_check_list(iterable)\n elif isinstance(iterable, dict):\n return _iter_check_dict(iterable)\n\n def encode(self, obj):\n \"\"\"Overriding of :meth:`JSONEncoder.encode`, to make our own process.\n\n Parameters\n ----------\n obj: Any\n The object to encode in JSON.\n\n Returns\n -------\n :class:`str`\n The object encoder with the standard json process.\n \"\"\"\n # We need to mark as already processed the first object to go in the process,\n # As after, only objects that come from iterables will be marked as such.\n global ALREADY_PROCESSED_ID\n ALREADY_PROCESSED_ID[id(obj)] = obj\n if isinstance(obj, (dict, list, tuple)):\n return super().encode(self._check_iterable(obj))\n return super().encode(obj)\n\n\ndef get_json(obj):\n \"\"\"Recursively serialize `object` to JSON using the :class:`CustomEncoder` class.\n\n Parameters\n ----------\n dict_config : :class:`dict`\n The dict to flatten\n\n Returns\n -------\n :class:`str`\n The flattened object\n \"\"\"\n return json.dumps(obj, cls=CustomEncoder)\n\n\ndef get_camera_dict_for_hashing(camera_object):\n \"\"\"Remove some keys from `camera_object.__dict__` that are very heavy and useless for the caching functionality.\n\n Parameters\n ----------\n camera_object : :class:`~.Camera`\n The camera object used in the scene\n\n Returns\n -------\n :class:`dict`\n `Camera.__dict__` but cleaned.\n \"\"\"\n camera_object_dict = copy.copy(camera_object.__dict__)\n # We have to clean a little bit of camera_dict, as pixel_array and background are two very big numpy arrays.\n # They are not essential to caching process.\n # We also have to remove pixel_array_to_cairo_context as it contains used memory adress (set randomly). See l.516 get_cached_cairo_context in camera.py\n for to_clean in [\"background\", \"pixel_array\", \"pixel_array_to_cairo_context\"]:\n camera_object_dict.pop(to_clean, None)\n return camera_object_dict\n\n\ndef get_hash_from_play_call(\n scene_object, camera_object, animations_list, current_mobjects_list\n):\n \"\"\"Take the list of animations and a list of mobjects and output their hashes. 
This is meant to be used for `scene.play` function.\n\n Parameters\n -----------\n scene_object : :class:`~.Scene`\n The scene object.\n\n camera_object : :class:`~.Camera`\n The camera object used in the scene.\n\n animations_list : Iterable[:class:`~.Animation`]\n The list of animations.\n\n current_mobjects_list : Iterable[:class:`~.Mobject`]\n The list of mobjects.\n\n Returns\n -------\n :class:`str`\n A string concatenation of the respective hashes of `camera_object`, `animations_list` and `current_mobjects_list`, separated by `_`.\n \"\"\"\n logger.debug(\"Hashing ...\")\n global ALREADY_PROCESSED_ID\n # We add the scene object within the ALREADY_PROCESSED_ID, as we don't want to process because pretty much all of its attributes will be soon or later processed (in one of the three hashes).\n ALREADY_PROCESSED_ID = {id(scene_object): scene_object}\n t_start = perf_counter()\n camera_json = get_json(get_camera_dict_for_hashing(camera_object))\n animations_list_json = [get_json(x) for x in sorted(animations_list, key=str)]\n current_mobjects_list_json = [get_json(x) for x in current_mobjects_list]\n hash_camera, hash_animations, hash_current_mobjects = [\n zlib.crc32(repr(json_val).encode())\n for json_val in [camera_json, animations_list_json, current_mobjects_list_json]\n ]\n t_end = perf_counter()\n logger.debug(\"Hashing done in %(time)s s.\", {\"time\": str(t_end - t_start)[:8]})\n hash_complete = f\"{hash_camera}_{hash_animations}_{hash_current_mobjects}\"\n # This will reset ALREADY_PROCESSED_ID as all the hashing processus is finished.\n ALREADY_PROCESSED_ID = {}\n logger.debug(\"Hash generated : %(h)s\", {\"h\": hash_complete})\n return hash_complete\n\n\ndef get_hash_from_wait_call(\n scene_object,\n camera_object,\n wait_time,\n stop_condition_function,\n current_mobjects_list,\n):\n \"\"\"Take a wait time, a boolean function as a stop condition and a list of mobjects, and then output their individual hashes. 
This is meant to be used for `scene.wait` function.\n\n Parameters\n -----------\n scene_object : :class:`~.Scene`\n The scene object.\n camera_object : :class:`~.Camera`\n The camera object.\n wait_time : :class:`float`\n The time to wait\n stop_condition_function : Callable[[...], bool]\n Boolean function used as a stop_condition in `wait`.\n\n Returns\n -------\n :class:`str`\n A concatenation of the respective hashes of `animations_list and `current_mobjects_list`, separated by `_`.\n \"\"\"\n logger.debug(\"Hashing ...\")\n t_start = perf_counter()\n global ALREADY_PROCESSED_ID\n # We add the scene object within the ALREADY_PROCESSED_ID, as we don't want to process because pretty much all of its attributes will be soon or later processed (in one of the three hashes).\n ALREADY_PROCESSED_ID = {id(scene_object): scene_object}\n camera_json = get_json(get_camera_dict_for_hashing(camera_object))\n current_mobjects_list_json = [get_json(x) for x in current_mobjects_list]\n hash_current_mobjects = zlib.crc32(repr(current_mobjects_list_json).encode())\n hash_camera = zlib.crc32(repr(camera_json).encode())\n if stop_condition_function is not None:\n hash_function = zlib.crc32(get_json(stop_condition_function).encode())\n # This will reset ALREADY_PROCESSED_ID as all the hashing processus is finished.\n ALREADY_PROCESSED_ID = {}\n t_end = perf_counter()\n logger.debug(\"Hashing done in %(time)s s.\", {\"time\": str(t_end - t_start)[:8]})\n hash_complete = f\"{hash_camera}_{str(wait_time).replace('.', '-')}{hash_function}_{hash_current_mobjects}\"\n logger.debug(\"Hash generated : %(h)s\", {\"h\": hash_complete})\n return hash_complete\n ALREADY_PROCESSED_ID = {}\n t_end = perf_counter()\n logger.debug(\"Hashing done in %(time)s s.\", {\"time\": str(t_end - t_start)[:8]})\n hash_complete = (\n f\"{hash_camera}_{str(wait_time).replace('.', '-')}_{hash_current_mobjects}\"\n )\n\n logger.debug(\"Hash generated : %(h)s\", {\"h\": hash_complete})\n return hash_complete\n",
"import os\nimport logging\nimport numpy as np\n\nfrom manim import config, tempconfig\n\n\nclass GraphicalUnitTester:\n \"\"\"Class used to test the animations.\n\n Parameters\n ----------\n scene_object : :class:`~.Scene`\n The scene to be tested\n config_scene : :class:`dict`\n The configuration of the scene\n module_tested : :class:`str`\n The name of the module tested. i.e if we are testing functions of creation.py, the module will be \"creation\"\n\n Attributes\n -----------\n path_tests_medias_cache : : class:`str`\n Path to 'media' folder generated by manim. This folder contains cached data used by some tests.\n path_control_data : :class:`str`\n Path to the data used for the tests (i.e the pre-rendered frames).\n scene : :class:`Scene`\n The scene tested\n \"\"\"\n\n def __init__(\n self,\n scene_object,\n module_tested,\n tmpdir,\n ):\n # Disable the the logs, (--quiet is broken) TODO\n logging.disable(logging.CRITICAL)\n tests_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n self.path_tests_medias_cache = os.path.join(\n tmpdir,\n \"test_graphical_units\",\n \"tests_cache\",\n module_tested,\n scene_object.__name__,\n )\n self.path_control_data = os.path.join(\n tests_directory, \"control_data\", \"graphical_units_data\", module_tested\n )\n\n # IMPORTANT NOTE : The graphical units tests don't use for now any\n # custom manim.cfg, since it is impossible to manually select a\n # manim.cfg from a python file. (see issue #293)\n config[\"text_dir\"] = os.path.join(self.path_tests_medias_cache, \"Text\")\n config[\"tex_dir\"] = os.path.join(self.path_tests_medias_cache, \"Tex\")\n\n config[\"disable_caching\"] = True\n config[\"quality\"] = \"low_quality\"\n\n for dir_temp in [\n self.path_tests_medias_cache,\n config[\"text_dir\"],\n config[\"tex_dir\"],\n ]:\n os.makedirs(dir_temp)\n\n with tempconfig({\"dry_run\": True}):\n self.scene = scene_object(skip_animations=True)\n self.scene.render()\n\n def _load_data(self):\n \"\"\"Load the np.array of the last frame of a pre-rendered scene. 
If not found, throw FileNotFoundError.\n\n Returns\n -------\n :class:`numpy.array`\n The pre-rendered frame.\n \"\"\"\n frame_data_path = os.path.join(\n os.path.join(self.path_control_data, f\"{self.scene}.npz\")\n )\n return np.load(frame_data_path)[\"frame_data\"]\n\n def _show_diff_helper(self, frame_data, expected_frame_data):\n \"\"\"Will visually display with matplotlib differences between frame generared and the one expected.\"\"\"\n import matplotlib.pyplot as plt\n import matplotlib.gridspec as gridspec\n\n gs = gridspec.GridSpec(2, 2)\n fig = plt.figure()\n fig.suptitle(f\"Test for {str(self.scene).replace('Test', '')}\", fontsize=16)\n\n ax = fig.add_subplot(gs[0, 0])\n ax.imshow(frame_data)\n ax.set_title(\"Generated :\")\n\n ax = fig.add_subplot(gs[0, 1])\n ax.imshow(expected_frame_data)\n ax.set_title(\"Expected :\")\n\n ax = fig.add_subplot(gs[1, :])\n diff_im = expected_frame_data.copy()\n diff_im = np.where(\n frame_data != np.array([0, 0, 0, 255]),\n np.array([255, 0, 0, 255], dtype=\"uint8\"),\n np.array([0, 0, 0, 255], dtype=\"uint8\"),\n ) # Set the points of the frame generated to red.\n np.putmask(\n diff_im,\n expected_frame_data != np.array([0, 0, 0, 255], dtype=\"uint8\"),\n np.array([0, 255, 0, 255], dtype=\"uint8\"),\n ) # Set the points of the frame generated to green.\n ax.imshow(diff_im, interpolation=\"nearest\")\n ax.set_title(\"Differences summary : (red = got, green = expected)\")\n\n plt.show()\n plt.savefig(f\"{self.scene}.png\")\n\n def test(self, show_diff=False):\n \"\"\"Compare pre-rendered frame to the frame rendered during the test.\"\"\"\n frame_data = self.scene.renderer.get_frame()\n expected_frame_data = self._load_data()\n\n assert frame_data.shape == expected_frame_data.shape, (\n \"The frames have different shape:\"\n + f\"\\nexpected_frame_data.shape = {expected_frame_data.shape}\"\n + f\"\\nframe_data.shape = {frame_data.shape}\"\n )\n\n test_result = np.array_equal(frame_data, expected_frame_data)\n if not test_result:\n incorrect_indices = np.argwhere(frame_data != expected_frame_data)\n first_incorrect_index = incorrect_indices[0][:2]\n first_incorrect_point = frame_data[tuple(first_incorrect_index)]\n expected_point = expected_frame_data[tuple(first_incorrect_index)]\n if show_diff:\n self._show_diff_helper(frame_data, expected_frame_data)\n assert test_result, (\n f\"The frames don't match. {str(self.scene).replace('Test', '')} has been modified.\"\n + \"\\nPlease ignore if it was intended.\"\n + f\"\\nFirst unmatched index is at {first_incorrect_index}: {first_incorrect_point} != {expected_point}\"\n )\n"
] | [
[
"numpy.resize"
],
[
"numpy.array_equal",
"matplotlib.pyplot.savefig",
"numpy.argwhere",
"matplotlib.gridspec.GridSpec",
"numpy.load",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
viniciusd/DCO1008---Digital-Signal-Processing | [
"a2756cb577bcdaf8852e2ef766732799cde7f5a3"
] | [
"projeto2/question2.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.io import loadmat\nfrom scipy.interpolate import interp1d\n\n\ndef _load_signal(name):\n try:\n sig = loadmat(name)\n except FileNotFoundError:\n raise\n condition = name.split('_')[-1]\n sig['t'] = sig.pop('t_%s' % condition).flatten()\n sig['hr'] = sig.pop('hr_%s' % condition).flatten()\n # Removes DC component\n sig['hr'] -= np.mean(sig['hr'])\n return sig\n\n\ndef _signal_fix_sample_rate(sig, rate):\n new_sig = dict()\n\n interp, ts = interp1d(sig['t'], sig['hr']), 1/rate\n\n new_sig['t'] = np.arange(sig['t'][0], sig['t'][-1], ts)\n new_sig['hr'] = interp(new_sig['t'])\n return new_sig\n\n\ndef signal_autocorr_plot(name):\n sig = _load_signal(name)\n plt.figure()\n plt.acorr(sig['hr'], usevlines=False, maxlags=35)\n plt.xlabel('Lags')\n plt.ylabel('Autocorrelation')\n plt.savefig('q2_acorr_%s.png' % name)\n\n\ndef signal_psd_plot(name):\n rate = 100\n sig = _signal_fix_sample_rate(\n _load_signal(name),\n rate\n )\n plt.figure()\n plt.psd(sig['hr']**2, Fs=rate)\n plt.savefig('q2_psd_%s.png' % name)\n\nif __name__ == '__main__':\n signal_autocorr_plot('Hr_pre')\n signal_autocorr_plot('Hr_med')\n signal_psd_plot('Hr_pre')\n signal_psd_plot('Hr_med')\n"
] | [
[
"matplotlib.pyplot.psd",
"matplotlib.pyplot.acorr",
"numpy.arange",
"scipy.io.loadmat",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"scipy.interpolate.interp1d",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
danmar3/twodlearn | [
"02b23bf07618d5288e338bd8f312cc38aa58c195"
] | [
"twodlearn/core/autoinit.py"
] | [
"import numpy as np\nimport tensorflow as tf\nfrom . import variable\nfrom .options import global_options\n\n\nclass AutoinitType(object):\n ''' Base class to identify auto initializers '''\n pass\n\n\nclass AutoInit(object):\n ''' Indicates that the property should be auto initialized\n\n Example: ::\n\n TdlModel(prop=AutoInit()) # Runs auto initialization for prop\n\n If the property initializer accepts AutoType, the Type can be provided\n using a tuple: ::\n\n TdlModel(prop=(AutoInit(), AutoType))\n '''\n\n\nclass AutoTensor(AutoinitType):\n ''' auto initialize properties as tensorflow Tensors\n '''\n def __call__(self, value):\n if isinstance(value, tf.Tensor):\n return value\n else:\n return tf.convert_to_tensor(value)\n\n\nclass AutoConstant(AutoinitType):\n ''' auto initialize properties as tensorflow constants\n '''\n def __call__(self, *args, **kargs):\n return tf.constant(*args, **kargs)\n\n\nclass AutoVariable(AutoinitType):\n ''' auto initialize properties as variables\n\n If an initializer is provided, then shape must be specified: ::\n\n init = AutoVariable(initializer=tf.keras.initializer.glorot_uniform())\n var = init(shape=shape)\n\n Otherwise, calling AutoVariable expects an initial value or an initializer\n\n '''\n def __init__(self, initializer=None):\n self.initializer = initializer\n\n def __call__(self, *args, **kargs):\n if self.initializer is not None:\n if 'shape' not in kargs:\n raise TypeError('shape must be specified for an AutoVariable '\n 'that has an initializer.')\n if args:\n raise TypeError('arguments must be explicitly stated when '\n 'AutoVariable with an initializer.')\n shape = kargs['shape']\n kargs = {key: value for key, value in kargs.items()\n if key != 'shape'}\n return variable.variable(self.initializer(shape=shape),\n **kargs)\n else:\n return variable.variable(*args, **kargs)\n\n\nclass AutoConstantVariable(AutoinitType):\n ''' auto initialize properties as non-trainable vairables\n '''\n def __call__(self, *args, **kargs):\n return variable.variable(*args, trainable=False, **kargs)\n\n\nclass AutoTrainable(AutoinitType):\n ''' auto initialize properties as trainable vairables\n '''\n def __call__(self, *args, **kargs):\n return variable.variable(*args, trainable=True, **kargs)\n\n\nclass AutoPlaceholder(AutoinitType):\n def __call__(self, **kargs):\n ''' auto initialize properties as placeholders\n '''\n if 'dtype' not in kargs:\n kargs['dtype'] = global_options.float.tftype\n return tf.placeholder(**kargs)\n\n\nclass AutoZeros(AutoinitType):\n def __call__(self, **kargs):\n ''' auto initialize properties as placeholders\n '''\n if 'dtype' not in kargs:\n kargs['dtype'] = global_options.float.tftype\n return tf.zeros(**kargs)\n\n\nclass AutoNormalVar(AutoinitType):\n def __init__(self, mean, stddev):\n self.mean = mean\n self.stddev = stddev\n\n def __call__(self, shape, **kargs):\n ''' auto initialize properties as variables\n '''\n return variable.variable(\n tf.random_normal(shape=shape, mean=self.mean, stddev=self.stddev),\n **kargs)\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.constant",
"tensorflow.zeros",
"tensorflow.placeholder",
"tensorflow.random_normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
kamodulin/TRAILMAP | [
"1700eca3db070b02132ac1d9db8b9a80323d02cb"
] | [
"utilities/utilities.py"
] | [
"import numpy as np\nimport cv2\nimport os\nfrom os import listdir, makedirs\nfrom os.path import join\nfrom PIL import Image\nimport shutil\nimport sys\n\ndef crop_numpy(dim1, dim2, dim3, vol):\n return vol[dim1:vol.shape[0] - dim1, dim2:vol.shape[1] - dim2, dim3:vol.shape[2] - dim3]\n\n\ndef write_tiff_stack(vol, fname):\n im = Image.fromarray(vol[0])\n ims = []\n\n for i in range(1, vol.shape[0]):\n ims.append(Image.fromarray(vol[i]))\n\n im.save(fname, save_all=True, append_images=ims)\n\n\ndef get_dir(path):\n tiffs = [join(path, f) for f in listdir(path) if f[0] != '.']\n return sorted(tiffs)\n\n\ndef crop_cube(x, y, z, vol, cube_length=64):\n # Cube shape\n return crop_box(x, y, z, vol, (cube_length, cube_length, cube_length))\n\n\ndef crop_box(x, y, z, vol, shape):\n return vol[z:z + shape[2], x:x + shape[0], y:y + shape[1]]\n\n\n\"\"\"\nRead images from start_index to end_index from a folder\n\n@param path: The path to the folder\n@param start_index: The index of the image to start reading from inclusive\n@param end_index: The end of the image to stop reading from exclusive\n\n@raise FileNotFoundError: If the path to the folder cannot be found \n\"\"\"\ndef read_folder_section(path, start_index, end_index):\n fnames = get_dir(path)\n vol = []\n\n for f in fnames[start_index: end_index]:\n img = cv2.imread(f, cv2.COLOR_BGR2GRAY)\n vol.append(img)\n\n vol = np.array(vol)\n\n return vol\n\n\ndef read_folder_stack(path):\n fnames = get_dir(path)\n\n fnames.sort()\n vol = cv2.imread(fnames[0], cv2.COLOR_BGR2GRAY)\n\n if len(vol.shape) == 3:\n return vol\n\n vol = []\n\n for f in fnames:\n img = cv2.imread(f, cv2.COLOR_BGR2GRAY)\n vol.append(img)\n\n vol = np.array(vol)\n\n return vol\n\ndef write_folder_stack(vol, path):\n\n if os.path.exists(path):\n print(\"Overwriting \" + path)\n shutil.rmtree(path)\n\n makedirs(path)\n\n for i in range(vol.shape[0]):\n\n fname = os.path.join(path, \"slice\" + str(i).zfill(5) + \".tiff\")\n cv2.imwrite(fname, vol[i])\n\n\ndef read_tiff_stack(path):\n img = Image.open(path)\n images = []\n for i in range(img.n_frames):\n img.seek(i)\n slice = np.array(img)\n images.append(slice)\n\n return np.array(images)\n\n\ndef coordinate_vol(coords, shape):\n vol = np.zeros(shape, dtype=\"uint16\")\n for c in coords:\n vol[c[0], c[1], c[2]] = 1\n return vol\n\n\ndef preprocess(vol):\n return vol / 65535\n\n\ndef preprocess_batch(batch):\n assert len(batch.shape) == 5\n lst = []\n\n for i in range(batch.shape[0]):\n lst.append(preprocess(batch[i]))\n\n return np.array(lst)\n\n\ndef dist(p1, p2):\n sqr = (p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2 + (p1[2] - p2[2]) ** 2\n return sqr ** .5\n\n\n\n\"\"\"\nProgress bar to indicate status of the segment_brain function\n\"\"\"\n\ndef draw_progress_bar(percent, eta=\"\", bar_len = 40):\n # percent float from 0 to 1.\n sys.stdout.write(\"\\r\")\n sys.stdout.write(\"[{:<{}}] {:>3.0f}% {:20}\".format(\"=\" * int(bar_len * percent), bar_len, percent * 100, eta))\n sys.stdout.flush()\n"
] | [
[
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pprachas/ABC_dataset | [
"61c915853c0229295e728f869b11b113ee59f098",
"61c915853c0229295e728f869b11b113ee59f098",
"61c915853c0229295e728f869b11b113ee59f098",
"61c915853c0229295e728f869b11b113ee59f098",
"61c915853c0229295e728f869b11b113ee59f098"
] | [
"Domain/subdataset1_domain/subdataset1_mesh.py",
"Domain/subdataset3_domain/subdataset3_img.py",
"ML/model_explain.py",
"ML/init_models.py",
"Domain/subdataset2_domain/subdataset2_mesh.py"
] | [
"import numpy as np\r\n\r\nimport pygmsh\r\nimport meshio\r\n\r\nimport sys\r\n#---------------------Beam Parameters----------------------------#\r\nL = 40 # length of beam\r\nw = 5 # wdith of beam\r\nr_max = w/10\r\nr_min = w/15\r\n#----------------------------------Import Files---------------------#\r\n# Change to directory of dowloaded txt files in folder subdataset1_geo\r\nf_x = 'subdataset1_geometry/x.npy'\r\nf_l = 'subdataset1_geometry/l.npy'\r\n\r\nx = np.load(f_x)\r\nl = np.load(f_l)\r\n\r\nfor ii in range(0,len(x)):\r\n#-----------------------------------pygmsh structure generation-----------------------------#\r\n geom = pygmsh.opencascade.Geometry(characteristic_length_min=r_min,characteristic_length_max=r_max)\r\n block = []\r\n for jj in range(0,len(x[ii])):\r\n block.append(geom.add_rectangle([x[ii][jj]-l[ii][jj],L-(L*(jj+1)/40),0],2*l[ii][jj],L/40))\r\n \r\n unit = geom.boolean_union(block)\r\n\r\n\r\n #----------------------------Add Boundaries----------------------------#\r\n bot = geom.add_rectangle([0.0,0.0,0.0],w,L/40)\r\n top = geom.add_rectangle([0.0,L-L/40,0.0],w,L/40)\r\n \r\n unit = geom.boolean_union([unit,top])\r\n unit = geom.boolean_union([unit,bot])\r\n \r\n #---------------------------Generate Mesh----------------------------------#\r\n mesh = pygmsh.generate_mesh(geom, prune_z_0 = True)\r\n \r\n \r\n fname_mesh = 'mesh/mesh'+str(len(x)*(num)+ii) + '.xml' #directory to save mesh\r\n \r\n print(fname_mesh)\r\n \r\n for cell in mesh.cells:\r\n if cell.type == \"triangle\":\r\n triangle_cells = cell.data\r\n \r\n meshio.write(fname_mesh,meshio.Mesh(points=mesh.points,cells={\"triangle\": triangle_cells}))\r\n",
"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom skimage.draw import polygon, circle\r\n\r\n#-------------------------column Parameters-------------------------#\r\nL = 800 # column length\r\nw = 100 # column width\r\n\r\n#------------------------Import Files-------------------------------#\r\nf_x = 'bulk/img/subdataset3_geo/x.txt'\r\nf_y = 'bulk/img/subdataset3_geo/y.txt'\r\nf_inner = 'bulk/img/subdataset3_geo/inner.txt'\r\nf_outer = 'bulk/img/subdataset3_geo/outer.txt'\r\n\r\nx = np.loadtxt(f_x)\r\ny = np.loadtxt(f_y)\r\ninner = np.loadtxt(f_inner)\r\nouter = np.loadtxt(f_outer)\r\n#-------------------------Generate Geometry---------------------------#\r\ndef img_gen(L,w,x,y,outer,inner,ii):\r\n \r\n x = x\r\n y = y\r\n r = outer\r\n outer = inner\r\n \r\n r_inner = r*inner # actual inner radii; the values in the txt file are scales W.R.T inner radii\r\n \r\n img = np.zeros((L,w), dtype = bool)\r\n print(x[ii].shape)\r\n for jj in range(0,x.shape[1]):\r\n rr,cc = circle(y[ii][jj],x[ii][jj],r[ii][jj], shape = img.shape)\r\n img[rr,cc] = 1\r\n rr,cc = circle(y[ii][jj],x[ii][jj],r_inner[ii][jj], shape = img.shape)\r\n img[rr,cc] = 0\r\n \r\n \r\n polytop = np.array([[0,0],[3*w/2,0],[3*w/2,L/20],[0,L/20]])\r\n\r\n rr, cc = polygon(polytop[:, 1], polytop[:, 0], img.shape)\r\n img[rr, cc] = 1\r\n\r\n polybot = np.array([[0,L],[3*w/2,L],[3*w/2,L-L/20],[0,L-L/20]])\r\n\r\n rr, cc = polygon(polybot[:, 1], polybot[:, 0], img.shape)\r\n img[rr, cc] = 1\r\n \r\n return img\r\n\r\nfor ii in range(0,3):\r\n img = img_gen(L,w,x,y,outer,inner,ii) #img ouput, save as array of images if want to convert to graph\r\n",
"import os.path as osp\r\nimport sys\r\n\r\nimport torch\r\n\r\nimport networkx as nx\r\nimport numpy as np\r\n\r\n\r\nfrom torch_geometric.utils import add_self_loops\r\n\r\n\r\nfrom torch.nn import Sequential,Linear,LeakyReLU,Softmax\r\nimport torch.nn.functional as F\r\nfrom torch_geometric.nn import MessagePassing,BatchNorm\r\nfrom torch_geometric.nn import global_max_pool,global_add_pool\r\n\r\nfrom torch_cluster import radius_graph\r\n\r\n#-----------------------Define Pointnet Layer---------------------------------#\r\nclass PointNetLayer(MessagePassing):\r\n def __init__(self, in_channels, out_channels): \r\n # Message passing with \"max\" aggregation.\r\n super(PointNetLayer, self).__init__('max')\r\n \r\n # Initialization of the MLP:\r\n # Here, the number of input features correspond to the hidden node\r\n # dimensionality plus point dimensionality (=3).\r\n self.mlp = Sequential(Linear(in_channels+2, out_channels),\r\n LeakyReLU(),\r\n Linear(out_channels, out_channels)\r\n )\r\n \r\n def forward(self, h, pos, edge_index):\r\n # Start propagating messages.\r\n return self.propagate(edge_index, h=h, pos=pos)\r\n \r\n def message(self,h_j, pos_j, pos_i):\r\n # h_j defines the features of neighboring nodes as shape [num_edges, in_channels]\r\n # pos_j defines the position of neighboring nodes as shape [num_edges, 3]\r\n # pos_i defines the position of central nodes as shape [num_edges, 3]\r\n\r\n input = pos_j - pos_i # Compute spatial relation.\r\n\r\n if h_j is not None:\r\n # In the first layer, we may not have any hidden node features,\r\n # so we only combine them in case they are present.\r\n input = torch.cat([h_j, input], dim=-1)\r\n\r\n return self.mlp(input) # Apply our final MLP.\r\n\r\nclass PointNet4Layers(torch.nn.Module):\r\n def __init__(self):\r\n super(PointNet4Layers, self).__init__()\r\n\r\n\r\n enc = 64\r\n self.conv1 = PointNetLayer(4, enc)\r\n self.bn1 = BatchNorm(enc)\r\n \r\n self.conv2 = PointNetLayer(enc, enc)\r\n self.bn2 = BatchNorm(enc)\r\n \r\n self.conv3 = PointNetLayer(enc, enc)\r\n self.bn3 = BatchNorm(enc)\r\n \r\n self.conv4 = PointNetLayer(enc, enc)\r\n self.bn4 = BatchNorm(enc)\r\n\r\n self.lin1 = Linear(enc*4,2)\r\n \r\n def forward(self, pos, x, batch, r, edge_index):\r\n \r\n edge_index = radius_graph(pos.float(),r=r,batch=batch,loop=True)\r\n \r\n # 3. Start bipartite message passing.\r\n h = self.conv1(h=x.float(), pos=pos.float(), edge_index=edge_index)\r\n h = self.bn1(h)\r\n h1 = F.leaky_relu(h)\r\n \r\n h = self.conv2(h=h1, pos=pos.float(), edge_index=edge_index)\r\n h = self.bn2(h)\r\n h2 = F.leaky_relu(h)\r\n \r\n h = self.conv3(h=h2, pos=pos.float(), edge_index=edge_index)\r\n h = self.bn3(h)\r\n h3 = F.leaky_relu(h)\r\n \r\n h = self.conv4(h=h3, pos=pos.float(), edge_index=edge_index)\r\n h = self.bn4(h)\r\n h4 = F.leaky_relu(h)\r\n\r\n # 4. Global Pooling.\r\n h = global_max_pool(torch.cat([h1,h2,h3,h4],dim=-1), batch)\r\n\r\n # 5. Classifier.\r\n h = self.lin1(h)\r\n\r\n return h\r\n",
"import torch \r\n\r\n#----------------Note-----------------------------------------------#\r\n#This code is used to initialize the Pointet weights like the manuscript\r\n\r\n#------------------Import Models---------------------------------------#\r\nfrom Pointnet_layer import *\r\n#------------------Weight Initialization-------------------------------#\r\nfor ii in range(1,11):\r\n torch.manual_seed(ii)\r\n \r\n model = PointNet4Layers()\r\n torch.save(model.state_dict(),'init_models/init_model_seed'+str(ii)+'.pt')",
"import numpy as np\r\n\r\nimport pygmsh\r\nimport meshio\r\n\r\nimport sys\r\nimport time\r\n#---------------------Beam and Geometric Parameters----------------------------#\r\nti = time.time()\r\nL = 800 # length of beam\r\nw = 100 # wdith of beam\r\nr = 0.25*w # radius of each circle\r\np = 5 # Ring thickness\r\n\r\nfor ii in range(0,25000):\r\n#---------------------------Import files-------------------------------------#\r\n # Change to directory of dowloaded txt files in folder subdataset2_geo\r\n f_x = 'subdataset2_geometry/x/x'+str(ii)+'.txt' # import x coordinates\r\n f_y = 'subdataset2_geometry/y/y'+str(ii)+'.txt' # import y coordinates\r\n\r\n x = np.loadtxt(f_x)\r\n y = np.loadtxt(f_y)\r\n \r\n print(x)\r\n print(len(x))\r\n#-----------------------------------pygmsh structure generation-----------------------------#\r\n geom = pygmsh.opencascade.Geometry(characteristic_length_min=(r-p)/5,characteristic_length_max=(r+p)/5)\r\n\r\n circle = geom.add_disk([x[0],L-y[0],0.0],r+p)\r\n hole = geom.add_disk([x[0],L-y[0],0.0],r-p)\r\n unit = geom.boolean_difference([circle],[hole])\r\n #-----------------------------------Add Rings---------------------------# \r\n for jj in range(1,len(x)): \r\n print(x[jj],y[jj])\r\n circle = geom.add_disk([x[jj],L-y[jj],0.0],r+p)\r\n hole = geom.add_disk([x[jj],L-y[jj],0.0],r-p)\r\n donut = geom.boolean_difference([circle],[hole])\r\n \r\n unit = geom.boolean_union([unit,donut])\r\n \r\n #----------------------------Add Boundaries----------------------------#\r\n bot = geom.add_rectangle([0.0,0.0,0.0],w,L/20)\r\n top = geom.add_rectangle([0.0,L-L/20,0.0],w,L/20)\r\n \r\n unit = geom.boolean_union([unit,top])\r\n unit = geom.boolean_union([unit,bot])\r\n \r\n \r\n \r\n #---------------------------Generate Mesh----------------------------------#\r\n mesh = pygmsh.generate_mesh(geom, prune_z_0 = True)\r\n\r\n #--------------------------------Save Mesh-------------------------------------# \r\n fname_mesh = 'mesh/mesh'+str(ii) + '.xml' # Directory to meshes\r\n for cell in mesh.cells:\r\n if cell.type == \"triangle\":\r\n triangle_cells = cell.data\r\n \r\n meshio.write(fname_mesh,meshio.Mesh(points=mesh.points,cells={\"triangle\": triangle_cells}))\r\n"
] | [
[
"numpy.load"
],
[
"numpy.array",
"numpy.zeros",
"numpy.loadtxt"
],
[
"torch.nn.Linear",
"torch.nn.LeakyReLU",
"torch.nn.functional.leaky_relu",
"torch.cat"
],
[
"torch.manual_seed"
],
[
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kimhyeji/Mem2Seq | [
"6e6b7eacb4ae2e26517980c45046b0c519c918b7",
"6e6b7eacb4ae2e26517980c45046b0c519c918b7"
] | [
"models/enc_PTRUNK.py",
"models/Mem2Seq_NMT.py"
] | [
"import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch import optim\nimport torch.nn.functional as F\nfrom utils.masked_cross_entropy import *\nfrom utils.config import *\nimport random\nimport numpy as np\nimport datetime\nfrom utils.measures import wer,moses_multi_bleu\nfrom tqdm import tqdm\nfrom sklearn.metrics import f1_score\nimport math\nimport nsml\n\nclass PTRUNK(nn.Module):\n def __init__(self,hidden_size,max_len,max_r,lang,path,task,lr,n_layers, dropout):\n super(PTRUNK, self).__init__()\n self.name = \"PTRUNK\"\n self.task = task\n self.input_size = lang.n_words\n self.output_size = lang.n_words\n self.hidden_size = hidden_size\n self.max_len = max_len ## max input\n self.max_r = max_r ## max responce len \n self.lang = lang\n self.lr = lr\n self.decoder_learning_ratio = 5.0\n self.n_layers = n_layers\n self.dropout = dropout\n if path:\n if USE_CUDA:\n logging.info(\"MODEL {} LOADED\".format(str(path)))\n self.encoder = torch.load(str(path)+'/enc.th')\n self.decoder = torch.load(str(path)+'/dec.th')\n else:\n logging.info(\"MODEL {} LOADED\".format(str(path)))\n self.encoder = torch.load(str(path)+'/enc.th',lambda storage, loc: storage)\n self.decoder = torch.load(str(path)+'/dec.th',lambda storage, loc: storage)\n self.decoder.viz_arr =[] \n else:\n self.encoder = EncoderRNN(lang.n_words, hidden_size, n_layers,dropout)\n self.decoder = PtrDecoderRNN(hidden_size, lang.n_words, n_layers, dropout)\n # Initialize optimizers and criterion\n self.encoder_optimizer = optim.Adam(self.encoder.parameters(), lr=lr)\n self.decoder_optimizer = optim.Adam(self.decoder.parameters(), lr=lr* self.decoder_learning_ratio)\n self.criterion = nn.MSELoss()\n self.loss = 0\n self.loss_gate = 0\n self.loss_ptr = 0\n self.loss_vac = 0\n self.print_every = 1\n # Move models to GPU\n if USE_CUDA:\n self.encoder.cuda()\n self.decoder.cuda()\n\n def print_loss(self):\n print_loss_avg = self.loss / self.print_every\n print_loss_gate = self.loss_gate / self.print_every\n print_loss_ptr = self.loss_ptr / self.print_every\n print_loss_vac = self.loss_vac / self.print_every\n self.print_every += 1\n return 'L:{:.2f}, VL:{:.2f},GL:{:.2f}, PL:{:.2f}'.format(print_loss_avg,print_loss_vac,print_loss_gate,print_loss_ptr)\n \n def save_model(self,dec_type):\n name_data = \"KVR/\" if self.task=='' else \"BABI/\"\n if USEKB:\n directory = 'save/PTR_KB-'+name_data+str(self.task)+'HDD'+str(self.hidden_size)+'DR'+str(self.dropout)+'L'+str(self.n_layers)+'lr'+str(self.lr)+str(dec_type) \n else:\n directory = 'save/PTR_noKB-'+name_data+str(self.task)+'HDD'+str(self.hidden_size)+'DR'+str(self.dropout)+'L'+str(self.n_layers)+'lr'+str(self.lr)+str(dec_type) \n #directory = 'save/PTR_KVR_KB/'+str(self.task)+'HDD'+str(self.hidden_size)+'DR'+str(self.dropout)+'L'+str(self.n_layers)+'lr'+str(self.lr)+str(dec_type) #+datetime.datetime.now().strftime(\"%I%M%p%B%d%Y\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n torch.save(self.encoder, directory+'/enc.th')\n torch.save(self.decoder, directory+'/dec.th')\n \n def train_batch(self, input_batches, input_lengths, target_batches, \n target_lengths, target_index, target_gate, batch_size, clip,\n teacher_forcing_ratio, reset):\n if reset:\n self.loss = 0\n self.loss_gate = 0\n self.loss_ptr = 0\n self.loss_vac = 0\n self.print_every = 1 \n # Zero gradients of both optimizers\n self.encoder_optimizer.zero_grad()\n self.decoder_optimizer.zero_grad()\n loss_Vocab,loss_Ptr,loss_Gate = 0,0,0\n # Run words through encoder\n encoder_outputs, 
encoder_hidden = self.encoder(input_batches, input_lengths)\n \n # Prepare input and output variables\n decoder_input = Variable(torch.LongTensor([SOS_token] * batch_size))\n decoder_hidden = (encoder_hidden[0][:self.decoder.n_layers],encoder_hidden[1][:self.decoder.n_layers])\n \n max_target_length = max(target_lengths)\n all_decoder_outputs_vocab = Variable(torch.zeros(max_target_length, batch_size, self.output_size))\n all_decoder_outputs_ptr = Variable(torch.zeros(max_target_length, batch_size, encoder_outputs.size(0)))\n all_decoder_outputs_gate = Variable(torch.zeros(max_target_length, batch_size))\n # Move new Variables to CUDA\n if USE_CUDA:\n all_decoder_outputs_vocab = all_decoder_outputs_vocab.cuda()\n all_decoder_outputs_ptr = all_decoder_outputs_ptr.cuda()\n all_decoder_outputs_gate = all_decoder_outputs_gate.cuda()\n decoder_input = decoder_input.cuda()\n\n # Choose whether to use teacher forcing\n use_teacher_forcing = random.random() < teacher_forcing_ratio\n \n if use_teacher_forcing: \n # Run through decoder one time step at a time\n for t in range(max_target_length):\n decoder_ptr,decoder_vacab,gate,decoder_hidden = self.decoder(\n decoder_input, decoder_hidden, encoder_outputs)\n\n all_decoder_outputs_vocab[t] = decoder_vacab\n all_decoder_outputs_ptr[t] = decoder_ptr.transpose(0,1)\n all_decoder_outputs_gate[t] = gate.squeeze(1)\n decoder_input = target_batches[t] # Next input is current target\n if USE_CUDA: decoder_input = decoder_input.cuda()\n \n else:\n for t in range(max_target_length):\n decoder_ptr,decoder_vacab,gate,decoder_hidden = self.decoder(\n decoder_input, decoder_hidden, encoder_outputs)\n all_decoder_outputs_vocab[t] = decoder_vacab\n all_decoder_outputs_ptr[t] = decoder_ptr.transpose(0,1) #\n all_decoder_outputs_gate[t] = gate.squeeze(1) #\n topv, topvi = decoder_vacab.data.topk(1)\n topp, toppi = decoder_ptr.data.topk(1)\n ## get the correspective word in input\n top_ptr_i = torch.gather(input_batches,0,toppi.view(1, -1))\n next_in = [top_ptr_i.squeeze()[i].data[0] if(gate.squeeze()[i].data[0]>=0.5) else topvi.squeeze()[i] for i in range(batch_size)]\n decoder_input = Variable(torch.LongTensor(next_in)) # Chosen word is next input\n if USE_CUDA: decoder_input = decoder_input.cuda()\n \n #Loss calculation and backpropagation\n loss_Vocab = masked_cross_entropy(\n all_decoder_outputs_vocab.transpose(0, 1).contiguous(), # -> batch x seq\n target_batches.transpose(0, 1).contiguous(), # -> batch x seq\n target_lengths\n )\n loss_Ptr = masked_cross_entropy(\n all_decoder_outputs_ptr.transpose(0, 1).contiguous(), # -> batch x seq\n target_index.transpose(0, 1).contiguous(), # -> batch x seq\n target_lengths\n )\n loss_gate = self.criterion(all_decoder_outputs_gate,target_gate.float())\n\n\n loss = loss_Vocab + loss_Ptr + loss_gate\n loss.backward()\n \n # Clip gradient norms\n ec = torch.nn.utils.clip_grad_norm_(self.encoder.parameters(), clip)\n dc = torch.nn.utils.clip_grad_norm_(self.decoder.parameters(), clip)\n # Update parameters with optimizers\n self.encoder_optimizer.step()\n self.decoder_optimizer.step()\n self.loss += loss.item()\n self.loss_gate += loss_gate.item()\n self.loss_ptr += loss_Ptr.item()\n self.loss_vac += loss_Vocab.item()\n \n \n def evaluate_batch(self,batch_size,input_batches, input_lengths, target_batches, target_lengths, target_index,target_gate,src_plain): \n # Set to not-training mode to disable dropout\n self.encoder.train(False)\n self.decoder.train(False) \n # Run words through encoder\n encoder_outputs, encoder_hidden = 
self.encoder(input_batches, input_lengths, None)\n # Prepare input and output variables\n decoder_input = Variable(torch.LongTensor([SOS_token] * batch_size))\n decoder_hidden = (encoder_hidden[0][:self.decoder.n_layers],encoder_hidden[1][:self.decoder.n_layers])\n\n decoded_words = []\n all_decoder_outputs_vocab = Variable(torch.zeros(self.max_r, batch_size, self.decoder.output_size))\n all_decoder_outputs_ptr = Variable(torch.zeros(self.max_r, batch_size, encoder_outputs.size(0)))\n all_decoder_outputs_gate = Variable(torch.zeros(self.max_r, batch_size))\n # Move new Variables to CUDA\n\n if USE_CUDA:\n all_decoder_outputs_vocab = all_decoder_outputs_vocab.cuda()\n all_decoder_outputs_ptr = all_decoder_outputs_ptr.cuda()\n all_decoder_outputs_gate = all_decoder_outputs_gate.cuda()\n decoder_input = decoder_input.cuda()\n p = []\n for elm in src_plain:\n p.append(elm.split(' '))\n # Run through decoder one time step at a time\n for t in range(self.max_r):\n decoder_ptr,decoder_vacab,gate,decoder_hidden = self.decoder(\n decoder_input, decoder_hidden, encoder_outputs)\n all_decoder_outputs_vocab[t] = decoder_vacab\n all_decoder_outputs_ptr[t] = decoder_ptr.transpose(0,1)\n all_decoder_outputs_gate[t] = gate.squeeze(1)\n\n topv, topvi = decoder_vacab.data.topk(1)\n topp, toppi = decoder_ptr.data.topk(1)\n top_ptr_i = torch.gather(input_batches,0,toppi.view(1, -1)) \n next_in = [top_ptr_i.squeeze()[i].item() if(gate.squeeze()[i].item()>=0.5) else topvi.squeeze()[i] for i in range(batch_size)]\n decoder_input = Variable(torch.LongTensor(next_in)) \n # Next input is chosen word\n if USE_CUDA: decoder_input = decoder_input.cuda()\n\n temp = []\n for i in range(batch_size):\n if(gate.squeeze()[i].item()>=0.5):\n if(toppi.squeeze()[i] >= len(p[i]) ):\n temp.append('<EOS>')\n else:\n temp.append(p[i][toppi.squeeze()[i]])\n else:\n ind = topvi.squeeze()[i]\n if ind == EOS_token:\n temp.append('<EOS>')\n else:\n temp.append(self.lang.index2word[ind.item()])\n decoded_words.append(temp)\n\n # Set back to training mode\n self.encoder.train(True)\n self.decoder.train(True) \n\n return decoded_words\n\n\n def evaluate(self,dev,avg_best,epoch = 0, BLEU=False):\n logging.info(\"STARTING EVALUATION\")\n acc_avg = 0.0\n wer_avg = 0.0\n acc_G = 0.0\n acc_P = 0.0\n acc_V = 0.0\n microF1_PRED,microF1_PRED_cal,microF1_PRED_nav,microF1_PRED_wet = [],[],[],[]\n microF1_TRUE,microF1_TRUE_cal,microF1_TRUE_nav,microF1_TRUE_wet = [],[],[],[]\n ref = []\n hyp = []\n pred = []\n if nsml.IS_ON_NSML:\n pbar = enumerate(dev)\n else:\n pbar = tqdm(enumerate(dev),total=len(dev))\n for j, data_dev in pbar: \n words = self.evaluate_batch(len(data_dev[1]),data_dev[0],data_dev[1],data_dev[2],data_dev[3],data_dev[4],data_dev[5],data_dev[6]) \n acc=0\n w = 0\n temp_gen = []\n for i, row in enumerate(np.transpose(words)):\n st = ''\n for e in row:\n if e== '<EOS>':\n break\n else:\n st+= e + ' '\n temp_gen.append(st)\n correct = data_dev[7][i]\n ### compute F1 SCORE \n if(len(data_dev)>10):\n f1_true,f1_pred = computeF1(data_dev[8][i],st.lstrip().rstrip(),correct.lstrip().rstrip())\n microF1_TRUE += f1_true\n microF1_PRED += f1_pred\n\n f1_true,f1_pred = computeF1(data_dev[9][i],st.lstrip().rstrip(),correct.lstrip().rstrip())\n microF1_TRUE_cal += f1_true\n microF1_PRED_cal += f1_pred \n\n f1_true,f1_pred = computeF1(data_dev[10][i],st.lstrip().rstrip(),correct.lstrip().rstrip())\n microF1_TRUE_nav += f1_true\n microF1_PRED_nav += f1_pred \n\n f1_true,f1_pred = 
computeF1(data_dev[11][i],st.lstrip().rstrip(),correct.lstrip().rstrip()) \n microF1_TRUE_wet += f1_true\n microF1_PRED_wet += f1_pred \n \n if (correct.lstrip().rstrip() == st.lstrip().rstrip()):\n acc+=1\n pred.append(\"O\")\n else:\n pred.append(\"X\")\n\n w += wer(correct.lstrip().rstrip(),st.lstrip().rstrip())\n ref.append(str(correct.lstrip().rstrip()))\n hyp.append(str(st.lstrip().rstrip()))\n\n acc_avg += acc/float(len(data_dev[1]))\n wer_avg += w/float(len(data_dev[1]))\n if not nsml.IS_ON_NSML:\n pbar.set_description(\"R:{:.4f},W:{:.4f}\".format(acc_avg/float(len(dev)),wer_avg/float(len(dev))))\n if(len(data_dev)>10):\n logging.info(\"F1 SCORE:\\t\"+str(f1_score(microF1_TRUE, microF1_PRED, average='micro')))\n logging.info(\"F1 CAL:\\t\"+str(f1_score(microF1_TRUE_cal, microF1_PRED_cal, average='micro')))\n logging.info(\"F1 WET:\\t\"+str(f1_score(microF1_TRUE_wet, microF1_PRED_wet, average='micro')))\n logging.info(\"F1 NAV:\\t\"+str(f1_score(microF1_TRUE_nav, microF1_PRED_nav, average='micro')))\n\n if (BLEU): \n bleu_score = moses_multi_bleu(np.array(hyp), np.array(ref), lowercase=True) \n logging.info(\"BLEU SCORE:\"+str(bleu_score)) \n \n if (bleu_score >= avg_best):\n self.save_model(str(self.name)+str(bleu_score))\n logging.info(\"MODEL SAVED\")\n return bleu_score\n else: \n acc_avg = acc_avg/float(len(dev))\n logging.info(\"ACC : {}\".format(str(acc_avg)))\n if (acc_avg >= avg_best):\n if nsml.IS_ON_NSML:\n pass\n else:\n self.save_model(str(self.name) + str(acc_avg))\n logging.info(\"MODEL SAVED\")\n '''\n if acc_avg > 0.33:\n for i in range(len(pred)):\n print(\"{} {} {}\".format(pred[i], ref[i], hyp[i]))\n '''\n return acc_avg\n\ndef computeF1(entity,st,correct):\n y_pred = [0 for z in range(len(entity))]\n y_true = [1 for z in range(len(entity))]\n for k in st.lstrip().rstrip().split(' '):\n if (k in entity):\n y_pred[entity.index(k)] = 1\n return y_true,y_pred\n\nclass EncoderRNN(nn.Module):\n def __init__(self, input_size, hidden_size, n_layers=1, dropout=0.1):\n super(EncoderRNN, self).__init__() \n self.input_size = input_size\n self.hidden_size = hidden_size\n self.n_layers = n_layers\n self.dropout = dropout \n self.embedding = nn.Embedding(input_size, hidden_size)\n self.embedding_dropout = nn.Dropout(dropout) \n self.cell = nn.LSTM(hidden_size, hidden_size, n_layers, dropout=self.dropout)\n if USE_CUDA:\n self.cell = self.cell.cuda()\n self.embedding_dropout = self.embedding_dropout.cuda()\n self.embedding = self.embedding.cuda() \n\n def get_state(self, input):\n \"\"\"Get cell states and hidden states.\"\"\"\n batch_size = input.size(1)\n c0_encoder = Variable(torch.zeros(self.n_layers, batch_size, self.hidden_size)) \n h0_encoder = Variable(torch.zeros(self.n_layers, batch_size, self.hidden_size)) ### * self.num_directions = 2 if bi\n if USE_CUDA:\n h0_encoder = h0_encoder.cuda()\n c0_encoder = c0_encoder.cuda() \n return (h0_encoder, c0_encoder)\n\n def forward(self, input_seqs, input_lengths, hidden=None):\n # Note: we run this all at once (over multiple batches of multiple sequences)\n embedded = self.embedding(input_seqs) # S * B * H\n embedded = self.embedding_dropout(embedded)\n hidden = self.get_state(input_seqs) # N * B * H\n if input_lengths:\n embedded = nn.utils.rnn.pack_padded_sequence(embedded, input_lengths, batch_first=False)\n \n outputs, hidden = self.cell(embedded, hidden)\n if input_lengths:\n outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=False) # Max_S * B * H\n \n return outputs, hidden\n\nclass 
PtrDecoderRNN(nn.Module):\n def __init__(self, hidden_size, output_size, n_layers=1, dropout=0.1):\n super(PtrDecoderRNN, self).__init__()\n self.hidden_size = hidden_size\n self.output_size = output_size ### Vocab size\n self.n_layers = n_layers\n self.dropout = dropout\n self.embedding = nn.Embedding(output_size, hidden_size)\n self.embedding_dropout = nn.Dropout(dropout)\n self.cell = nn.LSTM(2*hidden_size, hidden_size, n_layers, dropout=dropout)\n self.W1 = nn.Linear(2*hidden_size, hidden_size)\n v = torch.rand(hidden_size)\n stdv = 1. / math.sqrt(v.size(0))\n v = v.data.normal_(mean=0, std=stdv)\n self.concat = nn.Linear(hidden_size * 2, hidden_size) \n self.U = nn.Linear(hidden_size, output_size)\n self.W = nn.Linear(hidden_size, 1)\n\n if USE_CUDA:\n self.embedding = self.embedding.cuda()\n self.embedding_dropout = self.embedding_dropout.cuda()\n self.cell = self.cell.cuda()\n self.W1 = self.W1.cuda() \n v = v.cuda()\n self.U = self.U.cuda() \n self.W = self.W.cuda()\n\n self.v = nn.Parameter(v)\n def forward(self, input_seq, last_hidden, encoder_outputs):\n # Note: we run this one step at a time \n # Get the embedding of the current input word (last output word)\n max_len = encoder_outputs.size(0) # MaxS\n batch_size = input_seq.size(0)\n input_seq = input_seq # B (word)\n encoder_outputs = encoder_outputs.transpose(0,1)\n \n word_embedded = self.embedding(input_seq) # S=1 x B x H\n word_embedded = self.embedding_dropout(word_embedded)\n\n ## ATTENTION CALCULATION \n s_t = last_hidden[0][-1].unsqueeze(0)\n H = s_t.repeat(max_len,1,1).transpose(0,1) # B * MaxS * H\n\n energy = F.tanh(self.W1(torch.cat([H,encoder_outputs], 2))) # B * MaxS * H\n energy = energy.transpose(2,1) # B * H * MaxS\n\n # NORMALIZATION\n v = self.v.repeat(encoder_outputs.data.shape[0],1).unsqueeze(1) #[B*1*H]\n p_ptr = torch.bmm(v,energy) # [B*1*MaxS]\n \n a = F.softmax(p_ptr)\n context = a.bmm(encoder_outputs) # B * 1 * H (encoder_output : B * MaxS * H)\n\n # Combine embedded input word and attended context, run through RNN\n if word_embedded.size() != context.squeeze().size():\n print(word_embedded.size(), context.squeeze().size())\n rnn_input = torch.cat((word_embedded, context.squeeze()), 1).unsqueeze(0)\n output, hidden = self.cell(rnn_input, last_hidden)\n \n p_vacab = self.U(output) # 1 * B * output_size\n \n gate = F.sigmoid(self.W(hidden[0][-1])) # B * 1\n\n return p_ptr,p_vacab,gate,hidden\n",
"import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.optim import lr_scheduler\nfrom torch import optim\nimport torch.nn.functional as F\nfrom utils.masked_cross_entropy import *\nfrom utils.config import *\nimport random\nimport numpy as np\nimport datetime\nfrom utils.measures import wer, moses_multi_bleu\nimport matplotlib\n# matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport nltk\nimport os\nimport logging\nfrom sklearn.metrics import f1_score\n\nclass Mem2Seq(nn.Module):\n def __init__(self, hidden_size, max_len, max_r, lang, path, lr, n_layers, dropout):\n super(Mem2Seq, self).__init__()\n self.name = \"Mem2Seq\"\n self.input_size = lang.n_words\n self.output_size = lang.n_words\n self.hidden_size = hidden_size\n self.max_len = max_len ## max input\n self.max_r = max_r ## max responce len \n self.lang = lang\n self.lr = lr\n self.n_layers = n_layers\n self.dropout = dropout\n \n if path:\n if USE_CUDA:\n logging.info(\"MODEL {} LOADED\".format(str(path)))\n self.encoder = torch.load(str(path)+'/enc.th')\n self.decoder = torch.load(str(path)+'/dec.th')\n else:\n logging.info(\"MODEL {} LOADED\".format(str(path)))\n self.encoder = torch.load(str(path)+'/enc.th',lambda storage, loc: storage)\n self.decoder = torch.load(str(path)+'/dec.th',lambda storage, loc: storage)\n else:\n self.encoder = EncoderMemNN(lang.n_words, hidden_size, n_layers, self.dropout)\n self.decoder = DecoderrMemNN(lang.n_words, hidden_size, n_layers, self.dropout)\n # Initialize optimizers and criterion\n self.encoder_optimizer = optim.Adam(self.encoder.parameters(), lr=lr)\n self.decoder_optimizer = optim.Adam(self.decoder.parameters(), lr=lr)\n self.scheduler = lr_scheduler.ReduceLROnPlateau(self.decoder_optimizer,mode='max',factor=0.5,patience=1,min_lr=0.0001, verbose=True)\n self.criterion = nn.MSELoss()\n self.loss = 0\n self.loss_gate = 0\n self.loss_ptr = 0\n self.loss_vac = 0\n self.print_every = 1\n self.batch_size = 0\n # Move models to GPU\n if USE_CUDA:\n self.encoder.cuda()\n self.decoder.cuda()\n\n def print_loss(self): \n print_loss_avg = self.loss / self.print_every\n print_loss_ptr = self.loss_ptr / self.print_every\n print_loss_vac = self.loss_vac / self.print_every\n self.print_every += 1 \n return 'L:{:.2f}, VL:{:.2f}, PL:{:.2f}'.format(print_loss_avg,print_loss_vac,print_loss_ptr)\n \n def save_model(self, dec_type):\n directory = 'save/mem2seq_'+'HDD'+str(self.hidden_size)+'BSZ'+str(self.batch_size)+'DR'+str(self.dropout)+'L'+str(self.n_layers)+'lr'+str(self.lr)+str(dec_type) \n if not os.path.exists(directory):\n os.makedirs(directory)\n torch.save(self.encoder, directory+'/enc.th')\n torch.save(self.decoder, directory+'/dec.th')\n \n def train_batch(self, input_batches, input_lengths, target_batches, \n target_lengths, target_index, batch_size, clip,\n teacher_forcing_ratio,reset): \n if reset:\n self.loss = 0\n self.loss_gate = 0\n self.loss_ptr = 0\n self.loss_vac = 0\n self.print_every = 1\n \n self.batch_size = batch_size\n # Zero gradients of both optimizers\n self.encoder_optimizer.zero_grad()\n self.decoder_optimizer.zero_grad()\n loss_Vocab,loss_Ptr= 0,0\n\n # Run words through encoder\n decoder_hidden = self.encoder(input_batches.transpose(0,1)).unsqueeze(0)\n\n # load memories with input\n self.decoder.load_memory(input_batches.transpose(0,1))\n\n # Prepare input and output variables\n decoder_input = Variable(torch.LongTensor([SOS_token] * batch_size))\n \n max_target_length = max(target_lengths)\n 
all_decoder_outputs_vocab = Variable(torch.zeros(max_target_length, batch_size, self.output_size))\n all_decoder_outputs_ptr = Variable(torch.zeros(max_target_length, batch_size, input_batches.size(0)))\n\n # Move new Variables to CUDA\n if USE_CUDA:\n all_decoder_outputs_vocab = all_decoder_outputs_vocab.cuda()\n all_decoder_outputs_ptr = all_decoder_outputs_ptr.cuda()\n decoder_input = decoder_input.cuda()\n\n # Choose whether to use teacher forcing\n use_teacher_forcing = random.random() < teacher_forcing_ratio\n \n if use_teacher_forcing: \n # Run through decoder one time step at a time\n for t in range(max_target_length):\n decoder_ptr, decoder_vacab, decoder_hidden = self.decoder.ptrMemDecoder(decoder_input, decoder_hidden)\n all_decoder_outputs_vocab[t] = decoder_vacab\n all_decoder_outputs_ptr[t] = decoder_ptr\n decoder_input = target_batches[t]# Chosen word is next input\n if USE_CUDA: decoder_input = decoder_input.cuda() \n else:\n for t in range(max_target_length):\n decoder_ptr, decoder_vacab, decoder_hidden = self.decoder.ptrMemDecoder(decoder_input, decoder_hidden)\n _, toppi = decoder_ptr.data.topk(1)\n _, topvi = decoder_vacab.data.topk(1)\n all_decoder_outputs_vocab[t] = decoder_vacab\n all_decoder_outputs_ptr[t] = decoder_ptr\n ## get the correspective word in input\n top_ptr_i = torch.gather(input_batches,0,Variable(toppi.view(1, -1)))\n next_in = [top_ptr_i.squeeze()[i].data[0] if(toppi.squeeze()[i] < input_lengths[i]-1) else topvi.squeeze()[i] for i in range(batch_size)]\n decoder_input = Variable(torch.LongTensor(next_in)) # Chosen word is next input\n if USE_CUDA: decoder_input = decoder_input.cuda()\n \n #Loss calculation and backpropagation\n loss_Vocab = masked_cross_entropy(\n all_decoder_outputs_vocab.transpose(0, 1).contiguous(), # -> batch x seq\n target_batches.transpose(0, 1).contiguous(), # -> batch x seq\n target_lengths\n )\n loss_Ptr = masked_cross_entropy(\n all_decoder_outputs_ptr.transpose(0, 1).contiguous(), # -> batch x seq\n target_index.transpose(0, 1).contiguous(), # -> batch x seq\n target_lengths\n )\n\n loss = loss_Vocab + loss_Ptr\n loss.backward()\n \n # Clip gradient norms\n ec = torch.nn.utils.clip_grad_norm(self.encoder.parameters(), clip)\n dc = torch.nn.utils.clip_grad_norm(self.decoder.parameters(), clip)\n # Update parameters with optimizers\n self.encoder_optimizer.step()\n self.decoder_optimizer.step()\n self.loss += loss.data[0]\n #self.loss_gate += loss_gate.data[0] \n self.loss_ptr += loss_Ptr.data[0]\n self.loss_vac += loss_Vocab.data[0]\n \n def evaluate_batch(self,batch_size,input_batches, input_lengths, target_batches, target_lengths, target_index,src_plain): \n # Set to not-training mode to disable dropout\n self.encoder.train(False)\n self.decoder.train(False) \n # Run words through encoder\n decoder_hidden = self.encoder(input_batches.transpose(0,1)).unsqueeze(0)\n self.decoder.load_memory(input_batches.transpose(0,1))\n\n # Prepare input and output variables\n decoder_input = Variable(torch.LongTensor([SOS_token] * batch_size))\n\n decoded_words = []\n all_decoder_outputs_vocab = Variable(torch.zeros(self.max_r, batch_size, self.output_size))\n all_decoder_outputs_ptr = Variable(torch.zeros(self.max_r, batch_size, input_batches.size(0)))\n # Move new Variables to CUDA\n if USE_CUDA:\n all_decoder_outputs_vocab = all_decoder_outputs_vocab.cuda()\n all_decoder_outputs_ptr = all_decoder_outputs_ptr.cuda()\n decoder_input = decoder_input.cuda()\n \n # Run through decoder one time step at a time\n for t in range(self.max_r):\n 
decoder_ptr,decoder_vacab, decoder_hidden = self.decoder.ptrMemDecoder(decoder_input, decoder_hidden)\n all_decoder_outputs_vocab[t] = decoder_vacab\n _, topvi = decoder_vacab.data.topk(1)\n all_decoder_outputs_ptr[t] = decoder_ptr\n _, toppi = decoder_ptr.data.topk(1)\n top_ptr_i = torch.gather(input_batches,0,Variable(toppi.view(1, -1))) \n next_in = [top_ptr_i.squeeze()[i].data[0] if(toppi.squeeze()[i] < input_lengths[i]-1) else topvi.squeeze()[i] for i in range(batch_size)]\n\n decoder_input = Variable(torch.LongTensor(next_in)) # Chosen word is next input\n if USE_CUDA: decoder_input = decoder_input.cuda()\n\n temp = []\n for i in range(batch_size):\n if(toppi.squeeze()[i] < len(src_plain[i])-1 ):\n temp.append(src_plain[i][toppi.squeeze()[i]]) ## copy from the input\n else:\n ind = topvi.squeeze()[i]\n if ind == EOS_token:\n temp.append('<EOS>')\n else:\n temp.append(self.lang.index2word[ind]) ## get from vocabulary\n decoded_words.append(temp)\n\n # Set back to training mode\n self.encoder.train(True)\n self.decoder.train(True)\n return decoded_words\n\n\n def evaluate(self,dev,avg_best,epoch=0,BLEU=False):\n logging.info(\"STARTING EVALUATION\")\n acc_avg = 0.0\n wer_avg = 0.0\n ref = []\n hyp = []\n pbar = tqdm(enumerate(dev),total=len(dev))\n for j, data_dev in pbar: \n words = self.evaluate_batch(\n batch_size=len(data_dev[1]),\n input_batches=data_dev[0], \n input_lengths=data_dev[1], \n target_batches=data_dev[2], \n target_lengths=data_dev[3], \n target_index=data_dev[4],\n src_plain=data_dev[5])\n acc=0\n w = 0 \n temp_gen = []\n for i, row in enumerate(np.transpose(words)):\n st = ''\n for e in row:\n if e== '<EOS>': break\n else: st+= e + ' '\n temp_gen.append(st)\n correct = \" \".join(data_dev[6][i])\n ### IMPORTANT \n ### WE NEED TO COMPARE THE PLAIN STRING, BECAUSE WE COPY THE WORDS FROM THE INPUT \n ### ====>> the index in the output gold can be UNK \n if (correct.lstrip().rstrip() == st.lstrip().rstrip()):\n acc+=1\n w += wer(correct.lstrip().rstrip(),st.lstrip().rstrip())\n ref.append(str(correct.lstrip().rstrip()))\n hyp.append(str(st.lstrip().rstrip()))\n\n acc_avg += acc/float(len(data_dev[1]))\n wer_avg += w/float(len(data_dev[1])) \n pbar.set_description(\"R:{:.4f},W:{:.4f}\".format(acc_avg/float(len(dev)),wer_avg/float(len(dev))))\n\n bleu_score = moses_multi_bleu(np.array(hyp), np.array(ref), lowercase=True) \n logging.info(\"BLEU SCORE:\"+str(bleu_score)) \n \n if (bleu_score >= avg_best):\n self.save_model(str(self.name)+str(bleu_score))\n logging.info(\"MODEL SAVED\") \n return bleu_score\n\nclass EncoderMemNN(nn.Module):\n def __init__(self, vocab, embedding_dim, hop, dropout):\n super(EncoderMemNN, self).__init__()\n self.num_vocab = vocab\n self.max_hops = hop\n self.embedding_dim = embedding_dim\n self.dropout = dropout\n for hop in range(self.max_hops+1):\n C = nn.Embedding(self.num_vocab, embedding_dim, padding_idx=PAD_token)\n C.weight.data.normal_(0, 0.1)\n self.add_module(\"C_{}\".format(hop), C)\n self.C = AttrProxy(self, \"C_\")\n self.softmax = nn.Softmax(dim=1)\n \n def get_state(self,bsz):\n \"\"\"Get cell states and hidden states.\"\"\"\n if USE_CUDA:\n return Variable(torch.zeros(bsz, self.embedding_dim)).cuda()\n else:\n return Variable(torch.zeros(bsz, self.embedding_dim))\n\n\n def forward(self, story):\n u = [self.get_state(story.size(0))]\n for hop in range(self.max_hops):\n embed_A = self.C[hop](story.contiguous().view(story.size(0), -1).long()) # b * (m * s) * e\n u_temp = u[-1].unsqueeze(1).expand_as(embed_A)\n prob = 
self.softmax(torch.sum(embed_A*u_temp, 2)) \n embed_C = self.C[hop+1](story.contiguous().view(story.size(0), -1).long())\n prob = prob.unsqueeze(2).expand_as(embed_C)\n o_k = torch.sum(embed_C*prob, 1)\n u_k = u[-1] + o_k\n u.append(u_k) \n return u_k\n\nclass DecoderrMemNN(nn.Module):\n def __init__(self, vocab, embedding_dim, hop, dropout):\n super(DecoderrMemNN, self).__init__()\n self.num_vocab = vocab\n self.max_hops = hop\n self.embedding_dim = embedding_dim\n self.dropout = dropout\n for hop in range(self.max_hops+1):\n C = nn.Embedding(self.num_vocab, embedding_dim, padding_idx=PAD_token)\n C.weight.data.normal_(0, 0.1)\n self.add_module(\"C_{}\".format(hop), C)\n self.C = AttrProxy(self, \"C_\")\n self.softmax = nn.Softmax(dim=1)\n self.W = nn.Linear(embedding_dim,1)\n self.W1 = nn.Linear(2*embedding_dim,self.num_vocab)\n self.gru = nn.GRU(embedding_dim, embedding_dim, dropout=dropout)\n\n def load_memory(self, story):\n self.m_story = []\n for hop in range(self.max_hops):\n embed_A = self.C[hop](story.contiguous().view(story.size(0), -1))#.long()) # b * (m * s) * e\n m_A = embed_A \n embed_C = self.C[hop+1](story.contiguous().view(story.size(0), -1).long())\n m_C = embed_C\n self.m_story.append(m_A)\n self.m_story.append(m_C)\n\n def ptrMemDecoder(self, enc_query, last_hidden):\n embed_q = self.C[0](enc_query) # b * e\n _, hidden = self.gru(embed_q.unsqueeze(0), last_hidden)\n temp = []\n u = [hidden[0].squeeze()] \n for hop in range(self.max_hops):\n m_A = self.m_story[hop]\n u_temp = u[-1].unsqueeze(1).expand_as(m_A)\n prob_lg = torch.sum(m_A*u_temp, 2)\n prob_ = self.softmax(prob_lg)\n m_C = self.m_story[hop+1]\n temp.append(prob_)\n prob = prob_.unsqueeze(2).expand_as(m_C)\n o_k = torch.sum(m_C*prob, 1)\n if (hop==0):\n p_vocab = self.W1(torch.cat((u[0], o_k),1))\n u_k = u[-1] + o_k\n u.append(u_k)\n p_ptr = prob_lg \n return p_ptr, p_vocab, hidden\n\n\nclass AttrProxy(object):\n \"\"\"\n Translates index lookups into attribute lookups.\n To implement some trick which able to use list of nn.Module in a nn.Module\n see https://discuss.pytorch.org/t/list-of-nn-module-in-a-nn-module/219/2\n \"\"\"\n def __init__(self, module, prefix):\n self.module = module\n self.prefix = prefix\n\n def __getitem__(self, i):\n return getattr(self.module, self.prefix + str(i))\n"
] | [
[
"torch.nn.Dropout",
"torch.nn.Parameter",
"torch.nn.functional.softmax",
"torch.LongTensor",
"torch.nn.LSTM",
"torch.zeros",
"torch.cat",
"torch.nn.Embedding",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.Linear",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.rand",
"torch.bmm",
"numpy.transpose",
"sklearn.metrics.f1_score",
"numpy.array",
"torch.nn.MSELoss",
"torch.save"
],
[
"torch.nn.Softmax",
"torch.LongTensor",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.zeros",
"torch.cat",
"torch.nn.GRU",
"torch.sum",
"torch.nn.Embedding",
"torch.nn.Linear",
"numpy.transpose",
"numpy.array",
"torch.nn.MSELoss",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
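A minimal sketch, using random tensors and illustrative shapes rather than the batches in the code above, of the copy-vs-generate step the pointer decoder loop performs: torch.gather picks the input token at each pointed-to position, and a gate threshold decides whether to copy that token or fall back to the vocabulary argmax.

import torch

seq_len, batch_size, vocab_size = 7, 4, 20
input_batches = torch.randint(0, vocab_size, (seq_len, batch_size))  # S x B token ids
decoder_ptr = torch.randn(batch_size, seq_len)                       # B x S pointer logits
decoder_vocab = torch.randn(batch_size, vocab_size)                  # B x V vocabulary logits
gate = torch.sigmoid(torch.randn(batch_size, 1))                     # B x 1 copy gate

_, toppi = decoder_ptr.topk(1)                                 # best input position per example
_, topvi = decoder_vocab.topk(1)                               # best vocabulary id per example
top_ptr_i = torch.gather(input_batches, 0, toppi.view(1, -1))  # token ids at the pointed positions

# copy from the input when the gate fires, otherwise generate from the vocabulary
next_in = torch.where(gate.view(-1) >= 0.5, top_ptr_i.view(-1), topvi.view(-1))
print(next_in.shape)  # torch.Size([4]): the next decoder input for each batch element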
m0r13/pytorch-ssd | [
"c92dc5228f8e12df907f0e6f06646e9a2ec94a73"
] | [
"vision/ssd/predictor.py"
] | [
"import torch\n\nfrom ..utils import box_utils\nfrom .data_preprocessing import PredictionTransform\nfrom ..utils.misc import Timer\n\n\nclass Predictor:\n def __init__(self, net, size, mean=0.0, std=1.0, nms_method=None,\n iou_threshold=0.45, filter_threshold=0.01, candidate_size=200, sigma=0.5, device=None):\n self.net = net\n self.transform = PredictionTransform(size, mean, std)\n self.iou_threshold = iou_threshold\n self.filter_threshold = filter_threshold\n self.candidate_size = candidate_size\n self.nms_method = nms_method\n\n self.sigma = sigma\n if device:\n self.device = device\n else:\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n self.net.to(self.device)\n self.net.eval()\n\n self.timer = Timer()\n\n def predict(self, image, top_k=-1, prob_threshold=None):\n cpu_device = torch.device(\"cpu\")\n height, width, _ = image.shape\n image = self.transform(image)\n images = image.unsqueeze(0)\n images = images.to(self.device)\n with torch.no_grad():\n self.timer.start()\n scores, boxes = self.net.forward(images)\n #print(\"Inference time: \", self.timer.end())\n boxes = boxes[0]\n scores = scores[0]\n if not prob_threshold:\n prob_threshold = self.filter_threshold\n # this version of nms is slower on GPU, so we move data to CPU.\n boxes = boxes.to(cpu_device)\n scores = scores.to(cpu_device)\n picked_box_probs = []\n picked_labels = []\n for class_index in range(1, scores.size(1)):\n probs = scores[:, class_index]\n mask = probs > prob_threshold\n probs = probs[mask]\n if probs.size(0) == 0:\n continue\n subset_boxes = boxes[mask, :]\n box_probs = torch.cat([subset_boxes, probs.reshape(-1, 1)], dim=1)\n box_probs = box_utils.nms(box_probs, self.nms_method,\n score_threshold=prob_threshold,\n iou_threshold=self.iou_threshold,\n sigma=self.sigma,\n top_k=top_k,\n candidate_size=self.candidate_size)\n picked_box_probs.append(box_probs)\n picked_labels.extend([class_index] * box_probs.size(0))\n if not picked_box_probs:\n return torch.tensor([]), torch.tensor([]), torch.tensor([])\n picked_box_probs = torch.cat(picked_box_probs)\n picked_box_probs[:, 0] *= width\n picked_box_probs[:, 1] *= height\n picked_box_probs[:, 2] *= width\n picked_box_probs[:, 3] *= height\n return picked_box_probs[:, :4], torch.tensor(picked_labels), picked_box_probs[:, 4]"
] | [
[
"torch.cat",
"torch.tensor",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
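A minimal sketch, with a stand-in linear layer instead of the SSD network, of the device-selection and no-grad inference pattern the Predictor above relies on: choose CUDA when available, move the input to that device, run the forward pass without building the autograd graph, and bring the result back to the CPU for post-processing.

import torch

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = torch.nn.Linear(4, 2).to(device).eval()   # stand-in for the detection network
image = torch.randn(1, 4).to(device)            # stand-in for a preprocessed image batch

with torch.no_grad():
    scores = net(image)

scores = scores.to(torch.device("cpu"))         # NMS-style post-processing runs on CPU
print(scores.shape)  # torch.Size([1, 2])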
TorchSpatiotemporal/tsl | [
"da13493b0cf83826bf41fe78a67e8d4ce1d7a8a0",
"da13493b0cf83826bf41fe78a67e8d4ce1d7a8a0"
] | [
"tsl/nn/base/dense.py",
"tsl/nn/models/stgn/dcrnn_model.py"
] | [
"from torch import nn\n\nfrom tsl.nn.utils import utils\n\n\nclass Dense(nn.Module):\n r\"\"\"\n A simple fully-connected layer.\n\n Args:\n input_size (int): Size of the input.\n output_size (int): Size of the output.\n activation (str, optional): Activation function.\n dropout (float, optional): Dropout rate.\n bias (bool, optional): Whether to use a bias.\n \"\"\"\n def __init__(self, input_size, output_size, activation='linear', dropout=0., bias=True):\n super(Dense, self).__init__()\n self.layer = nn.Sequential(\n nn.Linear(input_size, output_size, bias=bias),\n utils.get_layer_activation(activation)(),\n nn.Dropout(dropout) if dropout > 0. else nn.Identity()\n )\n\n def forward(self, x):\n return self.layer(x)\n",
"from tsl.nn.blocks.encoders.dcrnn import DCRNN\nfrom tsl.utils.parser_utils import ArgParser\n\nfrom einops import rearrange\nfrom torch import nn\n\nfrom tsl.nn.blocks.encoders import ConditionalBlock\nfrom tsl.nn.blocks.decoders.mlp_decoder import MLPDecoder\n\n\nclass DCRNNModel(nn.Module):\n r\"\"\"\n Diffusion ConvolutionalRecurrent Neural Network with a nonlinear readout.\n\n From Li et al., \"Diffusion Convolutional Recurrent Neural Network: Data-Driven Traffic Forecasting\", ICLR 2018.\n\n Args:\n input_size (int): Size of the input.\n hidden_size (int): Number of units in the DCRNN hidden layer.\n ff_size (int): Number of units in the nonlinear readout.\n output_size (int): Number of output channels.\n n_layers (int): Number DCRNN cells.\n exog_size (int): Number of channels in the exogenous variable.\n horizon (int): Number of steps to forecast.\n activation (str, optional): Activation function in the readout.\n dropout (float, optional): Dropout probability.\n \"\"\"\n\n def __init__(self,\n input_size,\n hidden_size,\n ff_size,\n output_size,\n n_layers,\n exog_size,\n horizon,\n activation='relu',\n dropout=0.,\n kernel_size=2):\n super(DCRNNModel, self).__init__()\n if exog_size:\n self.input_encoder = ConditionalBlock(input_size=input_size,\n exog_size=exog_size,\n output_size=hidden_size,\n activation=activation)\n else:\n self.input_encoder = nn.Linear(input_size, hidden_size)\n\n self.dcrnn = DCRNN(input_size=hidden_size,\n hidden_size=hidden_size,\n n_layers=n_layers,\n k=kernel_size)\n\n self.readout = MLPDecoder(input_size=hidden_size,\n hidden_size=ff_size,\n output_size=output_size,\n horizon=horizon,\n activation=activation,\n dropout=dropout)\n\n def forward(self, x, edge_index, edge_weight=None, u=None, **kwargs):\n if u is not None:\n if u.dim() == 3:\n u = rearrange(u, 'b s c -> b s 1 c')\n x = self.input_encoder(x, u)\n else:\n x = self.input_encoder(x)\n\n h, _ = self.dcrnn(x, edge_index, edge_weight)\n return self.readout(h)\n\n @staticmethod\n def add_model_specific_args(parser: ArgParser):\n parser.opt_list('--hidden-size', type=int, default=32, tunable=True, options=[16, 32, 64, 128])\n parser.opt_list('--ff-size', type=int, default=256, tunable=True, options=[64, 128, 256, 512])\n parser.opt_list('--n-layers', type=int, default=1, tunable=True, options=[1, 2])\n parser.opt_list('--dropout', type=float, default=0., tunable=True, options=[0., 0.1, 0.25, 0.5])\n parser.opt_list('--kernel-size', type=int, default=2, tunable=True, options=[1, 2])\n return parser\n"
] | [
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.Identity"
],
[
"torch.nn.Linear"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
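A minimal sketch, with a hypothetical make_dense helper rather than the tsl API, of the pattern the Dense module above builds on: an nn.Sequential of a linear projection, an activation, and either nn.Dropout or nn.Identity depending on whether dropout is enabled, so the forward pass needs no branching.

import torch
from torch import nn

def make_dense(input_size, output_size, dropout=0.0, bias=True):
    # nn.Identity keeps the Sequential shape-stable when dropout is turned off
    return nn.Sequential(
        nn.Linear(input_size, output_size, bias=bias),
        nn.ReLU(),
        nn.Dropout(dropout) if dropout > 0.0 else nn.Identity(),
    )

layer = make_dense(8, 16, dropout=0.1)
out = layer(torch.randn(32, 8))
print(out.shape)  # torch.Size([32, 16])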
sjk0709/Cartpole-DQN-pytorch041 | [
"439e5be4bd7b44dd923c46f24e62b46b7dadfba4",
"439e5be4bd7b44dd923c46f24e62b46b7dadfba4"
] | [
"CartPole_DQN2015_tf140/network.py",
"CartPole_DQN2015_tf140/CartPole-DQN-train.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\n\nimport gym \nfrom gym.envs.registration import register\nimport sys, os\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random as pr\n\n \nclass DQN:\n \n def __init__(self, session, input_size, output_size, name=\"policy\"):\n \n self.sess = session\n self.input_size = input_size\n self.output_size = output_size\n self.net_name = name\n \n self.kernel_regularizer = tf.contrib.layers.l2_regularizer(scale=1e-5)\n \n self.build_network()\n \n def build_network(self, h_size=16, learning_rate=1e-1):\n with tf.variable_scope(self.net_name):\n self.state = tf.placeholder(shape=[None, self.input_size], dtype=tf.float32, name=\"state\")\n self.action = tf.placeholder(shape=[None], dtype=tf.int32, name=\"action\" )\n \n dense1 = tf.layers.dense(inputs=self.state, units=h_size, activation=tf.nn.relu,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n kernel_regularizer=self.kernel_regularizer)\n \n dense2 = tf.layers.dense(inputs=dense1, units=h_size, activation=tf.nn.relu,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n kernel_regularizer=self.kernel_regularizer)\n \n# dense3 = tf.layers.dense(inputs=dense2, units=h_size, activation=tf.nn.relu,\n# kernel_initializer=tf.contrib.layers.xavier_initializer(),\n# kernel_regularizer=self.kernel_regularizer)\n \n self.output = tf.layers.dense(inputs=dense2, units=self.output_size, \n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n kernel_regularizer=self.kernel_regularizer)\n \n# # First layer of weights\n# W1 = tf.get_variable(\"W1\", shape=[self.input_size, h_size], initializer=tf.contrib.layers.xavier_initializer())\n# b1 = tf.Variable(tf.constant(0.1, shape=[h_size]))\n# layer1 = tf.nn.tanh(tf.matmul(self._X, W1)+b1)\n# \n# # Second layer of weights\n# W2 = tf.get_variable(\"W2\", shape=[h_size, h_size], initializer=tf.contrib.layers.xavier_initializer())\n# b2 = tf.Variable(tf.constant(0.1, shape=[h_size]))\n# layer2 = tf.nn.relu(tf.matmul(layer1, W2)+b2)\n# \n# W3 = tf.get_variable(\"W3\", shape=[h_size, self.output_size], initializer=tf.contrib.layers.xavier_initializer())\n# b3 = tf.Variable(tf.constant(0.1, shape=[self.output_size]))\n# # Q prediction\n# self._Qpred = tf.matmul(layer2, W3, name=\"Q\")+b3\n \n self.one_hot = tf.one_hot(self.action, self.output_size)\n# print(self.one_hot)\n self.Q = tf.reduce_sum(self.output*self.one_hot , axis=1)\n \n self.prob = tf.nn.softmax(self.output, name=\"prob\")\n # we need to define the parts of the network needed for learning a\n \n # policy\n self.Y = tf.placeholder(shape=[None], dtype=tf.float32)\n \n # Loss function\n self.loss = tf.reduce_mean(tf.square(self.Y - self.Q))\n \n # Learning\n self.train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.loss)\n \n def predict(self, state):\n state = np.reshape(state, [-1, self.input_size])\n return self.sess.run(self.output, feed_dict={self.state: state})\n \n def update(self, state, action, y):\n return self.sess.run([self.loss, self.train], feed_dict={self.state: state, self.action: action, self.Y: y})\n \n",
"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\n\nimport gym \nfrom gym.envs.registration import register\nfrom gym import wrappers\n\nimport sys, os, time\n\nimport numpy as np\n#import matplotlib as mpl\n#mpl.use('TkAgg')\nimport random \nfrom collections import deque\nfrom collections import namedtuple\n\nimport tensorflow as tf\n#from xml.etree.ElementTree import Element, SubElement, dump, ElementTree\nimport network\n \n\ndef createFolder(path):\n if not os.path.exists(path):\n os.mkdir(path)\n \nclass DQN2015 :\n \n def __init__(self, game, settings):\n self.env = game\n self.settings = settings\n \n # Constants defining our neural network\n self.input_size = self.env.observation_space.shape[0]\n self.output_size = self.env.action_space.n\n print('input_size : ', self.input_size) # [position of cart, velocity of cart, angle of pole, rotation rate of pole]\n print('output_size : ', self.output_size) # Left, Right\n \n self.transition = namedtuple('Transition', ('state', 'action', 'reward', 'next_state', 'terminal'))\n \n self.EPS_START = 0.9\n self.EPS_END = 0.05\n self.EPS_DECAY = 200\n self.steps_done = 0\n \n model_folder_name = \"models/\"\n createFolder(model_folder_name) \n \n self.save_folder_path = model_folder_name + self.settings.save_folder_file[0]\n createFolder(self.save_folder_path)\n \n self.checkpoint_state = \"checkpoint_state\"\n self.save_model_path = self.save_folder_path + self.settings.save_folder_file[1]\n self.optimal_model_path = self.save_folder_path + \"optimal\"\n \n \n self.load_folder_path = model_folder_name + self.settings.load_folder_file[0]\n self.load_model_path = self.load_folder_path + self.settings.load_folder_file[1] + \".meta\"\n# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)\n# self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,allow_soft_placement=True) )\n self.sess = tf.Session()\n \n \n # declare model\n self.policyNet = network.DQN(self.sess, self.input_size, self.output_size, name=\"policy\")\n self.targetNet = network.DQN(self.sess, self.input_size, self.output_size, name=\"target\")\n \n# if 'session' in locals() and self.sess is not None:\n# print('Close interactive session')\n# session.close()\n \n self.saver = tf.train.Saver() \n checkpoint = tf.train.get_checkpoint_state(self.load_folder_path, latest_filename=self.checkpoint_state)\n \n self.sess.run(tf.global_variables_initializer())\n \n \n if checkpoint and checkpoint.model_checkpoint_path:\n print(checkpoint)\n print(checkpoint.model_checkpoint_path)\n# self.saver = tf.train.import_meta_graph(self.load_model_path)\n# self.saver.restore(self.sess,tf.train.latest_checkpoint('./'))\n self.saver.restore(self.sess, checkpoint.model_checkpoint_path)\n print(\"%s has been loaded.\" % checkpoint.model_checkpoint_path)\n \n else :\n print(\"First learning.\")\n \n \n \n\n \n def get_copy_var_ops(self, *, dest_scope_name=\"target\", src_scope_name=\"policy\"):\n # Copy variables src_scope to dest_scope\n op_holder = []\n \n src_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=src_scope_name)\n dest_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=dest_scope_name)\n \n for src_var, dest_var in zip(src_vars, dest_vars):\n op_holder.append(dest_var.assign(src_var.value()))\n \n return op_holder\n \n def replay_train(self, policy_net, target_net, train_batch, gamma=0.9):\n \n batch = self.transition(*zip(*train_batch)) \n \n # Compute max_a'Q(S_{t+1}, a', theta-) for all next 
states.\n max_Qtarget_next_s = np.max( self.targetNet.predict(batch.next_state), axis=1 )\n \n# print(max_Qtarget_next_s)\n \n # R_{t+1} + gamma * max_a'Q(S_{t+1}, a', theta-)\n y = batch.reward + gamma * max_Qtarget_next_s * batch.terminal\n \n\n # Q(S_t, A_t, theta) \n # Train our network using target and predicted Q values on each episode\n return self.policyNet.update(batch.state, batch.action, y)\n\n def select_action(self, state):\n \n self.eps_threshold = self.EPS_END + (self.EPS_START - self.EPS_END) * \\\n np.exp(-1. * self.steps_done / self.EPS_DECAY)\n \n self.steps_done += 1\n \n if random.random() > self.eps_threshold: \n return np.argmax(self.policyNet.predict(state))\n else:\n return self.env.action_space.sample()\n \n\n\n def train(self, max_episodes):\n # Save our model\n# tf.train.write_graph(self.sess.graph_def, self.model_dir, self.input_graph_name, as_text=True)\n \n # store the previous observations in replay memory\n replay_buffer = deque()\n start_time = time.perf_counter()\n \n current_max_step = self.settings.initialStepForOptimalModel\n \n # initial copy q_net -> target_net\n copy_ops = self.get_copy_var_ops( dest_scope_name=\"target\",\n src_scope_name=\"policy\")\n self.sess.run(copy_ops)\n \n \n # train my model\n for episode in range(max_episodes):\n# self._e = 1. / ((episode / 100) + 1)\n terminal = False\n step_count = 0\n loss = 0.0\n state = self.env.reset()\n \n while not terminal:\n \n action = self.select_action(state)\n \n # Get new state and reward from environment\n next_state, reward, terminal, _ = self.env.step(action)\n \n terminalNo = 1.0\n if terminal: # big penalty \n terminalNo = 0.0\n# reward = -100 \n \n # Save the experience to our buffer\n replay_buffer.append((state, action, reward, next_state, terminalNo))\n if len(replay_buffer) > self.settings.replayMemory:\n replay_buffer.popleft()\n \n state = next_state\n step_count += 1\n \n \n \n if step_count > current_max_step:\n current_max_step = step_count\n # save model \n save_path = self.saver.save(self.sess, self.save_model_path, global_step=0, latest_filename=self.checkpoint_state)\n save_path = self.saver.save(self.sess, self.optimal_model_path)\n# f = open(self.checkpoint_dir +\"parameters.txt\", 'w')\n# f.write(str(self._e))\n# f.close() \n print('=====================================================================')\n print(\"Episode: {} steps: {} <= Good enough!!!!!!!!!!\".format(episode, step_count))\n print('Current checkpoint has been saved')\n print('=====================================================================')\n \n else :\n print(\"Episode: {} steps: {}\".format(episode, step_count))\n\n# if step_count > 10000:\n# pass\n \n if episode % 10 == 0 and len(replay_buffer)>self.settings.batch_size:\n # Get a random batch of experiences.\n for _ in range(50):\n # Minibatch works better\n minibatch = random.sample(replay_buffer, self.settings.batch_size)\n loss, _ = self.replay_train(self.policyNet, self.targetNet, minibatch)\n \n self.sess.run(copy_ops) # main -> target after training\n \n print('=====================================================================') \n print(\"Eps threshold :\", self.eps_threshold)\n print(\"Loss :\", loss)\n print('=====================================================================')\n\n# if self._e > self._FINAL_RANDOM_ACTION_PROB and len(self._replay_buffer) > self._OBSERVATION_STEPS:\n# self._e -= (self._INITIAL_RANDOM_ACTION_PROB - self._FINAL_RANDOM_ACTION_PROB) / self._EXPLORE_STEPS\n \n \n # Save training information and our 
model \n# tf.train.write_graph(self.sess.graph_def, self.model_dir, self.input_graph_name, as_text=True) \n \n elapsed_time = (time.perf_counter() - start_time)\n print('=====================================================================')\n print('Elapsed %.3f seconds.' % elapsed_time)\n print('%.0f h' % (elapsed_time/3600), '%.0f m' % ((elapsed_time%3600)/60) , '%.0f s' % (elapsed_time%60) )\n print('Learning Finished!') \n print('=====================================================================')\n \n self.test()\n # See our trained bot in action\n# env2 = wrappers.Monitor(env, 'gym-results', force=True) \n# for i in range(200):\n# self.bot_play(self._mainDQN, env=env2)\n \n \n def bot_play(self, policyNet, use_render=True):\n # See our trained network in action\n state = self._game.reset()\n reward_sum = 0\n step_count = 0\n while True:\n if use_render:\n self.env.render()\n \n action = np.argmax(policyNet.predict(state))\n \n state, reward, terminal, _ = self.env.step(action)\n reward_sum += reward\n step_count += 1 \n if terminal:\n# print(\"You have to train the model\")\n# print(\"Total score: {}\".format(reward_sum))\n break\n \n \n self.env.close()\n return step_count\n \n def test(self, use_render=True): \n \n# game2 = wrappers.Monitor(self._game, 'gym-results', force=True) \n# for i in range(200): \n self.bot_play(self.policyNet, use_render)\n \n \nclass dotdict(dict):\n def __getattr__(self, name):\n return self[name]\n \ndef main():\n \n print(\"Tensorflow version :\", tf.__version__)\n\n\n \n settings = dotdict({\n 'training' : True, \n 'isGPU' : True,\n 'load_model' : True, \n 'save_folder_file' : (\"Cartpole0/\", 'checkpoint0'),\n 'load_folder_file' : (\"Cartpole0/\", 'checkpoint0-0'),\n 'replayMemory' : 50000,\n 'initialStepForOptimalModel' : 5000, \n 'maxEpisodes' : 10000,\n 'batch_size' : 128, \n 'learning_rate' : 1e-1,\n 'graphName' : \"CartPole0\",\n })\n \n gym.envs.register(\n id='CartPole-v2',\n entry_point='gym.envs.classic_control:CartPoleEnv',\n tags={'wrapper_config.TimeLimit.max_episode_steps': 10000},\n reward_threshold=9750,\n )\n game = gym.make('CartPole-v2')\n# game.reset()\n# game = gym.make('CartPole-v0')\n \n cartPole = DQN2015(game, settings)\n \n if settings.training:\n cartPole.train(settings.maxEpisodes)\n else:\n cartPole.test(use_render=True)\n \n\nif __name__ == \"__main__\":\n main()"
] | [
[
"tensorflow.nn.softmax",
"numpy.reshape",
"tensorflow.reduce_sum",
"tensorflow.placeholder",
"tensorflow.one_hot",
"tensorflow.square",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.variable_scope",
"tensorflow.train.AdamOptimizer",
"tensorflow.contrib.layers.xavier_initializer"
],
[
"tensorflow.train.get_checkpoint_state",
"tensorflow.get_collection",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.train.Saver",
"numpy.exp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
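A minimal sketch, assuming TensorFlow 1.x graph mode (matching the "1.10" listed for this record), of how the Q-value of the taken action is isolated in the DQN loss above: one-hot the action indices, mask the network output, and reduce-sum over the action axis before the squared TD error.

import numpy as np
import tensorflow as tf

output_size = 2
q_output = tf.placeholder(shape=[None, output_size], dtype=tf.float32)  # Q(s, .) from the network
action = tf.placeholder(shape=[None], dtype=tf.int32)                   # actions actually taken
y = tf.placeholder(shape=[None], dtype=tf.float32)                      # TD targets

one_hot = tf.one_hot(action, output_size)
q_taken = tf.reduce_sum(q_output * one_hot, axis=1)       # Q(s, a) for the chosen action
loss = tf.reduce_mean(tf.square(y - q_taken))

with tf.Session() as sess:
    val = sess.run(loss, feed_dict={
        q_output: np.array([[1.0, 2.0], [0.5, 0.0]], dtype=np.float32),
        action: np.array([1, 0], dtype=np.int32),
        y: np.array([1.5, 1.0], dtype=np.float32),
    })
    print(val)  # 0.25: mean of (1.5 - 2.0)^2 and (1.0 - 0.5)^2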
goerz-forks/qutip | [
"759759e85f61e3619b37253a6f981f71abc442d6",
"759759e85f61e3619b37253a6f981f71abc442d6"
] | [
"qutip/qobj.py",
"qutip/sesolve.py"
] | [
"# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\"\"\"The Quantum Object (Qobj) class, for representing quantum states and\noperators, and related functions.\n\"\"\"\n\n__all__ = ['Qobj', 'qobj_list_evaluate', 'ptrace', 'dag', 'isequal',\n 'issuper', 'isoper', 'isoperket', 'isoperbra', 'isket', 'isbra',\n 'isherm', 'shape', 'dims']\n\nimport warnings\nimport types\n\ntry:\n import builtins\nexcept:\n import __builtin__ as builtins\n\n# import math functions from numpy.math: required for td string evaluation\nfrom numpy import (arccos, arccosh, arcsin, arcsinh, arctan, arctan2, arctanh,\n ceil, copysign, cos, cosh, degrees, e, exp, expm1, fabs,\n floor, fmod, frexp, hypot, isinf, isnan, ldexp, log, log10,\n log1p, modf, pi, radians, sin, sinh, sqrt, tan, tanh, trunc)\n\nimport numpy as np\nimport scipy.sparse as sp\nimport scipy.linalg as la\nimport qutip.settings as settings\nfrom qutip import __version__\nfrom qutip.fastsparse import fast_csr_matrix, fast_identity\nfrom qutip.cy.ptrace import _ptrace\nfrom qutip.permute import _permute\nfrom qutip.sparse import (sp_eigs, sp_expm, sp_fro_norm, sp_max_norm,\n sp_one_norm, sp_L2_norm)\nfrom qutip.dimensions import type_from_dims, enumerate_flat, collapse_dims_super\nfrom qutip.cy.spmath import (zcsr_transpose, zcsr_adjoint, zcsr_isherm,\n zcsr_trace, zcsr_proj, zcsr_inner)\nfrom qutip.cy.spmatfuncs import zcsr_mat_elem\nfrom qutip.cy.sparse_utils import cy_tidyup\nimport sys\nif sys.version_info.major >= 3:\n from itertools import zip_longest\nelif sys.version_info.major < 3:\n from itertools import izip_longest\n zip_longest = izip_longest\n\n#OPENMP stuff\nfrom qutip.cy.openmp.utilities import use_openmp\nif settings.has_openmp:\n from qutip.cy.openmp.omp_sparse_utils import omp_tidyup\n\n\nclass Qobj(object):\n \"\"\"A class for representing quantum 
objects, such as quantum operators\n and states.\n\n The Qobj class is the QuTiP representation of quantum operators and state\n vectors. This class also implements math operations +,-,* between Qobj\n instances (and / by a C-number), as well as a collection of common\n operator/state operations. The Qobj constructor optionally takes a\n dimension ``list`` and/or shape ``list`` as arguments.\n\n Parameters\n ----------\n inpt : array_like\n Data for vector/matrix representation of the quantum object.\n dims : list\n Dimensions of object used for tensor products.\n shape : list\n Shape of underlying data structure (matrix shape).\n copy : bool\n Flag specifying whether Qobj should get a copy of the\n input data, or use the original.\n fast : bool\n Flag for fast qobj creation when running ode solvers.\n This parameter is used internally only.\n\n\n Attributes\n ----------\n data : array_like\n Sparse matrix characterizing the quantum object.\n dims : list\n List of dimensions keeping track of the tensor structure.\n shape : list\n Shape of the underlying `data` array.\n type : str\n Type of quantum object: 'bra', 'ket', 'oper', 'operator-ket',\n 'operator-bra', or 'super'.\n superrep : str\n Representation used if `type` is 'super'. One of 'super'\n (Liouville form) or 'choi' (Choi matrix with tr = dimension).\n isherm : bool\n Indicates if quantum object represents Hermitian operator.\n isunitary : bool\n Indictaes if quantum object represents unitary operator.\n iscp : bool\n Indicates if the quantum object represents a map, and if that map is\n completely positive (CP).\n ishp : bool\n Indicates if the quantum object represents a map, and if that map is\n hermicity preserving (HP).\n istp : bool\n Indicates if the quantum object represents a map, and if that map is\n trace preserving (TP).\n iscptp : bool\n Indicates if the quantum object represents a map that is completely\n positive and trace preserving (CPTP).\n isket : bool\n Indicates if the quantum object represents a ket.\n isbra : bool\n Indicates if the quantum object represents a bra.\n isoper : bool\n Indicates if the quantum object represents an operator.\n issuper : bool\n Indicates if the quantum object represents a superoperator.\n isoperket : bool\n Indicates if the quantum object represents an operator in column vector\n form.\n isoperbra : bool\n Indicates if the quantum object represents an operator in row vector\n form.\n\n Methods\n -------\n copy()\n Create copy of Qobj\n conj()\n Conjugate of quantum object.\n cosm()\n Cosine of quantum object.\n dag()\n Adjoint (dagger) of quantum object.\n dnorm()\n Diamond norm of quantum operator.\n dual_chan()\n Dual channel of quantum object representing a CP map.\n eigenenergies(sparse=False, sort='low', eigvals=0, tol=0, maxiter=100000)\n Returns eigenenergies (eigenvalues) of a quantum object.\n eigenstates(sparse=False, sort='low', eigvals=0, tol=0, maxiter=100000)\n Returns eigenenergies and eigenstates of quantum object.\n expm()\n Matrix exponential of quantum object.\n full(order='C')\n Returns dense array of quantum object `data` attribute.\n groundstate(sparse=False, tol=0, maxiter=100000)\n Returns eigenvalue and eigenket for the groundstate of a quantum\n object.\n matrix_element(bra, ket)\n Returns the matrix element of operator between `bra` and `ket` vectors.\n norm(norm='tr', sparse=False, tol=0, maxiter=100000)\n Returns norm of a ket or an operator.\n permute(order)\n Returns composite qobj with indices reordered.\n proj()\n Computes the projector for a ket 
or bra vector.\n ptrace(sel)\n Returns quantum object for selected dimensions after performing\n partial trace.\n sinm()\n Sine of quantum object.\n sqrtm()\n Matrix square root of quantum object.\n tidyup(atol=1e-12)\n Removes small elements from quantum object.\n tr()\n Trace of quantum object.\n trans()\n Transpose of quantum object.\n transform(inpt, inverse=False)\n Performs a basis transformation defined by `inpt` matrix.\n trunc_neg(method='clip')\n Removes negative eigenvalues and returns a new Qobj that is\n a valid density operator.\n unit(norm='tr', sparse=False, tol=0, maxiter=100000)\n Returns normalized quantum object.\n\n \"\"\"\n __array_priority__ = 100 # sets Qobj priority above numpy arrays\n\n def __init__(self, inpt=None, dims=[[], []], shape=[],\n type=None, isherm=None, copy=True,\n fast=False, superrep=None, isunitary=None):\n \"\"\"\n Qobj constructor.\n \"\"\"\n self._isherm = isherm\n self._type = type\n self.superrep = superrep\n self._isunitary = isunitary\n\n if fast == 'mc':\n # fast Qobj construction for use in mcsolve with ket output\n self._data = inpt\n self.dims = dims\n self._isherm = False\n return\n\n if fast == 'mc-dm':\n # fast Qobj construction for use in mcsolve with dm output\n self._data = inpt\n self.dims = dims\n self._isherm = True\n return\n\n if isinstance(inpt, Qobj):\n # if input is already Qobj then return identical copy\n\n self._data = fast_csr_matrix((inpt.data.data, inpt.data.indices,\n inpt.data.indptr),\n shape=inpt.shape, copy=copy)\n\n if not np.any(dims):\n # Dimensions of quantum object used for keeping track of tensor\n # components\n self.dims = inpt.dims\n else:\n self.dims = dims\n\n self.superrep = inpt.superrep\n self._isunitary = inpt._isunitary\n\n elif inpt is None:\n # initialize an empty Qobj with correct dimensions and shape\n\n if any(dims):\n N, M = np.prod(dims[0]), np.prod(dims[1])\n self.dims = dims\n\n elif shape:\n N, M = shape\n self.dims = [[N], [M]]\n\n else:\n N, M = 1, 1\n self.dims = [[N], [M]]\n\n self._data = fast_csr_matrix(shape=(N, M))\n\n elif isinstance(inpt, list) or isinstance(inpt, tuple):\n # case where input is a list\n data = np.array(inpt)\n if len(data.shape) == 1:\n # if list has only one dimension (i.e [5,4])\n data = data.transpose()\n\n _tmp = sp.csr_matrix(data, dtype=complex)\n self._data = fast_csr_matrix((_tmp.data, _tmp.indices, _tmp.indptr),\n shape=_tmp.shape)\n if not np.any(dims):\n self.dims = [[int(data.shape[0])], [int(data.shape[1])]]\n else:\n self.dims = dims\n\n elif isinstance(inpt, np.ndarray) or sp.issparse(inpt):\n # case where input is array or sparse\n if inpt.ndim == 1:\n inpt = inpt[:, np.newaxis]\n\n do_copy = copy\n if not isinstance(inpt, fast_csr_matrix):\n _tmp = sp.csr_matrix(inpt, dtype=complex, copy=do_copy)\n _tmp.sort_indices() #Make sure indices are sorted.\n do_copy = 0\n else:\n _tmp = inpt\n self._data = fast_csr_matrix((_tmp.data, _tmp.indices, _tmp.indptr),\n shape=_tmp.shape, copy=do_copy)\n\n if not np.any(dims):\n self.dims = [[int(inpt.shape[0])], [int(inpt.shape[1])]]\n else:\n self.dims = dims\n\n elif isinstance(inpt, (int, float, complex,\n np.integer, np.floating, np.complexfloating)):\n # if input is int, float, or complex then convert to array\n _tmp = sp.csr_matrix([[inpt]], dtype=complex)\n self._data = fast_csr_matrix((_tmp.data, _tmp.indices, _tmp.indptr),\n shape=_tmp.shape)\n if not np.any(dims):\n self.dims = [[1], [1]]\n else:\n self.dims = dims\n\n else:\n warnings.warn(\"Initializing Qobj from unsupported type: %s\" %\n 
builtins.type(inpt))\n inpt = np.array([[0]])\n _tmp = sp.csr_matrix(inpt, dtype=complex, copy=copy)\n self._data = fast_csr_matrix((_tmp.data, _tmp.indices, _tmp.indptr),\n shape = _tmp.shape)\n self.dims = [[int(inpt.shape[0])], [int(inpt.shape[1])]]\n\n if type == 'super':\n # Type is not super, i.e. dims not explicitly passed, but oper shape\n if dims== [[], []] and self.shape[0] == self.shape[1]:\n sub_shape = np.sqrt(self.shape[0])\n # check if root of shape is int\n if (sub_shape % 1) != 0:\n raise Exception('Invalid shape for a super operator.')\n else:\n sub_shape = int(sub_shape)\n self.dims = [[[sub_shape], [sub_shape]]]*2\n\n\n if superrep:\n self.superrep = superrep\n else:\n if self.type == 'super' and self.superrep is None:\n self.superrep = 'super'\n\n # clear type cache\n self._type = None\n\n def copy(self):\n \"\"\"Create identical copy\"\"\"\n return Qobj(inpt=self)\n\n def get_data(self):\n return self._data\n #Here we perfrom a check of the csr matrix type during setting of Q.data\n def set_data(self, data):\n if not isinstance(data, fast_csr_matrix):\n raise TypeError('Qobj data must be in fast_csr format.')\n else:\n self._data = data\n data = property(get_data, set_data)\n\n def __add__(self, other):\n \"\"\"\n ADDITION with Qobj on LEFT [ ex. Qobj+4 ]\n \"\"\"\n self._isunitary = None\n\n if isinstance(other, eseries):\n return other.__radd__(self)\n\n if not isinstance(other, Qobj):\n if isinstance(other, (int, float, complex, np.integer, np.floating,\n np.complexfloating, np.ndarray, list, tuple)) \\\n or sp.issparse(other):\n other = Qobj(other)\n else:\n return NotImplemented\n\n if np.prod(other.shape) == 1 and np.prod(self.shape) != 1:\n # case for scalar quantum object\n dat = other.data[0, 0]\n if dat == 0:\n return self\n\n out = Qobj()\n\n if self.type in ['oper', 'super']:\n out.data = self.data + dat * fast_identity(\n self.shape[0])\n else:\n out.data = self.data\n out.data.data = out.data.data + dat\n\n out.dims = self.dims\n\n if settings.auto_tidyup: out.tidyup()\n\n if isinstance(dat, (int, float)):\n out._isherm = self._isherm\n else:\n # We use _isherm here to prevent recalculating on self and\n # other, relying on that bool(None) == False.\n out._isherm = (True if self._isherm and other._isherm\n else out.isherm)\n\n out.superrep = self.superrep\n\n return out\n\n elif np.prod(self.shape) == 1 and np.prod(other.shape) != 1:\n # case for scalar quantum object\n dat = self.data[0, 0]\n if dat == 0:\n return other\n\n out = Qobj()\n if other.type in ['oper', 'super']:\n out.data = dat * fast_identity(other.shape[0]) + other.data\n else:\n out.data = other.data\n out.data.data = out.data.data + dat\n out.dims = other.dims\n\n if settings.auto_tidyup: out.tidyup()\n\n if isinstance(dat, complex):\n out._isherm = out.isherm\n else:\n out._isherm = self._isherm\n\n out.superrep = self.superrep\n\n return out\n\n elif self.dims != other.dims:\n raise TypeError('Incompatible quantum object dimensions')\n\n elif self.shape != other.shape:\n raise TypeError('Matrix shapes do not match')\n\n else: # case for matching quantum objects\n out = Qobj()\n out.data = self.data + other.data\n out.dims = self.dims\n if settings.auto_tidyup: out.tidyup()\n\n if self.type in ['ket', 'bra', 'operator-ket', 'operator-bra']:\n out._isherm = False\n elif self._isherm is None or other._isherm is None:\n out._isherm = out.isherm\n elif not self._isherm and not other._isherm:\n out._isherm = out.isherm\n else:\n out._isherm = self._isherm and other._isherm\n\n if 
self.superrep and other.superrep:\n if self.superrep != other.superrep:\n msg = (\"Adding superoperators with different \" +\n \"representations\")\n warnings.warn(msg)\n\n out.superrep = self.superrep\n\n return out\n\n def __radd__(self, other):\n \"\"\"\n ADDITION with Qobj on RIGHT [ ex. 4+Qobj ]\n \"\"\"\n return self + other\n\n def __sub__(self, other):\n \"\"\"\n SUBTRACTION with Qobj on LEFT [ ex. Qobj-4 ]\n \"\"\"\n return self + (-other)\n\n def __rsub__(self, other):\n \"\"\"\n SUBTRACTION with Qobj on RIGHT [ ex. 4-Qobj ]\n \"\"\"\n return (-self) + other\n\n def __mul__(self, other):\n \"\"\"\n MULTIPLICATION with Qobj on LEFT [ ex. Qobj*4 ]\n \"\"\"\n self._isunitary = None\n\n if isinstance(other, Qobj):\n if self.dims[1] == other.dims[0]:\n out = Qobj()\n out.data = self.data * other.data\n dims = [self.dims[0], other.dims[1]]\n out.dims = dims\n if settings.auto_tidyup: out.tidyup()\n if (settings.auto_tidyup_dims\n and not isinstance(dims[0][0], list)\n and not isinstance(dims[1][0], list)):\n # If neither left or right is a superoperator,\n # we should implicitly partial trace over\n # matching dimensions of 1.\n # Using izip_longest allows for the left and right dims\n # to have uneven length (non-square Qobjs).\n # We use None as padding so that it doesn't match anything,\n # and will never cause a partial trace on the other side.\n mask = [l == r == 1 for l, r in zip_longest(dims[0], dims[1],\n fillvalue=None)]\n # To ensure that there are still any dimensions left, we\n # use max() to add a dimensions list of [1] if all matching dims\n # are traced out of that side.\n out.dims = [max([1],\n [dim for dim, m in zip(dims[0], mask)\n if not m]),\n max([1],\n [dim for dim, m in zip(dims[1], mask)\n if not m])]\n\n else:\n out.dims = dims\n\n out._isherm = None\n\n if self.superrep and other.superrep:\n if self.superrep != other.superrep:\n msg = (\"Multiplying superoperators with different \" +\n \"representations\")\n warnings.warn(msg)\n\n out.superrep = self.superrep\n\n return out\n\n elif np.prod(self.shape) == 1:\n out = Qobj(other)\n out.data *= self.data[0, 0]\n out.superrep = other.superrep\n return out.tidyup() if settings.auto_tidyup else out\n\n elif np.prod(other.shape) == 1:\n out = Qobj(self)\n out.data *= other.data[0, 0]\n out.superrep = self.superrep\n return out.tidyup() if settings.auto_tidyup else out\n\n else:\n raise TypeError(\"Incompatible Qobj shapes\")\n\n elif isinstance(other, np.ndarray):\n if other.dtype=='object':\n return np.array([self * item for item in other],\n dtype=object)\n else:\n return self.data * other\n\n\n elif isinstance(other, list):\n # if other is a list, do element-wise multiplication\n return np.array([self * item for item in other],\n dtype=object)\n\n elif isinstance(other, eseries):\n return other.__rmul__(self)\n\n elif isinstance(other, (int, float, complex,\n np.integer, np.floating, np.complexfloating)):\n out = Qobj()\n out.data = self.data * other\n out.dims = self.dims\n out.superrep = self.superrep\n if settings.auto_tidyup: out.tidyup()\n if isinstance(other, complex):\n out._isherm = out.isherm\n else:\n out._isherm = self._isherm\n\n return out\n\n else:\n return NotImplemented\n\n def __rmul__(self, other):\n \"\"\"\n MULTIPLICATION with Qobj on RIGHT [ ex. 
4*Qobj ]\n \"\"\"\n if isinstance(other, np.ndarray):\n if other.dtype=='object':\n return np.array([item * self for item in other],\n dtype=object)\n else:\n return other * self.data\n\n elif isinstance(other, list):\n # if other is a list, do element-wise multiplication\n return np.array([item * self for item in other],\n dtype=object)\n\n elif isinstance(other, eseries):\n return other.__mul__(self)\n\n elif isinstance(other, (int, float, complex,\n np.integer, np.floating, np.complexfloating)):\n out = Qobj()\n out.data = other * self.data\n out.dims = self.dims\n out.superrep = self.superrep\n if settings.auto_tidyup: out.tidyup()\n if isinstance(other, complex):\n out._isherm = out.isherm\n else:\n out._isherm = self._isherm\n\n return out\n\n else:\n raise TypeError(\"Incompatible object for multiplication\")\n\n def __truediv__(self, other):\n return self.__div__(other)\n\n def __div__(self, other):\n \"\"\"\n DIVISION (by numbers only)\n \"\"\"\n if isinstance(other, Qobj): # if both are quantum objects\n raise TypeError(\"Incompatible Qobj shapes \" +\n \"[division with Qobj not implemented]\")\n\n if isinstance(other, (int, float, complex,\n np.integer, np.floating, np.complexfloating)):\n out = Qobj()\n out.data = self.data / other\n out.dims = self.dims\n if settings.auto_tidyup: out.tidyup()\n if isinstance(other, complex):\n out._isherm = out.isherm\n else:\n out._isherm = self._isherm\n\n out.superrep = self.superrep\n\n return out\n\n else:\n raise TypeError(\"Incompatible object for division\")\n\n def __neg__(self):\n \"\"\"\n NEGATION operation.\n \"\"\"\n out = Qobj()\n out.data = -self.data\n out.dims = self.dims\n out.superrep = self.superrep\n if settings.auto_tidyup: out.tidyup()\n out._isherm = self._isherm\n out._isunitary = self._isunitary\n return out\n\n def __getitem__(self, ind):\n \"\"\"\n GET qobj elements.\n \"\"\"\n out = self.data[ind]\n if sp.issparse(out):\n return np.asarray(out.todense())\n else:\n return out\n\n def __eq__(self, other):\n \"\"\"\n EQUALITY operator.\n \"\"\"\n if (isinstance(other, Qobj) and\n self.dims == other.dims and\n not np.any(np.abs((self.data - other.data).data) >\n settings.atol)):\n return True\n else:\n return False\n\n def __ne__(self, other):\n \"\"\"\n INEQUALITY operator.\n \"\"\"\n return not (self == other)\n\n def __pow__(self, n, m=None): # calculates powers of Qobj\n \"\"\"\n POWER operation.\n \"\"\"\n if self.type not in ['oper', 'super']:\n raise Exception(\"Raising a qobj to some power works only for \" +\n \"operators and super-operators (square matrices).\")\n\n if m is not None:\n raise NotImplementedError(\"modulo is not implemented for Qobj\")\n\n try:\n data = self.data ** n\n out = Qobj(data, dims=self.dims)\n out.superrep = self.superrep\n return out.tidyup() if settings.auto_tidyup else out\n\n except:\n raise ValueError('Invalid choice of exponent.')\n\n def __abs__(self):\n return abs(self.data)\n\n def __str__(self):\n s = \"\"\n t = self.type\n shape = self.shape\n if self.type in ['oper', 'super']:\n s += (\"Quantum object: \" +\n \"dims = \" + str(self.dims) +\n \", shape = \" + str(shape) +\n \", type = \" + t +\n \", isherm = \" + str(self.isherm) +\n (\n \", superrep = {0.superrep}\".format(self)\n if t == \"super\" and self.superrep != \"super\"\n else \"\"\n ) + \"\\n\")\n else:\n s += (\"Quantum object: \" +\n \"dims = \" + str(self.dims) +\n \", shape = \" + str(shape) +\n \", type = \" + t + \"\\n\")\n s += \"Qobj data =\\n\"\n\n if shape[0] > 10000 or shape[1] > 10000:\n # if the 
system is huge, don't attempt to convert to a\n # dense matrix and then to string, because it is pointless\n # and is likely going to produce memory errors. Instead print the\n # sparse data string representation\n s += str(self.data)\n\n elif all(np.imag(self.data.data) == 0):\n s += str(np.real(self.full()))\n\n else:\n s += str(self.full())\n\n return s\n\n def __repr__(self):\n # give complete information on Qobj without print statement in\n # command-line we cant realistically serialize a Qobj into a string,\n # so we simply return the informal __str__ representation instead.)\n return self.__str__()\n\n def __call__(self, other):\n \"\"\"\n Acts this Qobj on another Qobj either by left-multiplication,\n or by vectorization and devectorization, as\n appropriate.\n \"\"\"\n if not isinstance(other, Qobj):\n raise TypeError(\"Only defined for quantum objects.\")\n\n if self.type == \"super\":\n if other.type == \"ket\":\n other = qutip.states.ket2dm(other)\n\n if other.type == \"oper\":\n return qutip.superoperator.vector_to_operator(\n self * qutip.superoperator.operator_to_vector(other)\n )\n else:\n raise TypeError(\"Can only act super on oper or ket.\")\n\n elif self.type == \"oper\":\n if other.type == \"ket\":\n return self * other\n else:\n raise TypeError(\"Can only act oper on ket.\")\n\n def __getstate__(self):\n # defines what happens when Qobj object gets pickled\n self.__dict__.update({'qutip_version': __version__[:5]})\n return self.__dict__\n\n def __setstate__(self, state):\n # defines what happens when loading a pickled Qobj\n if 'qutip_version' in state.keys():\n del state['qutip_version']\n (self.__dict__).update(state)\n\n def _repr_latex_(self):\n \"\"\"\n Generate a LaTeX representation of the Qobj instance. Can be used for\n formatted output in ipython notebook.\n \"\"\"\n t = self.type\n shape = self.shape\n s = r''\n if self.type in ['oper', 'super']:\n s += (\"Quantum object: \" +\n \"dims = \" + str(self.dims) +\n \", shape = \" + str(shape) +\n \", type = \" + t +\n \", isherm = \" + str(self.isherm) +\n (\n \", superrep = {0.superrep}\".format(self)\n if t == \"super\" and self.superrep != \"super\"\n else \"\"\n ))\n else:\n s += (\"Quantum object: \" +\n \"dims = \" + str(self.dims) +\n \", shape = \" + str(shape) +\n \", type = \" + t)\n\n M, N = self.data.shape\n\n s += r'\\begin{equation*}\\left(\\begin{array}{*{11}c}'\n\n def _format_float(value):\n if value == 0.0:\n return \"0.0\"\n elif abs(value) > 1000.0 or abs(value) < 0.001:\n return (\"%.3e\" % value).replace(\"e\", r\"\\times10^{\") + \"}\"\n elif abs(value - int(value)) < 0.001:\n return \"%.1f\" % value\n else:\n return \"%.3f\" % value\n\n def _format_element(m, n, d):\n s = \" & \" if n > 0 else \"\"\n if type(d) == str:\n return s + d\n else:\n if abs(np.imag(d)) < settings.atol:\n return s + _format_float(np.real(d))\n elif abs(np.real(d)) < settings.atol:\n return s + _format_float(np.imag(d)) + \"j\"\n else:\n s_re = _format_float(np.real(d))\n s_im = _format_float(np.imag(d))\n if np.imag(d) > 0.0:\n return (s + \"(\" + s_re + \"+\" + s_im + \"j)\")\n else:\n return (s + \"(\" + s_re + s_im + \"j)\")\n\n if M > 10 and N > 10:\n # truncated matrix output\n for m in range(5):\n for n in range(5):\n s += _format_element(m, n, self.data[m, n])\n s += r' & \\cdots'\n for n in range(N - 5, N):\n s += _format_element(m, n, self.data[m, n])\n s += r'\\\\'\n\n for n in range(5):\n s += _format_element(m, n, r'\\vdots')\n s += r' & \\ddots'\n for n in range(N - 5, N):\n s += _format_element(m, 
n, r'\\vdots')\n s += r'\\\\'\n\n for m in range(M - 5, M):\n for n in range(5):\n s += _format_element(m, n, self.data[m, n])\n s += r' & \\cdots'\n for n in range(N - 5, N):\n s += _format_element(m, n, self.data[m, n])\n s += r'\\\\'\n\n elif M > 10 and N <= 10:\n # truncated vertically elongated matrix output\n for m in range(5):\n for n in range(N):\n s += _format_element(m, n, self.data[m, n])\n s += r'\\\\'\n\n for n in range(N):\n s += _format_element(m, n, r'\\vdots')\n s += r'\\\\'\n\n for m in range(M - 5, M):\n for n in range(N):\n s += _format_element(m, n, self.data[m, n])\n s += r'\\\\'\n\n elif M <= 10 and N > 10:\n # truncated horizontally elongated matrix output\n for m in range(M):\n for n in range(5):\n s += _format_element(m, n, self.data[m, n])\n s += r' & \\cdots'\n for n in range(N - 5, N):\n s += _format_element(m, n, self.data[m, n])\n s += r'\\\\'\n\n else:\n # full output\n for m in range(M):\n for n in range(N):\n s += _format_element(m, n, self.data[m, n])\n s += r'\\\\'\n\n s += r'\\end{array}\\right)\\end{equation*}'\n return s\n\n def dag(self):\n \"\"\"Adjoint operator of quantum object.\n \"\"\"\n out = Qobj()\n out.data = zcsr_adjoint(self.data)\n out.dims = [self.dims[1], self.dims[0]]\n out._isherm = self._isherm\n out.superrep = self.superrep\n return out\n\n def dual_chan(self):\n \"\"\"Dual channel of quantum object representing a completely positive\n map.\n \"\"\"\n # Uses the technique of Johnston and Kribs (arXiv:1102.0948), which\n # is only valid for completely positive maps.\n if not self.iscp:\n raise ValueError(\"Dual channels are only implemented for CP maps.\")\n J = sr.to_choi(self)\n tensor_idxs = enumerate_flat(J.dims)\n J_dual = tensor.tensor_swap(J, *(\n list(zip(tensor_idxs[0][1], tensor_idxs[0][0])) +\n list(zip(tensor_idxs[1][1], tensor_idxs[1][0]))\n )).trans()\n J_dual.superrep = 'choi'\n return J_dual\n\n\n def conj(self):\n \"\"\"Conjugate operator of quantum object.\n \"\"\"\n out = Qobj()\n out.data = self.data.conj()\n out.dims = [self.dims[0], self.dims[1]]\n return out\n\n def norm(self, norm=None, sparse=False, tol=0, maxiter=100000):\n \"\"\"Norm of a quantum object.\n\n Default norm is L2-norm for kets and trace-norm for operators.\n Other ket and operator norms may be specified using the `norm` and\n argument.\n\n Parameters\n ----------\n norm : str\n Which norm to use for ket/bra vectors: L2 'l2', max norm 'max',\n or for operators: trace 'tr', Frobius 'fro', one 'one', or max\n 'max'.\n\n sparse : bool\n Use sparse eigenvalue solver for trace norm. Other norms are not\n affected by this parameter.\n\n tol : float\n Tolerance for sparse solver (if used) for trace norm. 
The sparse\n solver may not converge if the tolerance is set too low.\n\n maxiter : int\n Maximum number of iterations performed by sparse solver (if used)\n for trace norm.\n\n Returns\n -------\n norm : float\n The requested norm of the operator or state quantum object.\n\n\n Notes\n -----\n The sparse eigensolver is much slower than the dense version.\n Use sparse only if memory requirements demand it.\n\n \"\"\"\n if self.type in ['oper', 'super']:\n if norm is None or norm == 'tr':\n _op = self*self.dag()\n vals = sp_eigs(_op.data, _op.isherm, vecs=False,\n sparse=sparse, tol=tol, maxiter=maxiter)\n return np.sum(np.sqrt(np.abs(vals)))\n elif norm == 'fro':\n return sp_fro_norm(self.data)\n elif norm == 'one':\n return sp_one_norm(self.data)\n elif norm == 'max':\n return sp_max_norm(self.data)\n else:\n raise ValueError(\n \"For matrices, norm must be 'tr', 'fro', 'one', or 'max'.\")\n else:\n if norm is None or norm == 'l2':\n return sp_L2_norm(self.data)\n elif norm == 'max':\n return sp_max_norm(self.data)\n else:\n raise ValueError(\"For vectors, norm must be 'l2', or 'max'.\")\n\n def proj(self):\n \"\"\"Form the projector from a given ket or bra vector.\n\n Parameters\n ----------\n Q : :class:`qutip.Qobj`\n Input bra or ket vector\n\n Returns\n -------\n P : :class:`qutip.Qobj`\n Projection operator.\n \"\"\"\n if self.isket:\n _out = zcsr_proj(self.data,1)\n _dims = [self.dims[0],self.dims[0]]\n elif self.isbra:\n _out = zcsr_proj(self.data,0)\n _dims = [self.dims[1],self.dims[1]]\n else:\n raise TypeError('Projector can only be formed from a bra or ket.')\n\n return Qobj(_out,dims=_dims)\n\n\n def tr(self):\n \"\"\"Trace of a quantum object.\n\n Returns\n -------\n trace : float\n Returns ``real`` if operator is Hermitian, returns ``complex``\n otherwise.\n\n \"\"\"\n return zcsr_trace(self.data, self.isherm)\n\n def full(self, order='C', squeeze=False):\n \"\"\"Dense array from quantum object.\n\n Parameters\n ----------\n order : str {'C', 'F'}\n Return array in C (default) or Fortran ordering.\n squeeze : bool {False, True}\n Squeeze output array.\n\n Returns\n -------\n data : array\n Array of complex data from quantum objects `data` attribute.\n \"\"\"\n if squeeze:\n return self.data.toarray(order=order).squeeze()\n else:\n return self.data.toarray(order=order)\n\n def __array__(self, *arg, **kwarg):\n \"\"\"Numpy array from Qobj\n For compatibility with np.array\n \"\"\"\n return self.full()\n\n def diag(self):\n \"\"\"Diagonal elements of quantum object.\n\n Returns\n -------\n diags : array\n Returns array of ``real`` values if operators is Hermitian,\n otherwise ``complex`` values are returned.\n\n \"\"\"\n out = self.data.diagonal()\n if np.any(np.imag(out) > settings.atol) or not self.isherm:\n return out\n else:\n return np.real(out)\n\n def expm(self, method='dense'):\n \"\"\"Matrix exponential of quantum operator.\n\n Input operator must be square.\n\n Parameters\n ----------\n method : str {'dense', 'sparse'}\n Use set method to use to calculate the matrix exponentiation. The\n available choices includes 'dense' and 'sparse'. 
Since the\n exponential of a matrix is nearly always dense, method='dense'\n is set as default.s\n\n Returns\n -------\n oper : :class:`qutip.Qobj`\n Exponentiated quantum operator.\n\n Raises\n ------\n TypeError\n Quantum operator is not square.\n\n \"\"\"\n if self.dims[0][0] != self.dims[1][0]:\n raise TypeError('Invalid operand for matrix exponential')\n\n if method == 'dense':\n F = sp_expm(self.data, sparse=False)\n\n elif method == 'sparse':\n F = sp_expm(self.data, sparse=True)\n\n else:\n raise ValueError(\"method must be 'dense' or 'sparse'.\")\n\n out = Qobj(F, dims=self.dims)\n return out.tidyup() if settings.auto_tidyup else out\n\n def check_herm(self):\n \"\"\"Check if the quantum object is hermitian.\n\n Returns\n -------\n isherm : bool\n Returns the new value of isherm property.\n \"\"\"\n self._isherm = None\n return self.isherm\n\n def sqrtm(self, sparse=False, tol=0, maxiter=100000):\n \"\"\"Sqrt of a quantum operator.\n\n Operator must be square.\n\n Parameters\n ----------\n sparse : bool\n Use sparse eigenvalue/vector solver.\n tol : float\n Tolerance used by sparse solver (0 = machine precision).\n maxiter : int\n Maximum number of iterations used by sparse solver.\n\n Returns\n -------\n oper : :class:`qutip.Qobj`\n Matrix square root of operator.\n\n Raises\n ------\n TypeError\n Quantum object is not square.\n\n Notes\n -----\n The sparse eigensolver is much slower than the dense version.\n Use sparse only if memory requirements demand it.\n\n \"\"\"\n if self.dims[0][0] == self.dims[1][0]:\n evals, evecs = sp_eigs(self.data, self.isherm, sparse=sparse,\n tol=tol, maxiter=maxiter)\n numevals = len(evals)\n dV = sp.spdiags(np.sqrt(evals, dtype=complex), 0, numevals,\n numevals, format='csr')\n if self.isherm:\n spDv = dV.dot(evecs.T.conj().T)\n else:\n spDv = dV.dot(np.linalg.inv(evecs.T))\n\n out = Qobj(evecs.T.dot(spDv), dims=self.dims)\n return out.tidyup() if settings.auto_tidyup else out\n\n else:\n raise TypeError('Invalid operand for matrix square root')\n\n\n def cosm(self):\n \"\"\"Cosine of a quantum operator.\n\n Operator must be square.\n\n Returns\n -------\n oper : :class:`qutip.Qobj`\n Matrix cosine of operator.\n\n Raises\n ------\n TypeError\n Quantum object is not square.\n\n Notes\n -----\n Uses the Q.expm() method.\n\n \"\"\"\n if self.dims[0][0] == self.dims[1][0]:\n return 0.5 * ((1j * self).expm() + (-1j * self).expm())\n else:\n raise TypeError('Invalid operand for matrix square root')\n\n\n def sinm(self):\n \"\"\"Sine of a quantum operator.\n\n Operator must be square.\n\n Returns\n -------\n oper : :class:`qutip.Qobj`\n Matrix sine of operator.\n\n Raises\n ------\n TypeError\n Quantum object is not square.\n\n Notes\n -----\n Uses the Q.expm() method.\n\n \"\"\"\n if self.dims[0][0] == self.dims[1][0]:\n return -0.5j * ((1j * self).expm() - (-1j * self).expm())\n else:\n raise TypeError('Invalid operand for matrix square root')\n\n\n\n def unit(self, inplace=False,\n norm=None, sparse=False,\n tol=0, maxiter=100000):\n \"\"\"Operator or state normalized to unity.\n\n Uses norm from Qobj.norm().\n\n Parameters\n ----------\n inplace : bool\n Do an in-place normalization\n norm : str\n Requested norm for states / operators.\n sparse : bool\n Use sparse eigensolver for trace norm. 
Does not affect other norms.\n tol : float\n Tolerance used by sparse eigensolver.\n maxiter : int\n Number of maximum iterations performed by sparse eigensolver.\n\n Returns\n -------\n oper : :class:`qutip.Qobj`\n Normalized quantum object if not in-place,\n else None.\n\n \"\"\"\n if inplace:\n nrm = self.norm(norm=norm, sparse=sparse,\n tol=tol, maxiter=maxiter)\n\n self.data /= nrm\n elif not inplace:\n out = self / self.norm(norm=norm, sparse=sparse,\n tol=tol, maxiter=maxiter)\n if settings.auto_tidyup:\n return out.tidyup()\n else:\n return out\n else:\n raise Exception('inplace kwarg must be bool.')\n\n def ptrace(self, sel):\n \"\"\"Partial trace of the quantum object.\n\n Parameters\n ----------\n sel : int/list\n An ``int`` or ``list`` of components to keep after partial trace.\n\n Returns\n -------\n oper : :class:`qutip.Qobj`\n Quantum object representing partial trace with selected components\n remaining.\n\n Notes\n -----\n This function is identical to the :func:`qutip.qobj.ptrace` function\n that has been deprecated.\n\n \"\"\"\n q = Qobj()\n q.data, q.dims, _ = _ptrace(self, sel)\n return q.tidyup() if settings.auto_tidyup else q\n\n def permute(self, order):\n \"\"\"Permutes a composite quantum object.\n\n Parameters\n ----------\n order : list/array\n List specifying new tensor order.\n\n Returns\n -------\n P : :class:`qutip.Qobj`\n Permuted quantum object.\n\n \"\"\"\n q = Qobj()\n q.data, q.dims = _permute(self, order)\n return q.tidyup() if settings.auto_tidyup else q\n\n def tidyup(self, atol=settings.auto_tidyup_atol):\n \"\"\"Removes small elements from the quantum object.\n\n Parameters\n ----------\n atol : float\n Absolute tolerance used by tidyup. Default is set\n via qutip global settings parameters.\n\n Returns\n -------\n oper : :class:`qutip.Qobj`\n Quantum object with small elements removed.\n\n \"\"\"\n if self.data.nnz:\n #This does the tidyup and returns True if\n #The sparse data needs to be shortened\n if use_openmp() and self.data.nnz > 500:\n if omp_tidyup(self.data.data,atol,self.data.nnz,\n settings.num_cpus):\n self.data.eliminate_zeros()\n else:\n if cy_tidyup(self.data.data,atol,self.data.nnz):\n self.data.eliminate_zeros()\n return self\n else:\n return self\n\n def transform(self, inpt, inverse=False, sparse=True):\n \"\"\"Basis transform defined by input array.\n\n Input array can be a ``matrix`` defining the transformation,\n or a ``list`` of kets that defines the new basis.\n\n\n Parameters\n ----------\n inpt : array_like\n A ``matrix`` or ``list`` of kets defining the transformation.\n inverse : bool\n Whether to return inverse transformation.\n sparse : bool\n Use sparse matrices when possible. 
Can be slower.\n\n Returns\n -------\n oper : :class:`qutip.Qobj`\n Operator in new basis.\n\n Notes\n -----\n This function is still in development.\n\n\n \"\"\"\n if isinstance(inpt, list) or (isinstance(inpt, np.ndarray) and\n len(inpt.shape) == 1):\n if len(inpt) != max(self.shape):\n raise TypeError(\n 'Invalid size of ket list for basis transformation')\n if sparse:\n S = sp.hstack([psi.data for psi in inpt],\n format='csr', dtype=complex).conj().T\n else:\n S = np.hstack([psi.full() for psi in inpt],\n dtype=complex).conj().T\n elif isinstance(inpt, Qobj) and inpt.isoper:\n S = inpt.data\n elif isinstance(inpt, np.ndarray):\n S = inpt.conj()\n sparse = False\n else:\n raise TypeError('Invalid operand for basis transformation')\n\n\n # transform data\n if inverse:\n if self.isket:\n data = (S.conj().T) * self.data\n elif self.isbra:\n data = self.data.dot(S)\n else:\n if sparse:\n data = (S.conj().T) * self.data * S\n else:\n data = (S.conj().T).dot(self.data.dot(S))\n else:\n if self.isket:\n data = S * self.data\n elif self.isbra:\n data = self.data.dot(S.conj().T)\n else:\n if sparse:\n data = S * self.data * (S.conj().T)\n else:\n data = S.dot(self.data.dot(S.conj().T))\n\n out = Qobj(data, dims=self.dims)\n out._isherm = self._isherm\n out.superrep = self.superrep\n\n if settings.auto_tidyup:\n return out.tidyup()\n else:\n return out\n\n\n\n def trunc_neg(self, method=\"clip\"):\n \"\"\"Truncates negative eigenvalues and renormalizes.\n\n Returns a new Qobj by removing the negative eigenvalues\n of this instance, then renormalizing to obtain a valid density\n operator.\n\n\n Parameters\n ----------\n method : str\n Algorithm to use to remove negative eigenvalues. \"clip\"\n simply discards negative eigenvalues, then renormalizes.\n \"sgs\" uses the SGS algorithm (doi:10/bb76) to find the\n positive operator that is nearest in the Shatten 2-norm.\n\n Returns\n -------\n oper : :class:`qutip.Qobj`\n A valid density operator.\n\n \"\"\"\n if not self.isherm:\n raise ValueError(\"Must be a Hermitian operator to remove negative \"\n \"eigenvalues.\")\n\n if method not in ('clip', 'sgs'):\n raise ValueError(\"Method {} not recognized.\".format(method))\n\n eigvals, eigstates = self.eigenstates()\n if all([eigval >= 0 for eigval in eigvals]):\n # All positive, so just renormalize.\n return self.unit()\n idx_nonzero = eigvals != 0\n eigvals = eigvals[idx_nonzero]\n eigstates = eigstates[idx_nonzero]\n\n if method == 'clip':\n eigvals[eigvals < 0] = 0\n elif method == 'sgs':\n eigvals = eigvals[::-1]\n eigstates = eigstates[::-1]\n\n acc = 0.0\n dim = self.shape[0]\n n_eigs = len(eigvals)\n\n for idx in reversed(range(n_eigs)):\n if eigvals[idx] + acc / (idx + 1) >= 0:\n break\n else:\n acc += eigvals[idx]\n eigvals[idx] = 0.0\n\n eigvals[:idx+1] += acc / (idx + 1)\n\n return sum([\n val * qutip.states.ket2dm(state)\n for val, state in zip(eigvals, eigstates)\n ], Qobj(np.zeros(self.shape), dims=self.dims)\n ).unit()\n\n\n def matrix_element(self, bra, ket):\n \"\"\"Calculates a matrix element.\n\n Gives the matrix element for the quantum object sandwiched between a\n `bra` and `ket` vector.\n\n Parameters\n -----------\n bra : :class:`qutip.Qobj`\n Quantum object of type 'bra' or 'ket'\n\n ket : :class:`qutip.Qobj`\n Quantum object of type 'ket'.\n\n Returns\n -------\n elem : complex\n Complex valued matrix element.\n\n Note\n ----\n It is slightly more computationally efficient to use a ket\n vector for the 'bra' input.\n\n \"\"\"\n if not self.isoper:\n raise TypeError(\"Can only 
get matrix elements for an operator.\")\n\n else:\n if bra.isbra and ket.isket:\n return zcsr_mat_elem(self.data,bra.data,ket.data,1)\n\n elif bra.isket and ket.isket:\n return zcsr_mat_elem(self.data,bra.data,ket.data,0)\n else:\n raise TypeError(\"Can only calculate matrix elements for bra and ket vectors.\")\n\n def overlap(self, other):\n \"\"\"Overlap between two state vectors or two operators.\n\n Gives the overlap (inner product) between the current bra or ket Qobj\n and and another bra or ket Qobj. It gives the Hilbert-Schmidt overlap\n when one of the Qobj is an operator/density matrix.\n\n Parameters\n -----------\n other : :class:`qutip.Qobj`\n Quantum object for a state vector of type 'ket', 'bra' or density\n matrix.\n\n Returns\n -------\n overlap : complex\n Complex valued overlap.\n\n Raises\n ------\n TypeError\n Can only calculate overlap between a bra, ket and density matrix\n quantum objects.\n\n Notes\n -----\n Since QuTiP mainly deals with ket vectors, the most efficient inner\n product call is the ket-ket version that computes the product\n <self|other> with both vectors expressed as kets.\n \"\"\"\n\n if isinstance(other, Qobj):\n\n if self.isbra:\n if other.isket:\n return zcsr_inner(self.data, other.data, 1)\n elif other.isbra:\n #Since we deal mainly with ket vectors, the bra-bra combo\n #is not common, and not optimized.\n return zcsr_inner(self.data, other.dag().data, 1)\n elif other.isoper:\n return (qutip.states.ket2dm(self).dag() * other).tr()\n else:\n raise TypeError(\"Can only calculate overlap for state vector Qobjs\")\n\n elif self.isket:\n if other.isbra:\n return zcsr_inner(other.data, self.data, 1)\n elif other.isket:\n return zcsr_inner(self.data, other.data, 0)\n elif other.isoper:\n return (qutip.states.ket2dm(self).dag() * other).tr()\n else:\n raise TypeError(\"Can only calculate overlap for state vector Qobjs\")\n\n elif self.isoper:\n if other.isket or other.isbra:\n return (self.dag() * qutip.states.ket2dm(other)).tr()\n elif other.isoper:\n return (self.dag() * other).tr()\n else:\n raise TypeError(\"Can only calculate overlap for state vector Qobjs\")\n\n\n raise TypeError(\"Can only calculate overlap for state vector Qobjs\")\n\n\n def eigenstates(self, sparse=False, sort='low',\n eigvals=0, tol=0, maxiter=100000):\n \"\"\"Eigenstates and eigenenergies.\n\n Eigenstates and eigenenergies are defined for operators and\n superoperators only.\n\n Parameters\n ----------\n sparse : bool\n Use sparse Eigensolver\n\n sort : str\n Sort eigenvalues (and vectors) 'low' to high, or 'high' to low.\n\n eigvals : int\n Number of requested eigenvalues. 
Default is all eigenvalues.\n\n tol : float\n Tolerance used by sparse Eigensolver (0 = machine precision).\n The sparse solver may not converge if the tolerance is set too low.\n\n maxiter : int\n Maximum number of iterations performed by sparse solver (if used).\n\n Returns\n -------\n eigvals : array\n Array of eigenvalues for operator.\n\n eigvecs : array\n Array of quantum operators representing the oprator eigenkets.\n Order of eigenkets is determined by order of eigenvalues.\n\n Notes\n -----\n The sparse eigensolver is much slower than the dense version.\n Use sparse only if memory requirements demand it.\n\n \"\"\"\n evals, evecs = sp_eigs(self.data, self.isherm, sparse=sparse,\n sort=sort, eigvals=eigvals, tol=tol,\n maxiter=maxiter)\n new_dims = [self.dims[0], [1] * len(self.dims[0])]\n ekets = np.array([Qobj(vec, dims=new_dims) for vec in evecs],\n dtype=object)\n norms = np.array([ket.norm() for ket in ekets])\n return evals, ekets / norms\n\n\n def eigenenergies(self, sparse=False, sort='low',\n eigvals=0, tol=0, maxiter=100000):\n \"\"\"Eigenenergies of a quantum object.\n\n Eigenenergies (eigenvalues) are defined for operators or superoperators\n only.\n\n Parameters\n ----------\n sparse : bool\n Use sparse Eigensolver\n sort : str\n Sort eigenvalues 'low' to high, or 'high' to low.\n eigvals : int\n Number of requested eigenvalues. Default is all eigenvalues.\n tol : float\n Tolerance used by sparse Eigensolver (0=machine precision).\n The sparse solver may not converge if the tolerance is set too low.\n maxiter : int\n Maximum number of iterations performed by sparse solver (if used).\n\n Returns\n -------\n eigvals : array\n Array of eigenvalues for operator.\n\n Notes\n -----\n The sparse eigensolver is much slower than the dense version.\n Use sparse only if memory requirements demand it.\n\n \"\"\"\n return sp_eigs(self.data, self.isherm, vecs=False, sparse=sparse,\n sort=sort, eigvals=eigvals, tol=tol, maxiter=maxiter)\n\n def groundstate(self, sparse=False, tol=0, maxiter=100000, safe=True):\n \"\"\"Ground state Eigenvalue and Eigenvector.\n\n Defined for quantum operators or superoperators only.\n\n Parameters\n ----------\n sparse : bool\n Use sparse Eigensolver\n tol : float\n Tolerance used by sparse Eigensolver (0 = machine precision).\n The sparse solver may not converge if the tolerance is set too low.\n maxiter : int\n Maximum number of iterations performed by sparse solver (if used).\n safe : bool (default=True)\n Check for degenerate ground state\n\n Returns\n -------\n eigval : float\n Eigenvalue for the ground state of quantum operator.\n eigvec : :class:`qutip.Qobj`\n Eigenket for the ground state of quantum operator.\n\n Notes\n -----\n The sparse eigensolver is much slower than the dense version.\n Use sparse only if memory requirements demand it.\n\n \"\"\"\n if safe:\n evals = 2\n else:\n evals = 1\n grndval, grndvec = sp_eigs(self.data, self.isherm, sparse=sparse,\n eigvals=evals, tol=tol, maxiter=maxiter)\n if safe:\n if tol == 0: tol = 1e-15\n if (grndval[1]-grndval[0]) <= 10*tol:\n print(\"WARNING: Ground state may be degenerate. 
\"\n \"Use Q.eigenstates()\")\n new_dims = [self.dims[0], [1] * len(self.dims[0])]\n grndvec = Qobj(grndvec[0], dims=new_dims)\n grndvec = grndvec / grndvec.norm()\n return grndval[0], grndvec\n\n def trans(self):\n \"\"\"Transposed operator.\n\n Returns\n -------\n oper : :class:`qutip.Qobj`\n Transpose of input operator.\n\n \"\"\"\n out = Qobj()\n out.data = zcsr_transpose(self.data)\n out.dims = [self.dims[1], self.dims[0]]\n return out\n\n def extract_states(self, states_inds, normalize=False):\n \"\"\"Qobj with states in state_inds only.\n\n Parameters\n ----------\n states_inds : list of integer\n The states that should be kept.\n\n normalize : True / False\n Weather or not the new Qobj instance should be normalized (default\n is False). For Qobjs that represents density matrices or state\n vectors normalized should probably be set to True, but for Qobjs\n that represents operators in for example an Hamiltonian, normalize\n should be False.\n\n Returns\n -------\n q : :class:`qutip.Qobj`\n A new instance of :class:`qutip.Qobj` that contains only the states\n corresponding to the indices in `state_inds`.\n\n Notes\n -----\n Experimental.\n\n \"\"\"\n if self.isoper:\n q = Qobj(self.data[states_inds, :][:, states_inds])\n elif self.isket:\n q = Qobj(self.data[states_inds, :])\n elif self.isbra:\n q = Qobj(self.data[:, states_inds])\n else:\n raise TypeError(\"Can only eliminate states from operators or \" +\n \"state vectors\")\n\n return q.unit() if normalize else q\n\n def eliminate_states(self, states_inds, normalize=False):\n \"\"\"Creates a new quantum object with states in state_inds eliminated.\n\n Parameters\n ----------\n states_inds : list of integer\n The states that should be removed.\n\n normalize : True / False\n Weather or not the new Qobj instance should be normalized (default\n is False). 
For Qobjs that represents density matrices or state\n vectors normalized should probably be set to True, but for Qobjs\n that represents operators in for example an Hamiltonian, normalize\n should be False.\n\n Returns\n -------\n q : :class:`qutip.Qobj`\n A new instance of :class:`qutip.Qobj` that contains only the states\n corresponding to indices that are **not** in `state_inds`.\n\n Notes\n -----\n Experimental.\n\n \"\"\"\n keep_indices = np.array([s not in states_inds\n for s in range(self.shape[0])]).nonzero()[0]\n\n return self.extract_states(keep_indices, normalize=normalize)\n\n def dnorm(self, B=None):\n \"\"\"Calculates the diamond norm, or the diamond distance to another\n operator.\n\n Parameters\n ----------\n B : :class:`qutip.Qobj` or None\n If B is not None, the diamond distance d(A, B) = dnorm(A - B) between\n this operator and B is returned instead of the diamond norm.\n\n Returns\n -------\n d : float\n Either the diamond norm of this operator, or the diamond distance\n from this operator to B.\n\n \"\"\"\n return mts.dnorm(self, B)\n\n\n @property\n def ishp(self):\n # FIXME: this needs to be cached in the same ways as isherm.\n if self.type in [\"super\", \"oper\"]:\n try:\n J = sr.to_choi(self)\n return J.isherm\n except:\n return False\n else:\n return False\n\n @property\n def iscp(self):\n # FIXME: this needs to be cached in the same ways as isherm.\n if self.type in [\"super\", \"oper\"]:\n try:\n J = (\n self\n # We can test with either Choi or chi, since the basis\n # transformation between them is unitary and hence\n # preserves the CP and TP conditions.\n if self.superrep in ('choi', 'chi')\n else sr.to_choi(self)\n )\n # If J isn't hermitian, then that could indicate either\n # that J is not normal, or is normal, but has complex eigenvalues.\n # In either case, it makes no sense to then demand that the\n # eigenvalues be non-negative.\n if not J.isherm:\n return False\n eigs = J.eigenenergies()\n return all(eigs >= -settings.atol)\n except:\n return False\n else:\n return False\n\n @property\n def istp(self):\n import qutip.superop_reps as sr\n if self.type in [\"super\", \"oper\"]:\n try:\n # Normalize to a super of type choi or chi.\n # We can test with either Choi or chi, since the basis\n # transformation between them is unitary and hence\n # preserves the CP and TP conditions.\n if self.type == \"super\" and self.superrep in ('choi', 'chi'):\n qobj = self\n else:\n qobj = sr.to_choi(self)\n\n # Possibly collapse dims.\n if any([len(index) > 1 for super_index in qobj.dims\n for index in super_index]):\n qobj = Qobj(qobj, dims=collapse_dims_super(qobj.dims))\n else:\n qobj = qobj\n\n # We use the condition from John Watrous' lecture notes,\n # Tr_1(J(Phi)) = identity_2.\n tr_oper = qobj.ptrace([0])\n ident = ops.identity(tr_oper.shape[0])\n return isequal(tr_oper, ident)\n except:\n return False\n else:\n return False\n\n @property\n def iscptp(self):\n from qutip.superop_reps import to_choi\n if self.type == \"super\" or self.type == \"oper\":\n reps = ('choi', 'chi')\n q_oper = to_choi(self) if self.superrep not in reps else self\n return q_oper.iscp and q_oper.istp\n else:\n return False\n\n @property\n def isherm(self):\n\n if self._isherm is not None:\n # used previously computed value\n return self._isherm\n\n self._isherm = bool(zcsr_isherm(self.data))\n\n return self._isherm\n\n @isherm.setter\n def isherm(self, isherm):\n self._isherm = isherm\n\n def check_isunitary(self):\n \"\"\"\n Checks whether qobj is a unitary matrix\n \"\"\"\n if 
self.isoper:\n eye_data = fast_identity(self.shape[0])\n return not (np.any(np.abs((self.data*self.dag().data\n - eye_data).data)\n > settings.atol)\n or\n np.any(np.abs((self.dag().data*self.data\n - eye_data).data) >\n settings.atol)\n )\n\n else:\n return False\n\n @property\n def isunitary(self):\n if self._isunitary is not None:\n # used previously computed value\n return self._isunitary\n\n self._isunitary = self.check_isunitary()\n\n return self._isunitary\n\n @isunitary.setter\n def isunitary(self, isunitary):\n self._isunitary = isunitary\n\n @property\n def type(self):\n if not self._type:\n self._type = type_from_dims(self.dims)\n\n return self._type\n\n @property\n def shape(self):\n if self.data.shape == (1, 1):\n return tuple([np.prod(self.dims[0]), np.prod(self.dims[1])])\n else:\n return tuple(self.data.shape)\n\n @property\n def isbra(self):\n return self.type == 'bra'\n\n @property\n def isket(self):\n return self.type == 'ket'\n\n @property\n def isoperbra(self):\n return self.type == 'operator-bra'\n\n @property\n def isoperket(self):\n return self.type == 'operator-ket'\n\n @property\n def isoper(self):\n return self.type == 'oper'\n\n @property\n def issuper(self):\n return self.type == 'super'\n\n @staticmethod\n def evaluate(qobj_list, t, args):\n \"\"\"Evaluate a time-dependent quantum object in list format. For\n example,\n\n qobj_list = [H0, [H1, func_t]]\n\n is evaluated to\n\n Qobj(t) = H0 + H1 * func_t(t, args)\n\n and\n\n qobj_list = [H0, [H1, 'sin(w * t)']]\n\n is evaluated to\n\n Qobj(t) = H0 + H1 * sin(args['w'] * t)\n\n Parameters\n ----------\n qobj_list : list\n A nested list of Qobj instances and corresponding time-dependent\n coefficients.\n t : float\n The time for which to evaluate the time-dependent Qobj instance.\n args : dictionary\n A dictionary with parameter values required to evaluate the\n time-dependent Qobj intance.\n\n Returns\n -------\n output : :class:`qutip.Qobj`\n A Qobj instance that represents the value of qobj_list at time t.\n\n \"\"\"\n\n q_sum = 0\n if isinstance(qobj_list, Qobj):\n q_sum = qobj_list\n elif isinstance(qobj_list, list):\n for q in qobj_list:\n if isinstance(q, Qobj):\n q_sum += q\n elif (isinstance(q, list) and len(q) == 2 and\n isinstance(q[0], Qobj)):\n if isinstance(q[1], types.FunctionType):\n q_sum += q[0] * q[1](t, args)\n elif isinstance(q[1], str):\n args['t'] = t\n q_sum += q[0] * float(eval(q[1], globals(), args))\n else:\n raise TypeError('Unrecognized format for ' +\n 'specification of time-dependent Qobj')\n else:\n raise TypeError('Unrecognized format for specification ' +\n 'of time-dependent Qobj')\n else:\n raise TypeError(\n 'Unrecongized format for specification of time-dependent Qobj')\n\n return q_sum\n\n\n# -----------------------------------------------------------------------------\n# This functions evaluates a time-dependent quantum object on the list-string\n# and list-function formats that are used by the time-dependent solvers.\n# Although not used directly in by those solvers, it can for test purposes be\n# conventient to be able to evaluate the expressions passed to the solver for\n# arbitrary value of time. 
This function provides this functionality.\n#\ndef qobj_list_evaluate(qobj_list, t, args):\n \"\"\"\n Depracated: See Qobj.evaluate\n \"\"\"\n warnings.warn(\"Deprecated: Use Qobj.evaluate\", DeprecationWarning)\n return Qobj.evaluate(qobj_list, t, args)\n\n\n# -----------------------------------------------------------------------------\n#\n# A collection of tests used to determine the type of quantum objects, and some\n# functions for increased compatibility with quantum optics toolbox.\n#\n\ndef dag(A):\n \"\"\"Adjont operator (dagger) of a quantum object.\n\n Parameters\n ----------\n A : :class:`qutip.Qobj`\n Input quantum object.\n\n Returns\n -------\n oper : :class:`qutip.Qobj`\n Adjoint of input operator\n\n Notes\n -----\n This function is for legacy compatibility only. It is recommended to use\n the ``dag()`` Qobj method.\n\n \"\"\"\n if not isinstance(A, Qobj):\n raise TypeError(\"Input is not a quantum object\")\n\n return A.dag()\n\n\ndef ptrace(Q, sel):\n \"\"\"Partial trace of the Qobj with selected components remaining.\n\n Parameters\n ----------\n Q : :class:`qutip.Qobj`\n Composite quantum object.\n sel : int/list\n An ``int`` or ``list`` of components to keep after partial trace.\n\n Returns\n -------\n oper : :class:`qutip.Qobj`\n Quantum object representing partial trace with selected components\n remaining.\n\n Notes\n -----\n This function is for legacy compatibility only. It is recommended to use\n the ``ptrace()`` Qobj method.\n\n \"\"\"\n if not isinstance(Q, Qobj):\n raise TypeError(\"Input is not a quantum object\")\n\n return Q.ptrace(sel)\n\n\ndef dims(inpt):\n \"\"\"Returns the dims attribute of a quantum object.\n\n Parameters\n ----------\n inpt : :class:`qutip.Qobj`\n Input quantum object.\n\n Returns\n -------\n dims : list\n A ``list`` of the quantum objects dimensions.\n\n Notes\n -----\n This function is for legacy compatibility only. Using the `Qobj.dims`\n attribute is recommended.\n\n \"\"\"\n if isinstance(inpt, Qobj):\n return inpt.dims\n else:\n raise TypeError(\"Input is not a quantum object\")\n\n\ndef shape(inpt):\n \"\"\"Returns the shape attribute of a quantum object.\n\n Parameters\n ----------\n inpt : :class:`qutip.Qobj`\n Input quantum object.\n\n Returns\n -------\n shape : list\n A ``list`` of the quantum objects shape.\n\n Notes\n -----\n This function is for legacy compatibility only. Using the `Qobj.shape`\n attribute is recommended.\n\n \"\"\"\n if isinstance(inpt, Qobj):\n return Qobj.shape\n else:\n return np.shape(inpt)\n\n\ndef isket(Q):\n \"\"\"\n Determines if given quantum object is a ket-vector.\n\n Parameters\n ----------\n Q : :class:`qutip.Qobj`\n Quantum object\n\n Returns\n -------\n isket : bool\n True if qobj is ket-vector, False otherwise.\n\n Examples\n --------\n >>> psi = basis(5,2)\n >>> isket(psi)\n True\n\n Notes\n -----\n This function is for legacy compatibility only. Using the `Qobj.isket`\n attribute is recommended.\n\n \"\"\"\n return True if isinstance(Q, Qobj) and Q.isket else False\n\n\ndef isbra(Q):\n \"\"\"Determines if given quantum object is a bra-vector.\n\n Parameters\n ----------\n Q : :class:`qutip.Qobj`\n Quantum object\n\n Returns\n -------\n isbra : bool\n True if Qobj is bra-vector, False otherwise.\n\n Examples\n --------\n >>> psi = basis(5,2)\n >>> isket(psi)\n False\n\n Notes\n -----\n This function is for legacy compatibility only. 
Using the `Qobj.isbra`\n attribute is recommended.\n\n \"\"\"\n return True if isinstance(Q, Qobj) and Q.isbra else False\n\n\ndef isoperket(Q):\n \"\"\"Determines if given quantum object is an operator in column vector form\n (operator-ket).\n\n Parameters\n ----------\n Q : :class:`qutip.Qobj`\n Quantum object\n\n Returns\n -------\n isoperket : bool\n True if Qobj is operator-ket, False otherwise.\n\n Notes\n -----\n This function is for legacy compatibility only. Using the `Qobj.isoperket`\n attribute is recommended.\n\n \"\"\"\n return True if isinstance(Q, Qobj) and Q.isoperket else False\n\n\ndef isoperbra(Q):\n \"\"\"Determines if given quantum object is an operator in row vector form\n (operator-bra).\n\n Parameters\n ----------\n Q : :class:`qutip.Qobj`\n Quantum object\n\n Returns\n -------\n isoperbra : bool\n True if Qobj is operator-bra, False otherwise.\n\n Notes\n -----\n This function is for legacy compatibility only. Using the `Qobj.isoperbra`\n attribute is recommended.\n\n \"\"\"\n return True if isinstance(Q, Qobj) and Q.isoperbra else False\n\n\ndef isoper(Q):\n \"\"\"Determines if given quantum object is a operator.\n\n Parameters\n ----------\n Q : :class:`qutip.Qobj`\n Quantum object\n\n Returns\n -------\n isoper : bool\n True if Qobj is operator, False otherwise.\n\n Examples\n --------\n >>> a = destroy(5)\n >>> isoper(a)\n True\n\n Notes\n -----\n This function is for legacy compatibility only. Using the `Qobj.isoper`\n attribute is recommended.\n\n \"\"\"\n return True if isinstance(Q, Qobj) and Q.isoper else False\n\n\ndef issuper(Q):\n \"\"\"Determines if given quantum object is a super-operator.\n\n Parameters\n ----------\n Q : :class:`qutip.Qobj`\n Quantum object\n\n Returns\n -------\n issuper : bool\n True if Qobj is superoperator, False otherwise.\n\n Notes\n -----\n This function is for legacy compatibility only. Using the `Qobj.issuper`\n attribute is recommended.\n\n \"\"\"\n return True if isinstance(Q, Qobj) and Q.issuper else False\n\n\ndef isequal(A, B, tol=None):\n \"\"\"Determines if two qobj objects are equal to within given tolerance.\n\n Parameters\n ----------\n A : :class:`qutip.Qobj`\n Qobj one\n B : :class:`qutip.Qobj`\n Qobj two\n tol : float\n Tolerence for equality to be valid\n\n Returns\n -------\n isequal : bool\n True if qobjs are equal, False otherwise.\n\n Notes\n -----\n This function is for legacy compatibility only. Instead, it is recommended\n to use the equality operator of Qobj instances instead: A == B.\n\n \"\"\"\n if tol is None:\n tol = settings.atol\n\n if not isinstance(A, Qobj) or not isinstance(B, Qobj):\n return False\n\n if A.dims != B.dims:\n return False\n\n Adat = A.data\n Bdat = B.data\n elems = (Adat - Bdat).data\n if np.any(np.abs(elems) > tol):\n return False\n\n return True\n\n\ndef isherm(Q):\n \"\"\"Determines if given operator is Hermitian.\n\n Parameters\n ----------\n Q : :class:`qutip.Qobj`\n Quantum object\n\n Returns\n -------\n isherm : bool\n True if operator is Hermitian, False otherwise.\n\n Examples\n --------\n >>> a = destroy(4)\n >>> isherm(a)\n False\n\n Notes\n -----\n This function is for legacy compatibility only. 
Using the `Qobj.isherm`\n attribute is recommended.\n\n \"\"\"\n return True if isinstance(Q, Qobj) and Q.isherm else False\n\n\n# TRAILING IMPORTS\n# We do a few imports here to avoid circular dependencies.\nfrom qutip.eseries import eseries\nimport qutip.superop_reps as sr\nimport qutip.tensor as tensor\nimport qutip.operators as ops\nimport qutip.metrics as mts\nimport qutip.states\nimport qutip.superoperator\n",
"# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\"\"\"\nThis module provides solvers for the unitary Schrodinger equation.\n\"\"\"\n\n__all__ = ['sesolve']\n\nimport os\nimport types\nimport numpy as np\nimport scipy.integrate\nfrom scipy.linalg import norm as la_norm\nfrom qutip.cy.stochastic import normalize_inplace\nimport qutip.settings as qset\nfrom qutip.qobj import Qobj\nfrom qutip.qobjevo import QobjEvo\nfrom qutip.cy.spconvert import dense1D_to_fastcsr_ket, dense2D_to_fastcsr_fmode\nfrom qutip.cy.spmatfuncs import (cy_expect_psi, cy_ode_psi_func_td,\n cy_ode_psi_func_td_with_state)\nfrom qutip.solver import Result, Options, config, solver_safe, SolverSystem\nfrom qutip.superoperator import vec2mat\nfrom qutip.ui.progressbar import (BaseProgressBar, TextProgressBar)\nfrom qutip.cy.openmp.utilities import check_use_openmp, openmp_components\n\ndef sesolve(H, psi0, tlist, e_ops=[], args={}, options=Options(),\n progress_bar=BaseProgressBar(), _safe_mode=True):\n \"\"\"\n Schrodinger equation evolution of a state vector or unitary matrix\n for a given Hamiltonian.\n\n Evolve the state vector (`psi0`) using a given\n Hamiltonian (`H`), by integrating the set of ordinary differential\n equations that define the system. Alternatively evolve a unitary matrix in\n solving the Schrodinger operator equation.\n\n The output is either the state vector or unitary matrix at arbitrary points\n in time (`tlist`), or the expectation values of the supplied operators\n (`e_ops`). If e_ops is a callback function, it is invoked for each\n time in `tlist` with time and the state as arguments, and the function\n does not use any return values. 
e_ops cannot be used in conjunction\n with solving the Schrodinger operator equation\n\n Parameters\n ----------\n\n H : :class:`qutip.qobj`, :class:`qutip.qobjevo`, *list*, *callable*\n system Hamiltonian as a Qobj, list of Qobj and coefficient, QobjEvo,\n or a callback function for time-dependent Hamiltonians.\n list format and options can be found in QobjEvo's description.\n\n psi0 : :class:`qutip.qobj`\n initial state vector (ket)\n or initial unitary operator `psi0 = U`\n\n tlist : *list* / *array*\n list of times for :math:`t`.\n\n e_ops : list of :class:`qutip.qobj` / callback function\n single operator or list of operators for which to evaluate\n expectation values.\n For list operator evolution, the overlapse is computed:\n tr(e_ops[i].dag()*op(t))\n\n args : *dictionary*\n dictionary of parameters for time-dependent Hamiltonians\n\n options : :class:`qutip.Qdeoptions`\n with options for the ODE solver.\n\n progress_bar : BaseProgressBar\n Optional instance of BaseProgressBar, or a subclass thereof, for\n showing the progress of the simulation.\n\n Returns\n -------\n\n output: :class:`qutip.solver`\n\n An instance of the class :class:`qutip.solver`, which contains either\n an *array* of expectation values for the times specified by `tlist`, or\n an *array* or state vectors corresponding to the\n times in `tlist` [if `e_ops` is an empty list], or\n nothing if a callback function was given inplace of operators for\n which to calculate the expectation values.\n\n \"\"\"\n if isinstance(e_ops, Qobj):\n e_ops = [e_ops]\n elif isinstance(e_ops, dict):\n e_ops_dict = e_ops\n e_ops = [e for e in e_ops.values()]\n else:\n e_ops_dict = None\n\n if progress_bar is True:\n progress_bar = TextProgressBar()\n\n if not (psi0.isket or psi0.isunitary):\n raise TypeError(\"The unitary solver requires psi0 to be\"\n \" a ket as initial state\"\n \" or a unitary as initial operator.\")\n\n if options.rhs_reuse and not isinstance(H, SolverSystem):\n # TODO: deprecate when going to class based solver.\n if \"sesolve\" in solver_safe:\n # print(\" \")\n H = solver_safe[\"sesolve\"]\n else:\n pass\n # raise Exception(\"Could not find the Hamiltonian to reuse.\")\n\n #check if should use OPENMP\n check_use_openmp(options)\n\n if isinstance(H, SolverSystem):\n ss = H\n elif isinstance(H, (list, Qobj, QobjEvo)):\n ss = _sesolve_QobjEvo(H, tlist, args, options)\n elif callable(H):\n ss = _sesolve_func_td(H, args, options)\n else:\n raise Exception(\"Invalid H type\")\n\n func, ode_args = ss.makefunc(ss, psi0, args, options)\n\n if _safe_mode:\n v = psi0.full().ravel('F')\n func(0., v, *ode_args) + v\n\n res = _generic_ode_solve(func, ode_args, psi0, tlist, e_ops, options,\n progress_bar, dims=psi0.dims)\n if e_ops_dict:\n res.expect = {e: res.expect[n]\n for n, e in enumerate(e_ops_dict.keys())}\n res.SolverSystem = ss\n return res\n\n\n# -----------------------------------------------------------------------------\n# A time-dependent unitary wavefunction equation on the list-function format\n#\ndef _sesolve_QobjEvo(H, tlist, args, opt):\n \"\"\"\n Prepare the system for the solver, H can be an QobjEvo.\n \"\"\"\n H_td = -1.0j * QobjEvo(H, args, tlist)\n if opt.rhs_with_state:\n H_td._check_old_with_state()\n nthread = opt.openmp_threads if opt.use_openmp else 0\n H_td.compile(omp=nthread)\n\n ss = SolverSystem()\n ss.H = H_td\n ss.makefunc = _qobjevo_set\n solver_safe[\"sesolve\"] = ss\n return ss\n\ndef _qobjevo_set(HS, psi, args, opt):\n \"\"\"\n From the system, get the ode function and args\n \"\"\"\n 
H_td = HS.H\n H_td.arguments(args)\n if psi.isunitary:\n func = H_td.compiled_qobjevo.ode_mul_mat_f_vec\n elif psi.isket:\n func = H_td.compiled_qobjevo.mul_vec\n else:\n raise TypeError(\"The unitary solver requires psi0 to be\"\n \" a ket as initial state\"\n \" or a unitary as initial operator.\")\n return func, ()\n\n\n# -----------------------------------------------------------------------------\n# Wave function evolution using a ODE solver (unitary quantum evolution), for\n# time dependent hamiltonians.\n#\ndef _sesolve_func_td(H_func, args, opt):\n \"\"\"\n Prepare the system for the solver, H is a function.\n \"\"\"\n ss = SolverSystem()\n ss.H = H_func\n ss.makefunc = _Hfunc_set\n solver_safe[\"sesolve\"] = ss\n return ss\n\ndef _Hfunc_set(HS, psi, args, opt):\n \"\"\"\n From the system, get the ode function and args\n \"\"\"\n H_func = HS.H\n if psi.isunitary:\n if not opt.rhs_with_state:\n print(\"_ode_oper_func_td\")\n func = _ode_oper_func_td\n else:\n print(\"_ode_oper_func_td_with_state\")\n func = _ode_oper_func_td_with_state\n else:\n if not opt.rhs_with_state:\n print(\"cy_ode_psi_func_td\")\n func = cy_ode_psi_func_td\n else:\n print(\"cy_ode_psi_func_td_with_state\")\n func = cy_ode_psi_func_td_with_state\n\n return func, (H_func, args)\n\n\n# -----------------------------------------------------------------------------\n# evaluate dU(t)/dt according to the schrodinger equation\n#\ndef _ode_oper_func_td(t, y, H_func, args):\n H = H_func(t, args).data * -1j\n ym = vec2mat(y)\n return (H * ym).ravel(\"F\")\n\ndef _ode_oper_func_td_with_state(t, y, H_func, args):\n H = H_func(t, y, args).data * -1j\n ym = vec2mat(y)\n return (H * ym).ravel(\"F\")\n\n\n# -----------------------------------------------------------------------------\n# Solve an ODE for func.\n# Calculate the required expectation values or invoke callback\n# function at each time step.\ndef _generic_ode_solve(func, ode_args, psi0, tlist, e_ops, opt,\n progress_bar, dims=None):\n \"\"\"\n Internal function for solving ODEs.\n \"\"\"\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # This function is made similar to mesolve's one for futur merging in a\n # solver class\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n # prepare output array\n n_tsteps = len(tlist)\n output = Result()\n output.solver = \"sesolve\"\n output.times = tlist\n\n if psi0.isunitary:\n initial_vector = psi0.full().ravel('F')\n oper_evo = True\n size = psi0.shape[0]\n # oper_n = dims[0][0]\n # norm_dim_factor = np.sqrt(oper_n)\n elif psi0.isket:\n initial_vector = psi0.full().ravel()\n oper_evo = False\n # norm_dim_factor = 1.0\n\n r = scipy.integrate.ode(func)\n r.set_integrator('zvode', method=opt.method, order=opt.order,\n atol=opt.atol, rtol=opt.rtol, nsteps=opt.nsteps,\n first_step=opt.first_step, min_step=opt.min_step,\n max_step=opt.max_step)\n if ode_args:\n r.set_f_params(*ode_args)\n r.set_initial_value(initial_vector, tlist[0])\n\n e_ops_data = []\n output.expect = []\n if callable(e_ops):\n n_expt_op = 0\n expt_callback = True\n output.num_expect = 1\n elif isinstance(e_ops, list):\n n_expt_op = len(e_ops)\n expt_callback = False\n output.num_expect = n_expt_op\n if n_expt_op == 0:\n # fallback on storing states\n opt.store_states = True\n else:\n for op in e_ops:\n if op.isherm:\n output.expect.append(np.zeros(n_tsteps))\n else:\n output.expect.append(np.zeros(n_tsteps, dtype=complex))\n if oper_evo:\n for e in e_ops:\n e_ops_data.append(e.dag().data)\n else:\n 
for e in e_ops:\n e_ops_data.append(e.data)\n else:\n raise TypeError(\"Expectation parameter must be a list or a function\")\n\n if opt.store_states:\n output.states = []\n\n if oper_evo:\n def get_curr_state_data(r):\n return vec2mat(r.y)\n else:\n def get_curr_state_data(r):\n return r.y\n\n #\n # start evolution\n #\n dt = np.diff(tlist)\n cdata = None\n progress_bar.start(n_tsteps)\n for t_idx, t in enumerate(tlist):\n progress_bar.update(t_idx)\n if not r.successful():\n raise Exception(\"ODE integration error: Try to increase \"\n \"the allowed number of substeps by increasing \"\n \"the nsteps parameter in the Options class.\")\n # get the current state / oper data if needed\n if opt.store_states or opt.normalize_output or n_expt_op > 0 or expt_callback:\n cdata = get_curr_state_data(r)\n\n if opt.normalize_output:\n # normalize per column\n if oper_evo:\n cdata /= la_norm(cdata, axis=0)\n #cdata *= norm_dim_factor / la_norm(cdata)\n r.set_initial_value(cdata.ravel('F'), r.t)\n else:\n #cdata /= la_norm(cdata)\n norm = normalize_inplace(cdata)\n if norm > 1e-12:\n # only reset the solver if state changed\n r.set_initial_value(cdata, r.t)\n else:\n r._y = cdata\n\n if opt.store_states:\n if oper_evo:\n fdata = dense2D_to_fastcsr_fmode(cdata, size, size)\n output.states.append(Qobj(fdata, dims=dims))\n else:\n fdata = dense1D_to_fastcsr_ket(cdata)\n output.states.append(Qobj(fdata, dims=dims, fast='mc'))\n\n if expt_callback:\n # use callback method\n output.expect.append(e_ops(t, Qobj(cdata, dims=dims)))\n\n if oper_evo:\n for m in range(n_expt_op):\n output.expect[m][t_idx] = (e_ops_data[m] * cdata).trace()\n else:\n for m in range(n_expt_op):\n output.expect[m][t_idx] = cy_expect_psi(e_ops_data[m], cdata,\n e_ops[m].isherm)\n\n if t_idx < n_tsteps - 1:\n r.integrate(r.t + dt[t_idx])\n\n progress_bar.finished()\n\n if opt.store_final_state:\n cdata = get_curr_state_data(r)\n if opt.normalize_output:\n cdata /= la_norm(cdata, axis=0)\n # cdata *= norm_dim_factor / la_norm(cdata)\n output.final_state = Qobj(cdata, dims=dims)\n\n return output\n"
] | [
[
"numpy.imag",
"numpy.abs",
"scipy.sparse.issparse",
"numpy.sqrt",
"numpy.linalg.inv",
"scipy.sparse.csr_matrix",
"numpy.real",
"numpy.shape",
"numpy.any",
"numpy.prod",
"scipy.sparse.hstack",
"numpy.array",
"numpy.zeros"
],
[
"numpy.diff",
"numpy.zeros",
"scipy.linalg.norm"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
}
] |
biologioholic/sktime | [
"9d0391a04b11d22bd783b452f01aa5b4529b41a2",
"9d0391a04b11d22bd783b452f01aa5b4529b41a2",
"9d0391a04b11d22bd783b452f01aa5b4529b41a2",
"9d0391a04b11d22bd783b452f01aa5b4529b41a2",
"9d0391a04b11d22bd783b452f01aa5b4529b41a2",
"9d0391a04b11d22bd783b452f01aa5b4529b41a2",
"9d0391a04b11d22bd783b452f01aa5b4529b41a2",
"9d0391a04b11d22bd783b452f01aa5b4529b41a2"
] | [
"sktime/classification/interval_based/tests/test_drcif.py",
"sktime/annotation/adapters/_pyod.py",
"sktime/clustering/utils/plotting/_plot_partitions.py",
"sktime/registry/_lookup.py",
"sktime/datatypes/_panel/_registry.py",
"sktime/classification/early_classification/_probability_threshold.py",
"sktime/datatypes/_proba/_registry.py",
"sktime/forecasting/compose/_stack.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"DrCIF test code.\"\"\"\nimport numpy as np\nfrom sklearn.metrics import accuracy_score\n\nfrom sktime.classification.interval_based import DrCIF\nfrom sktime.datasets import load_unit_test\n\n\ndef test_drcif_train_estimate():\n \"\"\"Test of DrCIF on unit test data.\"\"\"\n # load unit test data\n X_train, y_train = load_unit_test(split=\"train\")\n\n # train DrCIF\n drcif = DrCIF(\n n_estimators=2,\n n_intervals=2,\n att_subsample_size=2,\n random_state=0,\n save_transformed_data=True,\n )\n drcif.fit(X_train, y_train)\n\n # test train estimate\n train_probas = drcif._get_train_probs(X_train, y_train)\n assert train_probas.shape == (20, 2)\n train_preds = drcif.classes_[np.argmax(train_probas, axis=1)]\n assert accuracy_score(y_train, train_preds) >= 0.6\n\n\ndef test_contracted_drcif():\n \"\"\"Test of contracted DrCIF on unit test data.\"\"\"\n # load unit test data\n X_train, y_train = load_unit_test(split=\"train\")\n\n # train contracted DrCIF\n drcif = DrCIF(\n time_limit_in_minutes=0.25,\n contract_max_n_estimators=2,\n n_intervals=2,\n att_subsample_size=2,\n random_state=0,\n )\n drcif.fit(X_train, y_train)\n\n assert len(drcif.estimators_) > 1\n",
"#!/usr/bin/env python3 -u\n# -*- coding: utf-8 -*-\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Implements outlier detection from pyOD.\"\"\"\n\nimport numpy as np\nfrom sklearn.base import clone\n\nfrom sktime.annotation.base._base import BaseSeriesAnnotator\n\n__author__ = [\"mloning\", \"satya-pattnaik\", \"fkiraly\"]\n\nimport pandas as pd\n\n\nclass PyODAnnotator(BaseSeriesAnnotator):\n \"\"\"Transformer that applies outlier detector from pyOD.\n\n Parameters\n ----------\n estimator : PyOD estimator\n See ``https://pyod.readthedocs.io/en/latest/`` documentation for a detailed\n description of all options.\n fmt : str {\"dense\", \"sparse\"}, optional (default=\"dense\")\n Annotation output format:\n * If \"sparse\", a sub-series of labels for only the outliers in X is returned,\n * If \"dense\", a series of labels for all values in X is returned.\n labels : str {\"indicator\", \"score\"}, optional (default=\"indicator\")\n Annotation output labels:\n * If \"indicator\", returned values are boolean, indicating whether a value is an\n outlier,\n * If \"score\", returned values are floats, giving the outlier score.\n \"\"\"\n\n def __init__(self, estimator, fmt=\"dense\", labels=\"indicator\"):\n self.estimator = estimator # pyod estimator\n super(PyODAnnotator, self).__init__(fmt=fmt, labels=labels)\n\n def _fit(self, X, Y=None):\n \"\"\"Fit to training data.\n\n core logic\n\n Parameters\n ----------\n X : pd.DataFrame\n training data to fit model to, time series\n Y : pd.Series, optional\n ground truth annotations for training if annotator is supervised\n Returns\n -------\n self : returns a reference to self\n\n Notes\n -----\n Create fitted model that sets attributes ending in \"_\".\n \"\"\"\n X_np = X.to_numpy()\n\n if len(X_np.shape) == 1:\n X_np = X_np.reshape(-1, 1)\n\n self.estimator_ = clone(self.estimator)\n self.estimator_.fit(X_np)\n\n return self\n\n def _predict(self, X):\n \"\"\"Create annotations on test/deployment data.\n\n Parameters\n ----------\n X : pd.DataFrame - data to annotate, time series\n\n Returns\n -------\n Y : pd.Series - annotations for sequence X\n exact format depends on annotation type\n \"\"\"\n fmt = self.fmt\n labels = self.labels\n\n X_np = X.to_numpy()\n\n if len(X_np.shape) == 1:\n X_np = X_np.reshape(-1, 1)\n\n Y_np = self.estimator_.predict(X_np)\n\n if labels == \"score\":\n Y_val_np = self.estimator_.decision_function(X_np)\n elif labels == \"indicator\":\n Y_val_np = Y_np\n\n if fmt == \"dense\":\n Y = pd.Series(Y_val_np, index=X.index)\n elif fmt == \"sparse\":\n Y_loc = np.where(Y_np)\n Y = pd.Series(Y_val_np[Y_loc], index=X.index[Y_loc])\n\n return Y\n",
"# -*- coding: utf-8 -*-\n\"\"\"Cluster plotting tools.\"\"\"\n\n__author__ = [\"Christopher Holder\", \"Tony Bagnall\"]\n__all__ = [\"plot_cluster_algorithm\"]\n\nimport numpy as np\nimport pandas as pd\n\nfrom sktime.clustering.base import TimeSeriesInstances\nfrom sktime.clustering.partitioning import TimeSeriesLloyds\nfrom sktime.datatypes import convert_to\nfrom sktime.utils.validation._dependencies import _check_soft_dependencies\n\n\ndef _plot(cluster_values, center, axes):\n for cluster_series in cluster_values:\n for cluster in cluster_series:\n axes.plot(cluster, color=\"b\")\n axes.plot(center[0], color=\"r\")\n\n\ndef _get_cluster_values(cluster_indexes: np.ndarray, X: np.ndarray, k: int):\n ts_in_center = []\n for i in range(k):\n curr_indexes = np.where(cluster_indexes == i)[0]\n ts_in_center.append(X[curr_indexes])\n\n return ts_in_center\n\n\ndef plot_series(X: TimeSeriesInstances):\n _check_soft_dependencies(\"matplotlib\")\n import matplotlib.patches as mpatches\n import matplotlib.pyplot as plt\n\n if isinstance(X, pd.DataFrame):\n X = convert_to(X, \"numpy3D\")\n plt.figure(figsize=(5, 10))\n plt.rcParams[\"figure.dpi\"] = 100\n\n fig, axes = plt.subplots(nrows=len(X), ncols=1)\n for i in range(len(X)):\n curr = X[i][0]\n curr_axes = axes[i]\n curr_axes.plot(curr, color=\"b\")\n\n blue_patch = mpatches.Patch(color=\"blue\", label=\"Series that belong to the cluster\")\n plt.legend(\n handles=[blue_patch],\n loc=\"upper center\",\n bbox_to_anchor=(0.5, -0.40),\n fancybox=True,\n shadow=True,\n ncol=5,\n )\n plt.tight_layout()\n plt.show()\n\n\ndef plot_cluster_algorithm(model: TimeSeriesLloyds, X: TimeSeriesInstances, k: int):\n \"\"\"Plot the results from a univariate partitioning algorithm.\n\n Parameters\n ----------\n model: BaseClusterer\n Clustering model to plot\n predict_series: np.ndarray or pd.Dataframe or List[pd.Dataframe]\n The series to predict the values for\n k: int\n Number of centers\n \"\"\"\n _check_soft_dependencies(\"matplotlib\")\n import matplotlib.patches as mpatches\n import matplotlib.pyplot as plt\n\n if isinstance(X, pd.DataFrame):\n predict_series = convert_to(X, \"numpy3D\")\n plt.figure(figsize=(5, 10))\n plt.rcParams[\"figure.dpi\"] = 100\n indexes = model.predict(predict_series)\n\n centers = model.cluster_centers_\n series_values = _get_cluster_values(indexes, predict_series, k)\n\n fig, axes = plt.subplots(nrows=k, ncols=1)\n for i in range(k):\n _plot(series_values[i], centers[i], axes[i])\n\n blue_patch = mpatches.Patch(color=\"blue\", label=\"Series that belong to the cluster\")\n red_patch = mpatches.Patch(color=\"red\", label=\"Cluster centers\")\n plt.legend(\n handles=[red_patch, blue_patch],\n loc=\"upper center\",\n bbox_to_anchor=(0.5, -0.40),\n fancybox=True,\n shadow=True,\n ncol=5,\n )\n plt.tight_layout()\n plt.show()\n",
"# -*- coding: utf-8 -*-\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"\nRegistry lookup methods.\n\nThis module exports the following methods for registry lookup:\n\nall_estimators(estimator_types, filter_tags)\n lookup and filtering of estimators\n\nall_tags(estimator_types)\n lookup and filtering of estimator tags\n\"\"\"\n\nimport inspect\nimport pkgutil\nfrom copy import deepcopy\nfrom importlib import import_module\nfrom operator import itemgetter\nfrom pathlib import Path\n\nimport pandas as pd\n\nfrom sktime.base import BaseEstimator\nfrom sktime.registry._base_classes import (\n BASE_CLASS_LIST,\n BASE_CLASS_LOOKUP,\n TRANSFORMER_MIXIN_LIST,\n)\nfrom sktime.registry._tags import ESTIMATOR_TAG_REGISTER\n\nVALID_TRANSFORMER_TYPES = tuple(TRANSFORMER_MIXIN_LIST)\nVALID_ESTIMATOR_BASE_TYPES = tuple(BASE_CLASS_LIST)\n\nVALID_ESTIMATOR_TYPES = (\n BaseEstimator,\n *VALID_ESTIMATOR_BASE_TYPES,\n *VALID_TRANSFORMER_TYPES,\n)\n\n\ndef all_estimators(\n estimator_types=None,\n filter_tags=None,\n exclude_estimators=None,\n return_names=True,\n as_dataframe=False,\n return_tags=None,\n suppress_import_stdout=True,\n):\n \"\"\"Get a list of all estimators from sktime.\n\n This function crawls the module and gets all classes that inherit\n from sktime's and sklearn's base classes.\n\n Not included are: the base classes themselves, classes defined in test\n modules.\n\n Parameters\n ----------\n estimator_types: str, list of str, optional (default=None)\n Which kind of estimators should be returned.\n if None, no filter is applied and all estimators are returned.\n if str or list of str, strings define scitypes specified in search\n only estimators that are of (at least) one of the scitypes are returned\n possible str values are entries of registry.BASE_CLASS_REGISTER (first col)\n for instance 'classifier', 'regressor', 'transformer', 'forecaster'\n return_names: bool, optional (default=True)\n if True, estimator class name is included in the all_estimators()\n return in the order: name, estimator class, optional tags, either as\n a tuple or as pandas.DataFrame columns\n if False, estimator class name is removed from the all_estimators()\n return.\n filter_tags: dict of (str or list of str), optional (default=None)\n For a list of valid tag strings, use the registry.all_tags utility.\n subsets the returned estimators as follows:\n each key/value pair is statement in \"and\"/conjunction\n key is tag name to sub-set on\n value str or list of string are tag values\n condition is \"key must be equal to value, or in set(value)\"\n exclude_estimators: str, list of str, optional (default=None)\n Names of estimators to exclude.\n as_dataframe: bool, optional (default=False)\n if True, all_estimators will return a pandas.DataFrame with named\n columns for all of the attributes being returned.\n if False, all_estimators will return a list (either a list of\n estimators or a list of tuples, see Returns)\n return_tags: str or list of str, optional (default=None)\n Names of tags to fetch and return each estimator's value of.\n For a list of valid tag strings, use the registry.all_tags utility.\n if str or list of str,\n the tag values named in return_tags will be fetched for each\n estimator and will be appended as either columns or tuple entries.\n suppress_import_stdout : bool, optional. Default=True\n whether to suppress stdout printout upon import.\n\n Returns\n -------\n all_estimators will return one of the following:\n 1. 
list of estimators, if return_names=False, and return_tags is None\n 2. list of tuples (optional estimator name, class, ~optional estimator\n tags), if return_names=True or return_tags is not None.\n 3. pandas.DataFrame if as_dataframe = True\n if list of estimators:\n entries are estimators matching the query,\n in alphabetical order of estimator name\n if list of tuples:\n list of (optional estimator name, estimator, optional estimator\n tags) matching the query, in alphabetical order of estimator name,\n where\n ``name`` is the estimator name as string, and is an\n optional return\n ``estimator`` is the actual estimator\n ``tags`` are the estimator's values for each tag in return_tags\n and is an optional return.\n if dataframe:\n all_estimators will return a pandas.DataFrame.\n column names represent the attributes contained in each column.\n \"estimators\" will be the name of the column of estimators, \"names\"\n will be the name of the column of estimator class names and the string(s)\n passed in return_tags will serve as column names for all columns of\n tags that were optionally requested.\n\n References\n ----------\n Modified version from scikit-learn's `all_estimators()`.\n \"\"\"\n import io\n import sys\n import warnings\n\n MODULES_TO_IGNORE = (\"tests\", \"setup\", \"contrib\", \"benchmarking\", \"utils\")\n\n all_estimators = []\n ROOT = str(Path(__file__).parent.parent) # sktime package root directory\n\n def _is_abstract(klass):\n if not (hasattr(klass, \"__abstractmethods__\")):\n return False\n if not len(klass.__abstractmethods__):\n return False\n return True\n\n def _is_private_module(module):\n return \"._\" in module\n\n def _is_ignored_module(module):\n module_parts = module.split(\".\")\n return any(part in MODULES_TO_IGNORE for part in module_parts)\n\n def _is_base_class(name):\n return name.startswith(\"_\") or name.startswith(\"Base\")\n\n def _is_estimator(name, klass):\n # Check if klass is subclass of base estimators, not an base class itself and\n # not an abstract class\n return (\n issubclass(klass, VALID_ESTIMATOR_TYPES)\n and klass not in VALID_ESTIMATOR_TYPES\n and not _is_abstract(klass)\n and not _is_base_class(name)\n )\n\n # Ignore deprecation warnings triggered at import time and from walking\n # packages\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=FutureWarning)\n warnings.simplefilter(\"module\", category=ImportWarning)\n warnings.filterwarnings(\n \"ignore\", category=UserWarning, message=\".*has been moved to.*\"\n )\n for _, module_name, _ in pkgutil.walk_packages(path=[ROOT], prefix=\"sktime.\"):\n\n # Filter modules\n if _is_ignored_module(module_name) or _is_private_module(module_name):\n continue\n\n try:\n if suppress_import_stdout:\n # setup text trap, import, then restore\n sys.stdout = io.StringIO()\n module = import_module(module_name)\n sys.stdout = sys.__stdout__\n else:\n module = import_module(module_name)\n classes = inspect.getmembers(module, inspect.isclass)\n\n # Filter classes\n estimators = [\n (name, klass)\n for name, klass in classes\n if _is_estimator(name, klass)\n ]\n all_estimators.extend(estimators)\n except ModuleNotFoundError as e:\n # Skip missing soft dependencies\n if \"soft dependency\" not in str(e):\n raise e\n warnings.warn(str(e), ImportWarning)\n\n # Drop duplicates\n all_estimators = set(all_estimators)\n\n # Filter based on given estimator types\n def _is_in_estimator_types(estimator, estimator_types):\n return any(\n [\n issubclass(estimator, estimator_type)\n for 
estimator_type in estimator_types\n ]\n )\n\n if estimator_types:\n estimator_types = _check_estimator_types(estimator_types)\n all_estimators = [\n (name, estimator)\n for name, estimator in all_estimators\n if _is_in_estimator_types(estimator, estimator_types)\n ]\n\n # Filter based on given exclude list\n if exclude_estimators:\n exclude_estimators = _check_list_of_str_or_error(\n exclude_estimators, \"exclude_estimators\"\n )\n all_estimators = [\n (name, estimator)\n for name, estimator in all_estimators\n if name not in exclude_estimators\n ]\n\n # Drop duplicates, sort for reproducibility\n # itemgetter is used to ensure the sort does not extend to the 2nd item of\n # the tuple\n all_estimators = sorted(all_estimators, key=itemgetter(0))\n\n if filter_tags:\n all_estimators = [\n (n, est) for (n, est) in all_estimators if _check_tag_cond(est, filter_tags)\n ]\n\n # remove names if return_names=False\n if not return_names:\n all_estimators = [estimator for (name, estimator) in all_estimators]\n columns = [\"estimator\"]\n else:\n columns = [\"name\", \"estimator\"]\n\n # add new tuple entries to all_estimators for each tag in return_tags:\n if return_tags:\n return_tags = _check_list_of_str_or_error(return_tags, \"return_tags\")\n # enrich all_estimators by adding the values for all return_tags tags:\n if all_estimators:\n if isinstance(all_estimators[0], tuple):\n all_estimators = [\n (name, est) + _get_return_tags(est, return_tags)\n for (name, est) in all_estimators\n ]\n else:\n all_estimators = [\n tuple([est]) + _get_return_tags(est, return_tags)\n for est in all_estimators\n ]\n columns = columns + return_tags\n\n # convert to pandas.DataFrame if as_dataframe=True\n if as_dataframe:\n all_estimators = pd.DataFrame(all_estimators, columns=columns)\n\n return all_estimators\n\n\ndef _check_list_of_str_or_error(arg_to_check, arg_name):\n \"\"\"Check that certain arguments are str or list of str.\n\n Parameters\n ----------\n arg_to_check: argument we are testing the type of\n arg_name: str,\n name of the argument we are testing, will be added to the error if\n ``arg_to_check`` is not a str or a list of str\n\n Returns\n -------\n arg_to_check: list of str,\n if arg_to_check was originally a str it converts it into a list of str\n so that it can be iterated over.\n\n Raises\n ------\n TypeError if arg_to_check is not a str or list of str\n \"\"\"\n # check that return_tags has the right type:\n if isinstance(arg_to_check, str):\n arg_to_check = [arg_to_check]\n if not isinstance(arg_to_check, list) or not all(\n isinstance(value, str) for value in arg_to_check\n ):\n raise TypeError(\n f\"Error in all_estimators! 
Argument {arg_name} must be either\\\n a str or list of str\"\n )\n return arg_to_check\n\n\ndef _get_return_tags(estimator, return_tags):\n \"\"\"Fetch a list of all tags for every_entry of all_estimators.\n\n Parameters\n ----------\n estimator: BaseEstimator, an sktime estimator\n return_tags: list of str,\n names of tags to get values for the estimator\n\n Returns\n -------\n tags: a tuple with all the estimators values for all tags in return tags.\n a value is None if it is not a valid tag for the estimator provided.\n \"\"\"\n tags = tuple(estimator.get_class_tag(tag) for tag in return_tags)\n return tags\n\n\ndef _check_tag_cond(estimator, filter_tags=None, as_dataframe=True):\n \"\"\"Check whether estimator satisfies filter_tags condition.\n\n Parameters\n ----------\n estimator: BaseEstimator, an sktime estimator\n filter_tags: dict of (str or list of str), default=None\n subsets the returned estimators as follows:\n each key/value pair is statement in \"and\"/conjunction\n key is tag name to sub-set on\n value str or list of string are tag values\n condition is \"key must be equal to value, or in set(value)\"\n as_dataframe: bool, default=False\n if False, return is as described below;\n if True, return is converted into a pandas.DataFrame for pretty\n display\n\n Returns\n -------\n cond_sat: bool, whether estimator satisfies condition in filter_tags\n \"\"\"\n if not isinstance(filter_tags, dict):\n raise TypeError(\"filter_tags must be a dict\")\n\n cond_sat = True\n\n for (key, value) in filter_tags.items():\n if not isinstance(value, list):\n value = [value]\n cond_sat = cond_sat and estimator.get_class_tag(key) in set(value)\n\n return cond_sat\n\n\ndef all_tags(\n estimator_types=None,\n as_dataframe=False,\n):\n \"\"\"Get a list of all tags from sktime.\n\n Retrieves tags directly from `_tags`, offers filtering functionality.\n\n Parameters\n ----------\n estimator_types: string, list of string, optional (default=None)\n Which kind of estimators should be returned.\n - If None, no filter is applied and all estimators are returned.\n - Possible values are 'classifier', 'regressor', 'transformer' and\n 'forecaster' to get estimators only of these specific types, or a list of\n these to get the estimators that fit at least one of the types.\n as_dataframe: bool, optional (default=False)\n if False, return is as described below;\n if True, return is converted into a pandas.DataFrame for pretty\n display\n\n Returns\n -------\n tags: list of tuples (a, b, c, d),\n in alphabetical order by a\n a : string - name of the tag as used in the _tags dictionary\n b : string - name of the scitype this tag applies to\n must be in _base_classes.BASE_CLASS_SCITYPE_LIST\n c : string - expected type of the tag value\n should be one of:\n \"bool\" - valid values are True/False\n \"int\" - valid values are all integers\n \"str\" - valid values are all strings\n (\"str\", list_of_string) - any string in list_of_string is valid\n (\"list\", list_of_string) - any individual string and sub-list is valid\n d : string - plain English description of the tag\n \"\"\"\n\n def is_tag_for_type(tag, estimator_types):\n tag_types = tag[1]\n tag_types = _check_list_of_str_or_error(tag_types, \"tag_types\")\n\n if isinstance(estimator_types, str):\n estimator_types = [estimator_types]\n\n tag_types = set(tag_types)\n estimator_types = set(estimator_types)\n is_valid_tag_for_type = len(tag_types.intersection(estimator_types)) > 0\n\n return is_valid_tag_for_type\n\n all_tags = ESTIMATOR_TAG_REGISTER\n\n if 
estimator_types:\n # checking, but not using the return since that is classes, not strings\n _check_estimator_types(estimator_types)\n all_tags = [tag for tag in all_tags if is_tag_for_type(tag, estimator_types)]\n\n all_tags = sorted(all_tags, key=itemgetter(0))\n\n # convert to pd.DataFrame if as_dataframe=True\n if as_dataframe:\n columns = [\"name\", \"scitype\", \"type\", \"description\"]\n all_tags = pd.DataFrame(all_tags, columns=columns)\n\n return all_tags\n\n\ndef _check_estimator_types(estimator_types):\n \"\"\"Return list of classes corresponding to type strings.\"\"\"\n estimator_types = deepcopy(estimator_types)\n\n if not isinstance(estimator_types, list):\n estimator_types = [estimator_types] # make iterable\n\n def _get_err_msg(estimator_type):\n return (\n f\"Parameter `estimator_type` must be None, a string or a list of \"\n f\"strings. Valid string values are: \"\n f\"{tuple(BASE_CLASS_LOOKUP.keys())}, but found: \"\n f\"{repr(estimator_type)}\"\n )\n\n for i, estimator_type in enumerate(estimator_types):\n if not isinstance(estimator_type, (type, str)):\n raise ValueError(\n \"Please specify `estimator_types` as a list of str or \" \"types.\"\n )\n if isinstance(estimator_type, str):\n if estimator_type not in BASE_CLASS_LOOKUP.keys():\n raise ValueError(_get_err_msg(estimator_type))\n estimator_type = BASE_CLASS_LOOKUP[estimator_type]\n estimator_types[i] = estimator_type\n elif isinstance(estimator_type, type):\n pass\n else:\n raise ValueError(_get_err_msg(estimator_type))\n return estimator_types\n",
"# -*- coding: utf-8 -*-\n\nimport pandas as pd\n\n__all__ = [\n \"MTYPE_REGISTER_PANEL\",\n \"MTYPE_LIST_PANEL\",\n]\n\n\nMTYPE_REGISTER_PANEL = [\n (\n \"nested_univ\",\n \"Panel\",\n \"pd.DataFrame with one column per variable, pd.Series in cells\",\n ),\n (\n \"numpy3D\",\n \"Panel\",\n \"3D np.array of format (n_instances, n_columns, n_timepoints)\",\n ),\n (\n \"numpyflat\",\n \"Panel\",\n \"WARNING: only for internal use, not a fully supported Panel mtype. \"\n \"2D np.array of format (n_instances, n_columns*n_timepoints)\",\n ),\n (\"pd-multiindex\", \"Panel\", \"pd.DataFrame with multi-index (instances, timepoints)\"),\n (\"pd-wide\", \"Panel\", \"pd.DataFrame in wide format, cols = (instance*timepoints)\"),\n (\n \"pd-long\",\n \"Panel\",\n \"pd.DataFrame in long format, cols = (index, time_index, column)\",\n ),\n (\"df-list\", \"Panel\", \"list of pd.DataFrame\"),\n]\n\nMTYPE_LIST_PANEL = pd.DataFrame(MTYPE_REGISTER_PANEL)[0].values\n",
"# -*- coding: utf-8 -*-\n\"\"\"Probability Threshold Early Classifier.\n\nAn early classifier using a prediction probability threshold with a time series\nclassifier.\n\"\"\"\n\n__author__ = [\"MatthewMiddlehurst\"]\n__all__ = [\"ProbabilityThresholdEarlyClassifier\"]\n\nimport copy\n\nimport numpy as np\nfrom joblib import Parallel, delayed\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.utils import check_random_state\n\nfrom sktime.base._base import _clone_estimator\nfrom sktime.classification.base import BaseClassifier\nfrom sktime.classification.interval_based import CanonicalIntervalForest\nfrom sktime.utils.validation.panel import check_X\n\n\nclass ProbabilityThresholdEarlyClassifier(BaseClassifier):\n \"\"\"Probability Threshold Early Classifier.\n\n An early classifier which uses a threshold of prediction probability to determine\n whether an early prediction is safe or not.\n\n Overview:\n Build n classifiers, where n is the number of classification_points.\n While a prediction is still deemed unsafe:\n Make a prediction using the series length at classification point i.\n Decide whether the predcition is safe or not using decide_prediction_safety.\n\n Parameters\n ----------\n probability_threshold : float, default=0.85\n The class prediction probability required to deem a prediction as safe.\n consecutive_predictions : int, default=1\n The number of consecutive predictions for a class above the threshold required\n to deem a prediction as safe.\n estimator: sktime classifier, default=None\n An sktime estimator to be built using the transformed data. Defaults to a\n CanonicalIntervalForest.\n classification_points : List or None, default=None\n List of integer time series time stamps to build classifiers and allow\n predictions at. Early predictions must have a series length that matches a value\n in the _classification_points List. Duplicate values will be removed, and the\n full series length will be appeneded if not present.\n If None, will use 20 thresholds linearly spaces from 0 to the series length.\n n_jobs : int, default=1\n The number of jobs to run in parallel for both `fit` and `predict`.\n ``-1`` means using all processors.\n random_state : int or None, default=None\n Seed for random number generation.\n\n Attributes\n ----------\n n_classes_ : int\n The number of classes.\n classes_ : list\n The unique class labels.\n\n Examples\n --------\n >>> from sktime.classification.early_classification import (\n ... ProbabilityThresholdEarlyClassifier\n ... )\n >>> from sktime.classification.interval_based import TimeSeriesForestClassifier\n >>> from sktime.datasets import load_unit_test\n >>> X_train, y_train = load_unit_test(split=\"train\", return_X_y=True)\n >>> X_test, y_test = load_unit_test(split=\"test\", return_X_y=True)\n >>> clf = ProbabilityThresholdEarlyClassifier(\n ... classification_points=[6, 16, 24],\n ... estimator=TimeSeriesForestClassifier(n_estimators=10)\n ... 
)\n >>> clf.fit(X_train, y_train)\n ProbabilityThresholdEarlyClassifier(...)\n >>> y_pred = clf.predict(X_test)\n \"\"\"\n\n _tags = {\n \"capability:multivariate\": True,\n \"capability:multithreading\": True,\n \"capability:early_prediction\": True,\n }\n\n def __init__(\n self,\n probability_threshold=0.85,\n consecutive_predictions=1,\n estimator=None,\n classification_points=None,\n n_jobs=1,\n random_state=None,\n ):\n self.probability_threshold = probability_threshold\n self.consecutive_predictions = consecutive_predictions\n self.estimator = estimator\n self.classification_points = classification_points\n\n self.n_jobs = n_jobs\n self.random_state = random_state\n\n self._estimators = []\n self._classification_points = []\n\n super(ProbabilityThresholdEarlyClassifier, self).__init__()\n\n def _fit(self, X, y):\n m = getattr(self.estimator, \"predict_proba\", None)\n if not callable(m):\n raise ValueError(\"Base estimator must have a predict_proba method.\")\n\n _, _, series_length = X.shape\n\n self._classification_points = (\n copy.deepcopy(self.classification_points)\n if self.classification_points is not None\n else [round(series_length / i) for i in range(1, 21)]\n )\n # remove duplicates\n self._classification_points = list(set(self._classification_points))\n self._classification_points.sort()\n # remove classification points that are less than 3 time stamps\n self._classification_points = [i for i in self._classification_points if i >= 3]\n # make sure the full series length is included\n if self._classification_points[-1] != series_length:\n self._classification_points.append(series_length)\n # create dictionary of classification point indices\n self._classification_point_dictionary = {}\n for index, classification_point in enumerate(self._classification_points):\n self._classification_point_dictionary[classification_point] = index\n\n m = getattr(self.estimator, \"n_jobs\", None)\n threads = self._threads_to_use if m is None else 1\n\n self._estimators = Parallel(n_jobs=threads)(\n delayed(self._fit_estimator)(\n X,\n y,\n i,\n )\n for i in range(len(self._classification_points))\n )\n\n return self\n\n def _predict(self, X) -> np.ndarray:\n rng = check_random_state(self.random_state)\n return np.array(\n [\n self.classes_[int(rng.choice(np.flatnonzero(prob == prob.max())))]\n for prob in self._predict_proba(X)\n ]\n )\n\n def _predict_proba(self, X) -> np.ndarray:\n _, _, series_length = X.shape\n idx = self._classification_point_dictionary.get(series_length, -1)\n if idx == -1:\n raise ValueError(\n f\"Input series length does not match the classification points produced\"\n f\" in fit. Current classification points: {self._classification_points}\"\n )\n\n return self._estimators[idx].predict_proba(X)\n\n def decide_prediction_safety(self, X, X_probabilities, state_info):\n \"\"\"Decide on the safety of an early classification.\n\n Parameters\n ----------\n X : 3D np.array (any number of dimensions, equal length series) of shape =\n [n_instances,n_dimensions,series_length] or pd.DataFrame with each column a\n dimension, each cell a pd.Series (any number of dimensions, equal or unequal\n length series).\n The prediction time series data.\n X_probabilities : 2D numpy array of shape = [n_instances,n_classes].\n The predicted probabilities for X.\n state_info : List or None\n A List containing the state info for each decision in X. contains\n information for future decisions on the data. 
Inputs should be None for the\n first decision made, the returned List new_state_info for subsequent\n decisions.\n\n Returns\n -------\n decisions : List\n A List of booleans, containing the decision of whether a prediction is safe\n to use or not.\n new_state_info : List\n A List containing the state info for each decision in X, contains\n information for future decisions on the data.\n \"\"\"\n X = check_X(X, coerce_to_numpy=True)\n\n n_instances, _, series_length = X.shape\n idx = self._classification_point_dictionary.get(series_length, -1)\n\n if idx == -1:\n raise ValueError(\n f\"Input series length does not match the classification points produced\"\n f\" in fit. Current classification points: {self._classification_points}\"\n )\n\n # If this is the smallest dataset, there should be no state_info, else we\n # should have state info for each, and they should all be the same length\n if state_info is None and (\n idx == 0 or idx == len(self._classification_points) - 1\n ):\n state_info = [(0, 0, 0) for _ in range(n_instances)]\n elif isinstance(state_info, list) and idx > 0:\n if not all(si[0] == idx for si in state_info):\n raise ValueError(\"All input instances must be of the same length.\")\n else:\n raise ValueError(\n \"state_info should be None for first time input, and a list of \"\n \"state_info outputs from the previous decision making for later inputs.\"\n )\n\n # if we have the full series, always return true\n if idx == len(self._classification_points) - 1:\n return [True for _ in range(n_instances)], None\n\n # find predicted class for each instance\n rng = check_random_state(self.random_state)\n preds = [\n int(rng.choice(np.flatnonzero(prob == prob.max())))\n for prob in X_probabilities\n ]\n\n # make a decision based on probability threshold, record consecutive class\n # decisions\n decisions = [\n X_probabilities[i][preds[i]] >= self.probability_threshold\n for i in range(n_instances)\n ]\n new_state_info = [\n (\n # next classification point index\n idx + 1,\n # consecutive predictions, add one if positive decision and same class\n state_info[i][1] + 1 if decisions[i] and preds[i] == state_info[i][2]\n # set to 0 if the decision is negative, 1 if its positive but different\n # class\n else 1 if decisions[i] else 0,\n # predicted class index\n preds[i],\n )\n for i in range(n_instances)\n ]\n\n # return the safety decisions and new state information for the instances\n if self.consecutive_predictions < 2:\n return decisions, new_state_info\n else:\n return [\n True if new_state_info[i][1] >= self.consecutive_predictions else False\n for i in range(n_instances)\n ], new_state_info\n\n def _fit_estimator(self, X, y, i):\n rs = 255 if self.random_state == 0 else self.random_state\n rs = None if self.random_state is None else rs * 37 * (i + 1)\n rng = check_random_state(rs)\n\n estimator = _clone_estimator(\n CanonicalIntervalForest() if self.estimator is None else self.estimator,\n rng,\n )\n\n m = getattr(estimator, \"n_jobs\", None)\n if m is not None:\n estimator.n_jobs = self._threads_to_use\n\n estimator.fit(X[:, :, : self._classification_points[i]], y)\n\n return estimator\n\n @classmethod\n def get_test_params(cls, parameter_set=\"default\"):\n \"\"\"Return testing parameter settings for the estimator.\n\n Parameters\n ----------\n parameter_set : str, default=\"default\"\n Name of the set of test parameters to return, for use in tests. 
If no\n special parameters are defined for a value, will return `\"default\"` set.\n\n\n Returns\n -------\n params : dict or list of dict, default = {}\n Parameters to create testing instances of the class.\n \"\"\"\n from sktime.classification.feature_based import Catch22Classifier\n\n params = {\n \"classification_points\": [3],\n \"estimator\": Catch22Classifier(\n estimator=RandomForestClassifier(n_estimators=2)\n ),\n }\n return params\n",
"# -*- coding: utf-8 -*-\n\nimport pandas as pd\n\n__all__ = [\n \"MTYPE_REGISTER_PROBA\",\n \"MTYPE_LIST_PROBA\",\n]\n\n\nMTYPE_REGISTER_PROBA = [\n (\"pred_interval\", \"Proba\", \"predictive intervals\"),\n (\"pred_quantiles\", \"Proba\", \"quantile predictions\"),\n (\"pred_var\", \"Proba\", \"variance predictions\"),\n # (\"pred_dost\", \"Proba\", \"full distribution predictions, tensorflow-probability\"),\n]\n\nMTYPE_LIST_PROBA = pd.DataFrame(MTYPE_REGISTER_PROBA)[0].values\n",
"#!/usr/bin/env python3 -u\n# -*- coding: utf-8 -*-\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Implements forecasters for combining forecasts via stacking.\"\"\"\n\n__author__ = [\"mloning\", \"fkiraly\"]\n__all__ = [\"StackingForecaster\"]\n\nfrom warnings import warn\n\nimport numpy as np\nimport pandas as pd\n\nfrom sktime.forecasting.base._meta import _HeterogenousEnsembleForecaster\nfrom sktime.forecasting.model_selection import SingleWindowSplitter\nfrom sktime.utils.validation.forecasting import check_regressor\n\n\nclass StackingForecaster(_HeterogenousEnsembleForecaster):\n \"\"\"StackingForecaster.\n\n Stacks two or more Forecasters and uses a meta-model (regressor) to infer\n the final predictions from the predictions of the given forecasters.\n\n Parameters\n ----------\n forecasters : list of (str, estimator) tuples\n Estimators to apply to the input series.\n regressor: sklearn-like regressor, optional, default=None.\n The regressor is used as a meta-model and trained with the predictions\n of the ensemble forecasters as exog data and with y as endog data. The\n length of the data is dependent to the given fh. If None, then\n a GradientBoostingRegressor(max_depth=5) is used.\n The regressor can also be a sklearn.Pipeline().\n random_state : int, RandomState instance or None, default=None\n Used to set random_state of the default regressor.\n n_jobs : int or None, optional (default=None)\n The number of jobs to run in parallel for fit. None means 1 unless\n in a joblib.parallel_backend context.\n -1 means using all processors.\n\n Attributes\n ----------\n regressor_ : sklearn-like regressor\n Fitted meta-model (regressor)\n\n Examples\n --------\n >>> from sktime.forecasting.compose import StackingForecaster\n >>> from sktime.forecasting.naive import NaiveForecaster\n >>> from sktime.forecasting.trend import PolynomialTrendForecaster\n >>> from sktime.datasets import load_airline\n >>> y = load_airline()\n >>> forecasters = [\n ... (\"trend\", PolynomialTrendForecaster()),\n ... (\"naive\", NaiveForecaster()),\n ... 
]\n >>> forecaster = StackingForecaster(forecasters=forecasters)\n >>> forecaster.fit(y=y, fh=[1,2,3])\n StackingForecaster(...)\n >>> y_pred = forecaster.predict()\n \"\"\"\n\n _required_parameters = [\"forecasters\"]\n _tags = {\n \"ignores-exogeneous-X\": True,\n \"requires-fh-in-fit\": True,\n \"handles-missing-data\": False,\n \"scitype:y\": \"univariate\",\n }\n\n def __init__(self, forecasters, regressor=None, random_state=None, n_jobs=None):\n super(StackingForecaster, self).__init__(forecasters=forecasters, n_jobs=n_jobs)\n self.regressor = regressor\n self.random_state = random_state\n\n def _fit(self, y, X=None, fh=None):\n \"\"\"Fit to training data.\n\n Parameters\n ----------\n y : pd.Series\n Target time series to which to fit the forecaster.\n fh : int, list or np.array, optional (default=None)\n The forecasters horizon with the steps ahead to to predict.\n X : pd.DataFrame, optional (default=None)\n Exogenous variables are ignored\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n _, forecasters = self._check_forecasters()\n self.regressor_ = check_regressor(\n regressor=self.regressor, random_state=self.random_state\n )\n\n # split training series into training set to fit forecasters and\n # validation set to fit meta-learner\n cv = SingleWindowSplitter(fh=fh.to_relative(self.cutoff))\n train_window, test_window = next(cv.split(y))\n y_fcst = y.iloc[train_window]\n y_meta = y.iloc[test_window].values\n if X is not None:\n X_meta = X.iloc[test_window]\n else:\n X_meta = None\n\n # fit forecasters on training window\n self._fit_forecasters(forecasters, y_fcst, fh=fh, X=X)\n X_meta = np.column_stack(self._predict_forecasters(fh=fh, X=X_meta))\n\n # fit final regressor on on validation window\n self.regressor_.fit(X_meta, y_meta)\n\n # refit forecasters on entire training series\n self._fit_forecasters(forecasters, y, fh=self.fh, X=X)\n\n return self\n\n def _update(self, y, X=None, update_params=True):\n \"\"\"Update fitted parameters.\n\n Parameters\n ----------\n y : pd.Series\n X : pd.DataFrame\n update_params : bool, optional (default=True)\n\n Returns\n -------\n self : an instance of self\n \"\"\"\n if update_params:\n warn(\"Updating `final regressor is not implemented\")\n for forecaster in self.forecasters_:\n forecaster.update(y, X, update_params=update_params)\n return self\n\n def _predict(self, fh=None, X=None):\n \"\"\"Forecast time series at future horizon.\n\n Parameters\n ----------\n fh : int, list, np.array or ForecastingHorizon\n Forecasting horizon\n X : pd.DataFrame, optional (default=None)\n Exogenous time series\n\n Returns\n -------\n y_pred : pd.Series\n Point predictions\n \"\"\"\n y_preds = np.column_stack(self._predict_forecasters(fh=fh, X=X))\n y_pred = self.regressor_.predict(y_preds)\n # index = y_preds.index\n index = self.fh.to_absolute(self.cutoff)\n return pd.Series(y_pred, index=index)\n\n @classmethod\n def get_test_params(cls, parameter_set=\"default\"):\n \"\"\"Return testing parameter settings for the estimator.\n\n Parameters\n ----------\n parameter_set : str, default=\"default\"\n Name of the set of test parameters to return, for use in tests. If no\n special parameters are defined for a value, will return `\"default\"` set.\n\n\n Returns\n -------\n params : dict or list of dict\n \"\"\"\n from sktime.forecasting.naive import NaiveForecaster\n\n FORECASTER = NaiveForecaster()\n params = {\"forecasters\": [(\"f1\", FORECASTER), (\"f2\", FORECASTER)]}\n return params\n"
] | [
[
"numpy.argmax",
"sklearn.metrics.accuracy_score"
],
[
"sklearn.base.clone",
"numpy.where",
"pandas.Series"
],
[
"matplotlib.pyplot.legend",
"matplotlib.patches.Patch",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"numpy.where",
"matplotlib.pyplot.figure"
],
[
"pandas.DataFrame"
],
[
"pandas.DataFrame"
],
[
"sklearn.utils.check_random_state",
"sklearn.ensemble.RandomForestClassifier"
],
[
"pandas.DataFrame"
],
[
"pandas.Series"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
sasmirnov/numba-dppy | [
"6ec41a5adab3034ddcfba2df312117afd6e2327b"
] | [
"numba_dppy/dpnp_glue/dpnp_array_creations_impl.py"
] | [
"# Copyright 2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numba_dppy.dpnp_glue.dpnpimpl as dpnp_ext\nfrom numba import types\nfrom numba.core.typing import signature\nfrom . import stubs\nimport numba_dppy.dpnp_glue as dpnp_lowering\nfrom numba.core.extending import overload, register_jitable\nimport numpy as np\nfrom numba_dppy import dpctl_functions\nimport numba_dppy\n\n\n@register_jitable\ndef common_impl(a, b, out, dpnp_func, PRINT_DEBUG):\n if a.size == 0:\n raise ValueError(\"Passed Empty array\")\n\n sycl_queue = dpctl_functions.get_current_queue()\n\n b_usm = dpctl_functions.malloc_shared(b.size * b.itemsize, sycl_queue)\n dpctl_functions.queue_memcpy(sycl_queue, b_usm, b.ctypes, b.size * b.itemsize)\n\n out_usm = dpctl_functions.malloc_shared(out.size * out.itemsize, sycl_queue)\n\n dpnp_func(out_usm, b_usm, a.size)\n\n dpctl_functions.queue_memcpy(\n sycl_queue, out.ctypes, out_usm, out.size * out.itemsize\n )\n\n dpctl_functions.free_with_queue(b_usm, sycl_queue)\n dpctl_functions.free_with_queue(out_usm, sycl_queue)\n\n dpnp_ext._dummy_liveness_func([a.size, out.size])\n\n if PRINT_DEBUG:\n print(\"dpnp implementation\")\n\n\n@register_jitable\ndef common_shape_impl(a, out, dpnp_func, PRINT_DEBUG):\n if a.size == 0:\n raise ValueError(\"Passed Empty array\")\n\n sycl_queue = dpctl_functions.get_current_queue()\n\n a_usm = dpctl_functions.malloc_shared(a.size * a.itemsize, sycl_queue)\n dpctl_functions.queue_memcpy(sycl_queue, a_usm, a.ctypes, a.size * a.itemsize)\n\n out_usm = dpctl_functions.malloc_shared(out.size * out.itemsize, sycl_queue)\n\n dpnp_func(a_usm, out_usm, a.shapeptr, a.ndim)\n\n dpctl_functions.queue_memcpy(\n sycl_queue, out.ctypes, out_usm, out.size * out.itemsize\n )\n\n dpctl_functions.free_with_queue(a_usm, sycl_queue)\n dpctl_functions.free_with_queue(out_usm, sycl_queue)\n\n dpnp_ext._dummy_liveness_func([a.size, out.size])\n\n if PRINT_DEBUG:\n print(\"dpnp implementation\")\n\n\n@overload(stubs.dpnp.zeros_like)\ndef dpnp_zeros_like_impl(a, dtype=None):\n name = \"zeros_like\"\n dpnp_lowering.ensure_dpnp(name)\n\n ret_type = types.void\n \"\"\"\n dpnp source:\n https://github.com/IntelPython/dpnp/blob/0.5.1/dpnp/backend/kernels/dpnp_krnl_common.cpp#L224\n\n Function declaration:\n void dpnp_initval_c(void* result1, void* value, size_t size)\n\n \"\"\"\n res_dtype = dtype\n if dtype == types.none:\n res_dtype = a.dtype\n name_dtype = res_dtype.name\n else:\n name_dtype = res_dtype.dtype.name\n\n sig = signature(ret_type, types.voidptr, types.voidptr, types.intp)\n dpnp_func = dpnp_ext.dpnp_func(\"dpnp_\" + name, [name_dtype, \"NONE\"], sig)\n\n PRINT_DEBUG = dpnp_lowering.DEBUG\n\n def dpnp_impl(a, dtype=None):\n b = np.zeros(1, dtype=res_dtype)\n out = np.zeros(a.shape, dtype=res_dtype)\n common_impl(a, b, out, dpnp_func, PRINT_DEBUG)\n return out\n\n return dpnp_impl\n\n\n@overload(stubs.dpnp.ones_like)\ndef dpnp_ones_like_impl(a, dtype=None):\n name = \"ones_like\"\n dpnp_lowering.ensure_dpnp(name)\n\n ret_type = 
types.void\n \"\"\"\n dpnp source:\n https://github.com/IntelPython/dpnp/blob/0.5.1/dpnp/backend/kernels/dpnp_krnl_common.cpp#L224\n\n Function declaration:\n void dpnp_initval_c(void* result1, void* value, size_t size)\n\n \"\"\"\n res_dtype = dtype\n if dtype == types.none:\n res_dtype = a.dtype\n name_dtype = res_dtype.name\n else:\n name_dtype = res_dtype.dtype.name\n\n sig = signature(ret_type, types.voidptr, types.voidptr, types.intp)\n dpnp_func = dpnp_ext.dpnp_func(\"dpnp_\" + name, [name_dtype, \"NONE\"], sig)\n\n PRINT_DEBUG = dpnp_lowering.DEBUG\n\n def dpnp_impl(a, dtype=None):\n b = np.ones(1, dtype=res_dtype)\n out = np.ones(a.shape, dtype=res_dtype)\n common_impl(a, b, out, dpnp_func, PRINT_DEBUG)\n return out\n\n return dpnp_impl\n\n\n@overload(stubs.dpnp.full_like)\ndef dpnp_full_like_impl(a, b):\n name = \"full_like\"\n dpnp_lowering.ensure_dpnp(name)\n\n ret_type = types.void\n \"\"\"\n dpnp source:\n https://github.com/IntelPython/dpnp/blob/0.5.1/dpnp/backend/kernels/dpnp_krnl_common.cpp#L224\n\n Function declaration:\n void dpnp_initval_c(void* result1, void* value, size_t size)\n\n \"\"\"\n sig = signature(ret_type, types.voidptr, types.voidptr, types.intp)\n dpnp_func = dpnp_ext.dpnp_func(\"dpnp_\" + name, [b.dtype.name, \"NONE\"], sig)\n\n res_dtype = b.dtype\n PRINT_DEBUG = dpnp_lowering.DEBUG\n\n def dpnp_impl(a, b):\n out = np.ones(a.shape, dtype=res_dtype)\n common_impl(a, b, out, dpnp_func, PRINT_DEBUG)\n return out\n\n return dpnp_impl\n\n\n# TODO: This implementation is incorrect\n@overload(stubs.dpnp.full)\ndef dpnp_full_impl(a, b):\n name = \"full\"\n dpnp_lowering.ensure_dpnp(name)\n\n ret_type = types.void\n \"\"\"\n dpnp source:\n https://github.com/IntelPython/dpnp/blob/0.5.1/dpnp/backend/kernels/dpnp_krnl_arraycreation.cpp#L70\n\n Function declaration:\n void dpnp_full_c(void* array_in, void* result, const size_t size)\n\n \"\"\"\n sig = signature(ret_type, types.voidptr, types.voidptr, types.intp)\n dpnp_func = dpnp_ext.dpnp_func(\"dpnp_\" + name, [b.dtype.name, \"NONE\"], sig)\n\n res_dtype = b.dtype\n PRINT_DEBUG = dpnp_lowering.DEBUG\n\n def dpnp_impl(a, b):\n if a.size == 0:\n raise ValueError(\"Passed Empty array\")\n\n sycl_queue = dpctl_functions.get_current_queue()\n\n b_usm = dpctl_functions.malloc_shared(b.size * b.itemsize, sycl_queue)\n dpctl_functions.queue_memcpy(sycl_queue, b_usm, b.ctypes, b.size * b.itemsize)\n\n out = np.arange(a.size, dtype=res_dtype)\n out_usm = dpctl_functions.malloc_shared(out.size * out.itemsize, sycl_queue)\n\n dpnp_func(b_usm, out_usm, a.size)\n\n dpctl_functions.queue_memcpy(\n sycl_queue, out.ctypes, out_usm, out.size * out.itemsize\n )\n\n dpctl_functions.free_with_queue(b_usm, sycl_queue)\n dpctl_functions.free_with_queue(out_usm, sycl_queue)\n\n dpnp_ext._dummy_liveness_func([a.size, out.size])\n\n if PRINT_DEBUG:\n print(\"dpnp implementation\")\n return out\n\n return dpnp_impl\n\n\n@overload(stubs.dpnp.trace)\ndef dpnp_trace_impl(a):\n name = \"trace\"\n dpnp_lowering.ensure_dpnp(name)\n\n ret_type = types.void\n \"\"\"\n dpnp source:\n https://github.com/IntelPython/dpnp/blob/0.6.2/dpnp/backend/kernels/dpnp_krnl_arraycreation.cpp#L218\n\n Function declaration:\n void dpnp_trace_c(const void* array1_in, void* result1, const size_t* shape_, const size_t ndim)\n\n \"\"\"\n sig = signature(ret_type, types.voidptr, types.voidptr, types.voidptr, types.intp)\n dpnp_func = dpnp_ext.dpnp_func(\"dpnp_\" + name, [a.dtype.name, \"NONE\"], sig)\n\n PRINT_DEBUG = dpnp_lowering.DEBUG\n\n def dpnp_impl(a):\n diag_arr 
= numba_dppy.dpnp.diagonal(a, 0)\n out = np.zeros(diag_arr.shape[:-1], dtype=a.dtype)\n common_shape_impl(diag_arr, out, dpnp_func, PRINT_DEBUG)\n return out\n\n return dpnp_impl\n"
] | [
[
"numpy.arange",
"numpy.zeros",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
computerwala/tensorflow | [
"97164413d009aa6506f269eff7fb78411419146d",
"766eb63f2f3e43cd8b23c1cbb05fe63dd918ffa3"
] | [
"tensorflow/python/ops/ragged/ragged_math_ops.py",
"tensorflow/python/autograph/impl/api.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Support for ragged tensors.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import gen_ragged_math_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.ragged import ragged_functional_ops\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.ops.ragged import ragged_util\nfrom tensorflow.python.ops.ragged import segment_id_ops\n\n\n#===============================================================================\n# ragged.range\n#===============================================================================\n# pylint: disable=redefined-builtin\ndef range(starts, limits=None, deltas=1, dtype=None, name=None):\n \"\"\"Returns a `RaggedTensor` containing the specified sequences of numbers.\n\n Each row of the returned `RaggedTensor` contains a single sequence:\n\n ```python\n ragged.range(starts, limits, deltas)[i] ==\n tf.range(starts[i], limits[i], deltas[i])\n ```\n\n If `start[i] < limits[i] and deltas[i] > 0`, then `output[i]` will be an\n empty list. Similarly, if `start[i] > limits[i] and deltas[i] < 0`, then\n `output[i]` will be an empty list. This behavior is consistent with the\n Python `range` function, but differs from the `tf.range` op, which returns\n an error for these cases.\n\n Examples:\n\n ```python\n >>> ragged.range([3, 5, 2]).eval().tolist()\n [[0, 1, 2], [0, 1, 2, 3, 4], [0, 1]]\n >>> ragged.range([0, 5, 8], [3, 3, 12]).eval().tolist()\n [[0, 1, 2], [], [8, 9, 10, 11]]\n >>> ragged.range([0, 5, 8], [3, 3, 12], 2).eval().tolist()\n [[0, 2], [], [8, 10]]\n ```\n\n The input tensors `starts`, `limits`, and `deltas` may be scalars or vectors.\n The vector inputs must all have the same size. Scalar inputs are broadcast\n to match the size of the vector inputs.\n\n Args:\n starts: Vector or scalar `Tensor`. Specifies the first entry for each range\n if `limits` is not `None`; otherwise, specifies the range limits, and the\n first entries default to `0`.\n limits: Vector or scalar `Tensor`. Specifies the exclusive upper limits for\n each range.\n deltas: Vector or scalar `Tensor`. Specifies the increment for each range.\n Defaults to `1`.\n dtype: The type of the elements of the resulting tensor. 
If not specified,\n then a value is chosen based on the other args.\n name: A name for the operation.\n\n Returns:\n A `RaggedTensor` of type `dtype` with `ragged_rank=1`.\n \"\"\"\n if limits is None:\n starts, limits = 0, starts\n\n with ops.name_scope(name, 'RaggedRange', [starts, limits, deltas]) as name:\n starts = ops.convert_to_tensor(starts, dtype=dtype, name='starts')\n limits = ops.convert_to_tensor(limits, dtype=dtype, name='limits')\n deltas = ops.convert_to_tensor(deltas, dtype=dtype, name='deltas')\n\n # infer dtype if not explicitly provided\n if dtype is None:\n starts, limits, deltas = _infer_matching_dtype(\n [starts, limits, deltas],\n [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64])\n\n result = gen_ragged_math_ops.ragged_range(starts, limits, deltas, name=name)\n return ragged_tensor.RaggedTensor.from_row_splits(result.rt_dense_values,\n result.rt_nested_splits)\n\n\ndef _infer_matching_dtype(tensors, dtype_hierarchy):\n \"\"\"Infers a matching dtype for tensors, and casts them to that dtype.\"\"\"\n assert all(t.dtype in dtype_hierarchy for t in tensors)\n inferred_dtype = max([t.dtype for t in tensors], key=dtype_hierarchy.index)\n return [math_ops.cast(t, inferred_dtype) for t in tensors]\n\n\n#===============================================================================\n# ragged_segment_<AGGREGATE>\n#===============================================================================\n\n# Docstring template used for the raggged_segment_<AGGREGATE> ops.\n_RAGGED_SEGMENT_DOCSTRING = \"\"\"\\\nComputes the %(combination)s along segments of a RaggedTensor.\n\n Returns a RaggedTensor `output` with `num_segments` rows, where the row\n `output[i]` is formed by taking the %(combination)s of all rows of `data`\n whose corresponding `segment_id` is `i`.\n\n The length of the row `output[i]` will be the maximum of the lengths of\n all rows of `data` whose corresponding `segment_id` is `i`. If no `data`\n rows correspond to a given segment ID, then the output row for that segment\n ID will be empty.\n\n Args:\n data: A `RaggedTensor` containing the values to combine.\n segment_ids: A `Tensor` or `RaggedTensor`. Must have type `int64` or\n `int32`. `segment_ids.shape` must be a prefix of `data.shape`.\n Must be greater than or equal to zero, and less than `num_segments`.\n `segment_ids` is not required to be sorted.\n num_segments: An `int32` or `int64` scalar specifying the number of\n distinct segment ids.\n name: A name prefix for the returned tensor (optional).\n Returns:\n A `RaggedTensor` containing the %(combined)s values. The returned tensor\n has the same dtype as `data`, and its shape is\n `[num_segments] + data.shape[segment_ids.rank:]`.\n Raises:\n ValueError: If `segment_ids.shape` is not a prefix of `data.shape`.\n\"\"\"\n\n\ndef _ragged_segment_aggregate(unsorted_segment_op,\n data,\n segment_ids,\n num_segments,\n name=None):\n \"\"\"Aggregates along segments of a RaggedTensor using `unsorted_segment_op`.\n\n Returns a RaggedTensor `output` with `num_segments` rows, where the row\n `output[i]` is formed by combining all rows of `data` whose corresponding\n `segment_id` is `i`. The values in each row are combined using\n `unsorted_segment_op`.\n\n The length of the row `output[i]` will be the maximum of the lengths of\n all rows of `data` whose corresponding `segment_id` is `i`. 
If no `data`\n rows correspond to a given segment ID, then the output row for that segment\n ID will be empty.\n\n Args:\n unsorted_segment_op: The tensorflow `op` that should be used to combine\n values in each row. Must have the same signature and basic behavior as\n `unsorted_segment_sum`, `unsorted_segment_max`, etc.\n data: A `RaggedTensor` containing the values to be combined.\n segment_ids: A `Tensor` or `RaggedTensor`. Must have type `int64` or\n `int32`. `segment_ids.shape` must be a prefix of `data.shape`.\n `segment_ids` is not required to be sorted.\n num_segments: An `int32` or `int64` scalar.\n name: A name prefix for the returned tensor (optional).\n\n Returns:\n A `RaggedTensor` containing the aggregated values. The returned tensor\n has the same dtype as `data`, and its shape is\n `[num_segments] + data.shape[segment_ids.rank:]`.\n Raises:\n ValueError: If segment_ids.shape is not a prefix of data.shape.\n \"\"\"\n if not (ragged_tensor.is_ragged(data) or\n ragged_tensor.is_ragged(segment_ids)):\n return unsorted_segment_op(data, segment_ids, num_segments, name)\n\n with ops.name_scope(name, 'RaggedSegment',\n [data, segment_ids, num_segments]) as name:\n data = ragged_tensor.convert_to_tensor_or_ragged_tensor(data, name='data')\n segment_ids = ragged_tensor.convert_to_tensor_or_ragged_tensor(\n segment_ids, name='segment_ids')\n\n if ragged_tensor.is_ragged(segment_ids):\n if not ragged_tensor.is_ragged(data):\n raise ValueError('segment_ids.shape must be a prefix of data.shape, '\n 'but segment_ids is ragged and data is not.')\n check_splits = check_ops.assert_equal(\n segment_ids.row_splits,\n data.row_splits,\n message='segment_ids.shape must be a prefix of data.shape')\n with ops.control_dependencies([check_splits]):\n return _ragged_segment_aggregate(unsorted_segment_op, data.values,\n segment_ids.values, num_segments, name)\n\n segment_ids = math_ops.cast(segment_ids, dtypes.int64)\n\n # Find the length of each row in data. (dtype=int64, shape=[data_nrows])\n data_row_lengths = data.row_splits[1:] - data.row_splits[:-1]\n\n # Find the length that each output row will have. The length of the row\n # corresponding to segment `id` is `max(data_row_lengths[i])` where\n # `segment_ids[i]=id`. 
(dtype=int64, shape=[output_nrows])\n output_row_lengths = math_ops.maximum(\n math_ops.unsorted_segment_max(data_row_lengths, segment_ids,\n num_segments), 0)\n assert output_row_lengths.dtype == dtypes.int64\n\n # Build the splits tensor for the output RaggedTensor.\n output_splits = array_ops.concat([\n array_ops.zeros([1], dtypes.int64),\n math_ops.cumsum(output_row_lengths)\n ],\n axis=0)\n\n # For each row in `data`, find the start & limit position where that row's\n # values will be aggregated in output.values.\n data_row_to_out_row_start = array_ops.gather(output_splits, segment_ids)\n data_row_to_out_row_limit = data_row_to_out_row_start + data_row_lengths\n\n # For each value in `data.values`, find the position where it will\n # aggregated in `output.values`.\n # Get the target output values index for each data values index.\n data_val_to_out_val_index = range(data_row_to_out_row_start,\n data_row_to_out_row_limit).values\n\n # Recursively aggregate the values.\n output_values = _ragged_segment_aggregate(unsorted_segment_op, data.values,\n data_val_to_out_val_index,\n output_splits[-1])\n return ragged_tensor.RaggedTensor.from_row_splits(output_values,\n output_splits)\n\n\ndef segment_sum(data, segment_ids, num_segments, name=None):\n # For docs, see: _RAGGED_SEGMENT_DOCSTRING\n return _ragged_segment_aggregate(math_ops.unsorted_segment_sum, data,\n segment_ids, num_segments, name or\n 'RaggedSegmentSum')\n\n\ndef segment_prod(data, segment_ids, num_segments, name=None):\n # For docs, see: _RAGGED_SEGMENT_DOCSTRING\n return _ragged_segment_aggregate(math_ops.unsorted_segment_prod, data,\n segment_ids, num_segments, name or\n 'RaggedSegmentProd')\n\n\ndef segment_min(data, segment_ids, num_segments, name=None):\n # For docs, see: _RAGGED_SEGMENT_DOCSTRING\n return _ragged_segment_aggregate(math_ops.unsorted_segment_min, data,\n segment_ids, num_segments, name or\n 'RaggedSegmentMin')\n\n\ndef segment_max(data, segment_ids, num_segments, name=None):\n # For docs, see: _RAGGED_SEGMENT_DOCSTRING\n return _ragged_segment_aggregate(math_ops.unsorted_segment_max, data,\n segment_ids, num_segments, name or\n 'RaggedSegmentMax')\n\n\ndef segment_mean(data, segment_ids, num_segments, name=None):\n \"\"\"For docs, see: _RAGGED_SEGMENT_DOCSTRING.\"\"\"\n with ops.name_scope(name, 'RaggedSegmentMean',\n [data, segment_ids, num_segments]):\n total = segment_sum(data, segment_ids, num_segments)\n ones = ragged_tensor.RaggedTensor.from_nested_row_splits(\n array_ops.ones_like(data.flat_values), data.nested_row_splits)\n count = segment_sum(ones, segment_ids, num_segments)\n if ragged_tensor.is_ragged(total):\n return total.with_flat_values(total.flat_values / count.flat_values)\n else:\n return total / count\n\n\ndef segment_sqrt_n(data, segment_ids, num_segments, name=None):\n \"\"\"For docs, see: _RAGGED_SEGMENT_DOCSTRING.\"\"\"\n with ops.name_scope(name, 'RaggedSegmentSqrtN',\n [data, segment_ids, num_segments]):\n total = segment_sum(data, segment_ids, num_segments)\n ones = ragged_tensor.RaggedTensor.from_nested_row_splits(\n array_ops.ones_like(data.flat_values), data.nested_row_splits)\n count = segment_sum(ones, segment_ids, num_segments)\n if ragged_tensor.is_ragged(total):\n return total.with_flat_values(\n total.flat_values / math_ops.sqrt(count.flat_values))\n else:\n return total / math_ops.sqrt(count)\n\n\ndef _set_ragged_segment_docstring(func, combination, combined):\n func.__doc__ = _RAGGED_SEGMENT_DOCSTRING % dict(\n combination=combination, 
combined=combined)\n\n\n_set_ragged_segment_docstring(segment_sum, 'sum', 'summed')\n_set_ragged_segment_docstring(segment_prod, 'product', 'multiplied')\n_set_ragged_segment_docstring(segment_min, 'minimum', 'minimized')\n_set_ragged_segment_docstring(segment_max, 'maximum', 'maximized')\n_set_ragged_segment_docstring(segment_mean, 'mean', 'averaged')\n_set_ragged_segment_docstring(segment_sqrt_n, 'sum divided by sqrt(N)',\n 'summed')\n\n#===============================================================================\n# ragged_reduce_<AGGREGATE>\n#===============================================================================\n\n# Docstring template used for ragged_reduce_<AGGREGATE> ops.\n_RAGGED_REDUCE_DOCSTRING = \"\"\"\\\nComputes the %(combination)s of elements across dimensions of a `RaggedTensor`.\n\n Reduces `input_tensor` along the dimensions given in `axis` by taking the\n %(combination)s of values. If a reduced dimension has no elements for\n some index, then the value for that index will be %(default)s.\n\n The rank of the tensor is reduced by `1` for each entry in `axis`. If\n `axis` is not specified, then all dimensions are reduced, and a scalar\n value is returned.\n Args:\n input_tensor: A `RaggedTensor` containing the values to be %(combined)s.\n axis: The dimensions to reduce. May be `None` (to reduce all axes), an\n `int` (to reduce a single axis), a `list` or `tuple` of `int` (to reduce\n a given set of axes), or a `Tensor` with a constant value. Must be in\n the range `[0, input_tensor.rank]`.\n name: A name prefix for the returned tensor (optional).\n Returns:\n A `RaggedTensor` containing the %(combined)s values. The returned tensor\n has the same dtype as `data`, and its shape is given by removing the\n dimensions specified in `axis` from `input_tensor.shape`. 
The `ragged_rank`\n of the returned tensor is given by substracting any ragged dimensions\n specified in `axis` from `input_tensor.ragged_rank`.\n Raises:\n ValueError: If `axis` contains a `Tensor` whose value is not constant.\n ####Example:\n ```python%(example)s ```\n\"\"\"\n_RAGGED_REDUCE_SUM_EXAMPLE = \"\"\"\n >>> rt = ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])\n >>> ragged.reduce_sum(rt, axis=0).eval().tolist()\n [15, 12, 4] # = [3+1+9+2, 1+5+6, 4]\n >>> ragged.reduce_sum(rt, axis=1).eval().tolist()\n [8, 6, 9, 8] # = [3+1+4, 1+5, 9, 2+6]\n\"\"\"\n_RAGGED_REDUCE_PROD_EXAMPLE = \"\"\"\n >>> rt = ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])\n >>> ragged.reduce_prod(rt, axis=0).eval().tolist()\n [54, 30, 4] # = [3*1*9*2, 1*5*6, 4]\n >>> ragged.reduce_prod(rt, axis=1).eval().tolist()\n [12, 5, 9, 12] # = [3*1*4, 1*5, 9, 2*6]\n\"\"\"\n_RAGGED_REDUCE_MIN_EXAMPLE = \"\"\"\n >>> rt = ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])\n >>> ragged.reduce_min(rt, axis=0).eval().tolist()\n [1, 1, 4] # = [min(3, 1, 9, 2), min(1, 5, 6), 4]\n >>> ragged.reduce_min(rt, axis=1).eval().tolist()\n [1, 1, 9, 2] # = [min(3, 1, 4), min(1, 5), 9, min(2, 6)]\n\"\"\"\n_RAGGED_REDUCE_MAX_EXAMPLE = \"\"\"\n >>> rt = ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])\n >>> ragged.reduce_max(rt, axis=0).eval().tolist()\n [9, 6, 4] # = [max(3, 1, 9, 2), max(1, 5, 6), 4]\n >>> ragged.reduce_max(rt, axis=1).eval().tolist()\n [4, 5, 9, 6] # = [max(3, 1, 4), max(1, 5), 9, max(2, 6)]\n\"\"\"\n_RAGGED_REDUCE_MEAN_EXAMPLE = \"\"\"\n >>> rt = ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])\n >>> ragged.reduce_mean(rt, axis=0).eval().tolist()\n [3.75, 4, 4] # = [mean(3, 1, 9, 2), mean(1, 5, 6), 4]\n >>> ragged.reduce_mean(rt, axis=1).eval().tolist()\n [2.66666, 3, 9, 4] # = [mean(3, 1, 4), mean(1, 5), 9, mean(2, 6)]\n\"\"\"\n_RAGGED_REDUCE_ALL_EXAMPLE = \"\"\"\n >>> rt = ragged.constant([[True, True], [True, True, False, True], [False, True]])\n >>> ragged.reduce_all(rt, axis=0).eval().tolist()\n [False, True, False, True]\n >>> ragged.reduce_all(rt, axis=1).eval().tolist()\n [True, False, False]\n\"\"\"\n_RAGGED_REDUCE_ANY_EXAMPLE = \"\"\"\n >>> rt = ragged.constant([[True, True], [True, True, False, True], [False, True]])\n >>> ragged.reduce_any(rt, axis=0).eval().tolist()\n [True, True, False, True]\n >>> ragged.reduce_any(rt, axis=1).eval().tolist()\n [True, True, True]\n\"\"\"\n\n\ndef _ragged_reduce_aggregate(reduce_op,\n unsorted_segment_op,\n rt_input,\n axis,\n keepdims,\n name=None):\n \"\"\"Aggregates across axes of a RaggedTensor using the given `Tensor` ops.\n\n Reduces `rt_input` along the dimensions given in `axis`. The rank of the\n tensor is reduced by 1 for each entry in `axis`. If `axis` is not specified,\n then all dimensions are reduced, and a scalar value is returned.\n\n This op assumes that `reduce_op` and `unsorted_segment_op` are associative;\n if not, then reducing multiple axes will return incorrect results. (In\n particular, reducing multiple axes is currently implemented by reducing the\n axes one at a time.)\n\n Args:\n reduce_op: The tensorflow `op` that should be used to reduce values in\n uniform dimensions. Must have the same signature and basic behavior as\n `reduce_sum`, `reduce_max`, etc.\n unsorted_segment_op: The tensorflow `op` that should be used to combine\n values in ragged dimensions. 
Must have the same signature and basic\n behavior as `unsorted_segment_sum`, `unsorted_segment_max`, etc.\n rt_input: A `Tensor` or `RaggedTensor` containing the values to be reduced.\n axis: The axis or axes to reduce. May be `None` (to reduce all axes), an\n `int` (to reduce a single axis), a `list` or `tuple` of `int` (to reduce a\n given set of axes), or a `Tensor` with a constant value. Must be in the\n range `[0, rt_input.rank)`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name prefix for the returned tensor (optional).\n\n Returns:\n A `RaggedTensor` containing the reduced values. The returned tensor\n has the same dtype as `data`, and its shape is given by removing the\n dimensions specified in `axis` from `rt_input.shape`. The `ragged_rank`\n of the returned tensor is given by substracting any ragged dimensions\n specified in `axis` from `rt_input.ragged_rank`.\n Raises:\n ValueError: If `axis` contains a `Tensor` whose value is not constant.\n \"\"\"\n if not ragged_tensor.is_ragged(rt_input):\n return reduce_op(rt_input, axis, name=name)\n\n if keepdims:\n raise ValueError('keepdims=True is not supported for RaggedTensors.')\n\n if isinstance(axis, ops.Tensor):\n axis = tensor_util.constant_value(axis)\n if axis is None:\n raise ValueError('axis must be known at graph construction time.')\n if isinstance(axis, np.ndarray):\n axis = axis.tolist()\n\n # When reducing all axes, just ignore splits & reduce the inner values.\n if axis is None:\n return reduce_op(rt_input.flat_values, None, name=name)\n\n with ops.name_scope(name, 'RaggedReduce', [rt_input, axis]):\n if isinstance(axis, (tuple, list)):\n if not axis:\n return rt_input\n elif len(axis) == 1:\n axis = axis[0]\n else:\n # When reducing multiple axes, just reduce one at a time. This is less\n # efficient, and only works for associative ops. (In particular, it\n # does not work for reduce_mean.) 
However, reducing multiple axes at\n # once will probably require a nontrivial c++ op.\n axis = sorted(axis)\n inner_reduced = _ragged_reduce_aggregate(reduce_op, unsorted_segment_op,\n rt_input, axis[-1], keepdims)\n return _ragged_reduce_aggregate(reduce_op, unsorted_segment_op,\n inner_reduced, axis[:-1], keepdims)\n\n rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(\n rt_input, name='rt_input')\n\n axis = ragged_util.get_positive_axis(axis, rt_input.shape.ndims)\n\n if axis == 0:\n # out[i_1, i_2, ..., i_N] = sum_{j} rt_input[j, i_1, i_2, ..., i_N]\n row_lengths = rt_input.row_splits[1:] - rt_input.row_splits[:-1]\n num_segments = math_ops.maximum(math_ops.reduce_max(row_lengths), 0)\n segment_ids = range(row_lengths).values\n return _ragged_segment_aggregate(unsorted_segment_op, rt_input.values,\n segment_ids, num_segments)\n elif axis == 1:\n # out[i_0, i_1, i_2, ..., i_N] = sum_{j} rt_input[i_0, j, i_2, ..., i_N]\n num_segments = array_ops.shape(rt_input.row_splits)[0] - 1\n segment_ids = segment_id_ops.row_splits_to_segment_ids(\n rt_input.row_splits)\n return _ragged_segment_aggregate(unsorted_segment_op, rt_input.values,\n segment_ids, num_segments)\n else:\n # out[i_0, ..., i_[axis-1], i_axis+1], ..., i_N] =\n # sum_{j} rt_input [i_0, ..., i_[axis-1], j, i_axis+1], ..., i_N]\n return rt_input.with_values(\n _ragged_reduce_aggregate(reduce_op, unsorted_segment_op,\n rt_input.values, axis - 1, keepdims))\n\n\ndef reduce_sum(input_tensor, axis=None, keepdims=None, name=None):\n \"\"\"For docs, see: _RAGGED_REDUCE_DOCSTRING.\"\"\"\n return _ragged_reduce_aggregate(math_ops.reduce_sum,\n math_ops.unsorted_segment_sum, input_tensor,\n axis, keepdims, name or 'RaggedReduceSum')\n\n\ndef reduce_prod(input_tensor, axis=None, keepdims=None, name=None):\n \"\"\"For docs, see: _RAGGED_REDUCE_DOCSTRING.\"\"\"\n return _ragged_reduce_aggregate(math_ops.reduce_prod,\n math_ops.unsorted_segment_prod, input_tensor,\n axis, keepdims, name or 'RaggedReduceProd')\n\n\ndef reduce_min(input_tensor, axis=None, keepdims=None, name=None):\n \"\"\"For docs, see: _RAGGED_REDUCE_DOCSTRING.\"\"\"\n return _ragged_reduce_aggregate(math_ops.reduce_min,\n math_ops.unsorted_segment_min, input_tensor,\n axis, keepdims, name or 'RaggedReduceMin')\n\n\ndef reduce_max(input_tensor, axis=None, keepdims=None, name=None):\n \"\"\"For docs, see: _RAGGED_REDUCE_DOCSTRING.\"\"\"\n return _ragged_reduce_aggregate(math_ops.reduce_max,\n math_ops.unsorted_segment_max, input_tensor,\n axis, keepdims, name or 'RaggedReduceMax')\n\n\ndef reduce_mean(input_tensor, axis=None, keepdims=None, name=None):\n \"\"\"For docs, see: _RAGGED_REDUCE_DOCSTRING.\"\"\"\n with ops.name_scope(name, 'RaggedReduceMean', [input_tensor, axis]):\n total = reduce_sum(input_tensor, axis, keepdims)\n if ragged_tensor.is_ragged(input_tensor):\n ones = ragged_tensor.RaggedTensor.from_nested_row_splits(\n array_ops.ones_like(input_tensor.flat_values),\n input_tensor.nested_row_splits)\n else:\n ones = array_ops.ones_like(input_tensor)\n count = reduce_sum(ones, axis, keepdims)\n if ragged_tensor.is_ragged(total):\n return ragged_tensor.RaggedTensor.from_nested_row_splits(\n total.flat_values / count.flat_values, total.nested_row_splits)\n else:\n return total / count\n\n\ndef _cast(input_tensor, dtype):\n return ragged_functional_ops.map_flat_values(math_ops.cast, input_tensor,\n dtype)\n\n\ndef reduce_all(input_tensor, axis=None, keepdims=None, name=None):\n \"\"\"For docs, see: _RAGGED_REDUCE_DOCSTRING.\"\"\"\n with ops.name_scope(name, 
'RaggedReduceAll', [input_tensor, axis]):\n return _cast(\n reduce_prod(_cast(input_tensor, dtypes.int32), axis, keepdims),\n dtypes.bool)\n\n\ndef reduce_any(input_tensor, axis=None, keepdims=None, name=None):\n \"\"\"For docs, see: _RAGGED_REDUCE_DOCSTRING.\"\"\"\n with ops.name_scope(name, 'RaggedReduceAny', [input_tensor, axis]):\n return _cast(\n reduce_sum(_cast(input_tensor, dtypes.int32), axis, keepdims),\n dtypes.bool)\n\n\ndef _set_ragged_reduce_docstring(func, combination, combined, default, example):\n func.__doc__ = _RAGGED_REDUCE_DOCSTRING % dict(\n combination=combination,\n combined=combined,\n default=default,\n example=example)\n\n\n_set_ragged_reduce_docstring(reduce_sum, 'sum', 'summed', '0',\n _RAGGED_REDUCE_SUM_EXAMPLE)\n_set_ragged_reduce_docstring(reduce_prod, 'product', 'multiplied', '1',\n _RAGGED_REDUCE_PROD_EXAMPLE)\n_set_ragged_reduce_docstring(reduce_min, 'minimum', 'minimized',\n '`input_tensor.dtype.min`',\n _RAGGED_REDUCE_MIN_EXAMPLE)\n_set_ragged_reduce_docstring(reduce_max, 'maximum', 'maximized',\n '`input_tensor.dtype.max`',\n _RAGGED_REDUCE_MAX_EXAMPLE)\n_set_ragged_reduce_docstring(reduce_mean, 'mean', 'averaged', 'NaN',\n _RAGGED_REDUCE_MEAN_EXAMPLE)\n\n_set_ragged_reduce_docstring(reduce_all, 'logical and', 'and-ed', 'True',\n _RAGGED_REDUCE_ALL_EXAMPLE)\n_set_ragged_reduce_docstring(reduce_any, 'logical or', 'or-ed', 'False',\n _RAGGED_REDUCE_ANY_EXAMPLE)\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"This module contains the user-facing API for AutoGraph.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport sys\n\nfrom enum import Enum\n\n# pylint:disable=g-bad-import-order\nimport numpy as np\n# pylint:enable=g-bad-import-order\n\n\nfrom tensorflow.python.autograph.core import config\nfrom tensorflow.python.autograph.core import converter\nfrom tensorflow.python.autograph.impl import conversion\nfrom tensorflow.python.autograph.operators import py_builtins\nfrom tensorflow.python.autograph.pyct import compiler\nfrom tensorflow.python.autograph.pyct import inspect_utils\nfrom tensorflow.python.autograph.utils import py_func\nfrom tensorflow.python.data.util import nest\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import tf_decorator\nfrom tensorflow.python.util import tf_inspect\n\n# TODO(mdan): Properly document the type hints.\n# TODO(mdan): Reduce the type hint information to (module, type).\n# (currently we require (module + class name, type))\n\n\n# TODO(mdan): This should behave like to_graph (e.g. convert statically).\n# TODO(znado): Make an alias so can write Verbosity directly without needing\n# to write converter.\ndef convert(\n recursive=False,\n verbose=converter.Verbosity.BRIEF,\n optional_features=converter.Feature.ALL):\n \"\"\"Decorator that compiles a function to use TensorFlow ops.\n\n The decorator is dynamic - it recompiles the target whenever the decorated\n function is called. This means the parameter values are known at conversion.\n It also means that repeated calls with different types of parameters will be\n correctly processed.\n\n Args:\n recursive: bool, whether to recursively convert any functions or classes\n that the converted function may use.\n verbose: converter.Verbosity, the level of verbosity.\n optional_features: converted.Feature, allows toggling optional or\n experimental features. 
When set to None, only the core features are\n enabled.\n\n Returns:\n Callable, a decorator that converts the given function into an equivalent\n function that uses TensorFlow ops.\n \"\"\"\n\n def decorator(f):\n \"\"\"Decorator implementation.\"\"\"\n\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n return converted_call(\n f, None,\n converter.ConversionOptions(\n recursive=recursive,\n verbose=verbose,\n force_conversion=True,\n optional_features=optional_features,\n ), *args, **kwargs)\n\n wrapper = tf_decorator.make_decorator(f, wrapper)\n\n # Sometimes the decorator is just desugared, making it impossible to detect.\n # This attribute makes detection easier.\n setattr(wrapper, '__ag_compiled', True)\n return wrapper\n\n return decorator\n\n\nclass RunMode(Enum):\n \"\"\"Specifies the way a converted function or method should be executed in TF.\n\n Attributes:\n * GRAPH: Call this function directly, as-is. This is suitable for functions\n that were already designed for TF graphs and contain ops.\n * PY_FUNC: Wrap this function into a py_func op. This is suitable for code\n that will only run correctly in Python, for example code that renders\n to the display, reads keyboard input, etc.\n \"\"\"\n GRAPH = 1\n PY_FUNC = 2\n\n\ndef do_not_convert(run_as=RunMode.GRAPH, return_dtypes=None):\n \"\"\"Decorator that suppresses the conversion of a function.\n\n See also: docs/pyfunc_dtypes.md\n\n Args:\n run_as: RunMode, specifies how to use the function in TensorFlow.\n return_dtypes: Optional[Iterable[ Union[tf.DType,\n utils.py_func.MatchDType]]], the return data types of the converted\n function, if run_as is RunMode.PY_FUNC. Ignored otherwise. May be set to\n None if the function has no return values.\n\n Returns:\n Callable, a decorator that wraps the original function.\n \"\"\"\n\n def decorator(f):\n \"\"\"Decorator implementation.\"\"\"\n\n @functools.wraps(f)\n def graph_wrapper(*args, **kwargs):\n return f(*args, **kwargs)\n\n @functools.wraps(f)\n def py_func_wrapper(*args, **kwargs):\n if kwargs:\n raise NotImplementedError('RunMode.PY_FUNC does not yet support kwargs')\n # TODO(mdan): Add support for kwargs.\n return py_func.wrap_py_func(\n f, return_dtypes, args, kwargs, use_dummy_return=not return_dtypes)\n\n if run_as == RunMode.GRAPH:\n wrapper = graph_wrapper\n elif run_as == RunMode.PY_FUNC:\n wrapper = py_func_wrapper\n else:\n raise ValueError('unknown value for run_as: %s' % run_as)\n\n # Sometimes the decorator is just desugared, making it impossible to detect.\n # This attribute makes detection easier.\n setattr(wrapper, '__ag_compiled', True)\n return wrapper\n\n return decorator\n\n\n# TODO(mdan): Move to a private, undocumented module.\ndef converted_call(f, owner, options, *args, **kwargs):\n \"\"\"Compiles a function call inline. For internal use only.\"\"\"\n if options.verbose >= converter.Verbosity.VERBOSE:\n logging.info('Converted call: {}; owner: {}'.format(f, owner))\n\n if owner is not None:\n if not isinstance(f, str):\n raise ValueError(\n 'When owner is specified, the function name must be specified as'\n ' a string: {}'.format(f))\n\n # Special case when the owner is a 'super' object. In that case lookups of\n # dynamic attributes won't work. 
See\n # inspect_utils.SuperWrapperForDynamicAttrs.\n if isinstance(owner, super):\n owner = inspect_utils.SuperWrapperForDynamicAttrs(owner)\n\n f = getattr(owner, f)\n\n if inspect_utils.isbuiltin(f):\n return py_builtins.overload_of(f)(*args, **kwargs)\n\n # TODO(mdan): This needs cleanup.\n # In particular, we may want to avoid renaming functions altogether.\n if not options.force_conversion and conversion.is_whitelisted_for_graph(f):\n\n # Args typically include `self`, as required by the conversion process.\n # When conversion is skipped, `self` is not necessary, because the\n # original bound method is being executed. This code removes it.\n if tf_inspect.ismethod(f) and args:\n f_class = inspect_utils.getmethodclass(f)\n if args[0] is f_class:\n args = args[1:]\n\n return f(*args, **kwargs)\n\n # internal_convert_user_code is for example turned off when issuing a dynamic\n # call conversion from generated code while in nonrecursive mode. In that\n # case we evidently don't want to recurse, but we still have to convert\n # things like builtins.\n if not options.internal_convert_user_code:\n return f(*args, **kwargs)\n\n # Unwrap functools.partial objects\n # TODO(allenl, mdan): Consider sharing unwrapping logic with tf_inspect.\n while isinstance(f, functools.partial):\n args = f.args + args\n new_kwargs = {}\n if f.keywords is not None:\n new_kwargs.update(f.keywords)\n new_kwargs.update(kwargs)\n kwargs = new_kwargs\n f = f.func\n\n if tf_inspect.isfunction(f) or tf_inspect.ismethod(f):\n # Regular functions\n target_entity = f\n arg_map_target = f\n f_class = inspect_utils.getmethodclass(f)\n\n # TODO(b/119246461): This may be more elegantly handled using __get__?\n if f_class is not None:\n # If this is a method call, it may or may not include self.\n #\n # Example when self is included:\n # converted_call(to_graph(foo.bar), foo)\n #\n # Example when self is not included:\n # super(...).foo(args)\n #\n if owner is not None and (not args or args[0] is not owner):\n effective_args = (owner,) + args\n else:\n # When the owner is not specified, use the result of\n # inspect_utils.getmethodclass.\n # TODO(b/119246461): Make sure an owner is always specified.\n if not args or args[0] is not f_class:\n effective_args = (f_class,) + args\n else:\n effective_args = (f_class,) + args[1:]\n partial_types = (f_class,)\n else:\n effective_args = args\n partial_types = ()\n\n elif tf_inspect.isclass(f):\n # Constructors\n target_entity = f\n arg_map_target = f.__init__\n effective_args = args\n partial_types = ()\n\n elif hasattr(f, '__call__') and hasattr(f, '__class__'):\n # Callable objects\n target_entity = f.__call__\n arg_map_target = f.__call__\n effective_args = (f,) + args\n partial_types = (f.__class__,)\n\n else:\n NotImplementedError('unknown callable type \"%s\"' % type(f))\n\n arg_values = tf_inspect.getcallargs(arg_map_target, *args, **kwargs)\n arg_types = {}\n for name, arg in arg_values.items():\n arg_class = arg.__class__\n arg_types[name] = (arg_class.__name__, arg_class)\n\n # When called from within a decorator, this is the only indication that\n # the function is a method - it appears that the decorator is applied\n # before the method is bound.\n if not partial_types:\n if 'self' in arg_values:\n if tf_inspect.isclass(arg_values['self'].__class__):\n partial_types = (arg_values['self'].__class__,)\n elif 'cls' in arg_values:\n if tf_inspect.isclass(arg_values['cls']):\n partial_types = (arg_values['cls'],)\n\n converted_f = to_graph(\n target_entity,\n 
recursive=options.recursive,\n verbose=options.verbose,\n arg_values=arg_values,\n arg_types=arg_types,\n partial_types=partial_types,\n strip_decorators=options.strip_decorators,\n optional_features=options.optional_features)\n\n result = converted_f(*effective_args, **kwargs)\n\n # The converted function's closure is simply inserted into the function's\n # module __dict__. Since modules are permanently cached, that results in\n # leaking the entire closure.\n # Normally, it's not safe to delete the module because that may release said\n # closure as well. However, in the case of converted_call we are certain the\n # function will not be executed again, so the closure should no longer be\n # needed so long as the function doesn't return any executable code.\n # TODO(mdan): Attach the closure properly, using cells.\n if all(map(_is_not_callable, nest.flatten(result))):\n del sys.modules[converted_f.__module__]\n\n return result\n\n\ndef _is_not_callable(obj):\n # TODO(brianklee): Handle case when obj is a tensor dependent on a py_func.\n if isinstance(obj, (int, float, complex, str, bool)):\n return True\n if isinstance(obj, (np.ndarray, np.generic)):\n return True\n if tensor_util.is_tensor(obj):\n return True\n return False\n\n\n# TODO(mdan): Rename: to_ops?\n# TODO(mdan): Look into overloading as function and decorator, like tfe.defun?\n# TODO(mdan): Remove partial_types.\ndef to_graph(e,\n recursive=True,\n verbose=converter.Verbosity.VERBOSE,\n arg_values=None,\n arg_types=None,\n partial_types=None,\n strip_decorators=None,\n optional_features=converter.Feature.ALL):\n \"\"\"Converts a Python entity into equivalent code that uses TensorFlow ops.\n\n Supported Python entities include:\n * functions\n * classes\n\n Classes are converted by converting all their methods into a new class.\n\n Args:\n e: Union[Callable, Type], the Python entity to convert.\n recursive: bool, whether to recursively convert any functions that the\n converted function may call.\n verbose: converter.Verbosity, the level of printing verbosity to use.\n arg_values: Optional[Dict[Text, Any]], value hints for symbols including\n function arguments.\n arg_types: Optional[Dict[Text, Type]], type hints for symbols including\n function arguments.\n partial_types: Set[Type], reserved for internal use.\n strip_decorators: Tuple[Callable], same as\n ConversionOptions.strip_decorators.\n optional_features: Union[Feature, Set[Feature]], same as\n ConversionOptions.optional_features.\n\n Returns:\n Union[Callable, Type], the converted entity, which is the same kind as e\n (that is, a function is e is a function, a class if e is a class, etc.) 
but\n its code has been converted to use TF ops.\n\n Raises:\n ValueError: If the entity could not be converted.\n \"\"\"\n if strip_decorators is None:\n strip_decorators = ()\n strip_decorators += (convert, do_not_convert, converted_call)\n\n program_ctx = converter.ProgramContext(\n options=converter.ConversionOptions(\n recursive=recursive,\n verbose=verbose,\n strip_decorators=strip_decorators,\n optional_features=optional_features),\n partial_types=partial_types,\n autograph_module=tf_inspect.getmodule(to_graph),\n uncompiled_modules=config.DEFAULT_UNCOMPILED_MODULES)\n _, name, namespace = conversion.entity_to_graph(e, program_ctx, arg_values,\n arg_types)\n\n nodes = []\n for dep in reversed(program_ctx.conversion_order):\n nodes.extend(program_ctx.dependency_cache[dep])\n\n compiled_module, _ = compiler.ast_to_object(\n nodes,\n source_prefix=program_ctx.required_imports,\n include_source_map=True)\n\n # The compiled code should see everything the entry entity saw.\n # TODO(mdan): This might not work well if the call tree spans modules?\n for key, val in namespace.items():\n # Avoid overwriting entities that have been transformed.\n if key not in compiled_module.__dict__:\n compiled_module.__dict__[key] = val\n compiled = getattr(compiled_module, name)\n\n if tf_inspect.isfunction(e):\n compiled.__defaults__ = e.__defaults__\n\n if hasattr(compiled, '__globals__'):\n # Remove self to avoid circular references. This will probably only work\n # so long as the function is not reentrant.\n del compiled.__globals__[name]\n\n # Need this so the source_mapping attribute is available for the context\n # manager to access for runtime errors.\n #\n # Note that compiler.ast_to_object attaches the source map 'ag_source_map__'\n # symbol to the compiled module.\n # TODO(mdan): Record this statically in the generated code.\n # TODO(mdan): Rename this attribute to 'autograph_info__'\n source_map_attribute_name = 'ag_source_map'\n if getattr(compiled, source_map_attribute_name, None) is not None:\n raise ValueError('cannot convert %s because is has an attribute '\n '\"%s\", which is reserved for AutoGraph.' 
%\n (compiled, source_map_attribute_name))\n setattr(compiled, source_map_attribute_name,\n compiled_module.__dict__['ag_source_map__'])\n\n return compiled\n\n\ndef to_code(e,\n recursive=True,\n arg_values=None,\n arg_types=None,\n partial_types=None,\n indentation=' '):\n \"\"\"Returns the equivalent code that uses TensorFlow ops.\n\n Also see: `to_graph`, `convert`\n\n Args:\n e: Union[Callable, Type], the Python entity to convert.\n recursive: bool, whether to recursively convert any functions that the\n converted function may call.\n arg_values: Optional[Dict[Text, Any]], value hints for symbols including\n function arguments.\n arg_types: Optional[Dict[Text, Type]], type hints for symbols including\n function arguments.\n partial_types: Set[Type], reserved for internal use.\n indentation: Text, when to use for each level of indentation.\n\n Returns:\n Text, the converted code.\n \"\"\"\n program_ctx = converter.ProgramContext(\n options=converter.ConversionOptions(\n recursive=recursive,\n strip_decorators=(convert, do_not_convert, converted_call)),\n partial_types=partial_types,\n autograph_module=tf_inspect.getmodule(to_graph),\n uncompiled_modules=config.DEFAULT_UNCOMPILED_MODULES)\n conversion.entity_to_graph(e, program_ctx, arg_values, arg_types)\n\n code = '\\n'.join(\n compiler.ast_to_source(program_ctx.dependency_cache[dep], indentation)\n for dep in reversed(program_ctx.conversion_order))\n\n return program_ctx.required_imports + '\\n\\n' + code\n"
] | [
[
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.math_ops.reduce_max",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.math_ops.sqrt",
"tensorflow.python.ops.ragged.segment_id_ops.row_splits_to_segment_ids",
"tensorflow.python.ops.ragged.ragged_util.get_positive_axis",
"tensorflow.python.ops.gen_ragged_math_ops.ragged_range",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.ragged.ragged_tensor.convert_to_tensor_or_ragged_tensor",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.math_ops.unsorted_segment_max",
"tensorflow.python.ops.check_ops.assert_equal",
"tensorflow.python.ops.ragged.ragged_tensor.is_ragged",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.ragged.ragged_functional_ops.map_flat_values",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_row_splits",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_nested_row_splits",
"tensorflow.python.ops.math_ops.cumsum",
"tensorflow.python.framework.ops.name_scope"
],
[
"tensorflow.python.util.tf_inspect.isclass",
"tensorflow.python.util.tf_inspect.getcallargs",
"tensorflow.python.util.tf_inspect.ismethod",
"tensorflow.python.autograph.pyct.inspect_utils.getmethodclass",
"tensorflow.python.autograph.pyct.compiler.ast_to_object",
"tensorflow.python.autograph.utils.py_func.wrap_py_func",
"tensorflow.python.autograph.pyct.inspect_utils.SuperWrapperForDynamicAttrs",
"tensorflow.python.autograph.operators.py_builtins.overload_of",
"tensorflow.python.autograph.core.converter.ConversionOptions",
"tensorflow.python.util.tf_inspect.getmodule",
"tensorflow.python.framework.tensor_util.is_tensor",
"tensorflow.python.util.tf_inspect.isfunction",
"tensorflow.python.autograph.impl.conversion.is_whitelisted_for_graph",
"tensorflow.python.autograph.impl.conversion.entity_to_graph",
"tensorflow.python.util.tf_decorator.make_decorator",
"tensorflow.python.autograph.pyct.compiler.ast_to_source",
"tensorflow.python.data.util.nest.flatten",
"tensorflow.python.autograph.pyct.inspect_utils.isbuiltin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
}
] |
makoziol0/pyne | [
"660b1bdd608d9b227d6a432737303f7e82af4a25"
] | [
"pyne/transmute/origen22.py"
] | [
"\"\"\"This module implements an ORIGEN v2.2 transmutation solver.\n\"\"\"\nfrom __future__ import print_function, division\n\nimport os\nimport subprocess\nimport tempfile\nfrom collections import Mapping\nfrom warnings import warn\nfrom pyne.utils import QAWarning\n\nimport numpy as np\n\nfrom pyne import data\nfrom pyne import rxname\nfrom pyne import nucname\nfrom pyne import nuc_data\nfrom pyne import origen22\nfrom pyne import utils\nfrom pyne.material import Material, from_atom_frac\nfrom pyne.xs.data_source import NullDataSource, SimpleDataSource, EAFDataSource\nfrom pyne.xs.cache import XSCache\n\nwarn(__name__ + \" is not yet QA compliant.\", QAWarning)\n\nclass Transmuter(object):\n \"\"\"A class for transmuting materials using ORIGEN v2.2.\"\"\"\n\n def __init__(self, t=0.0, phi=0.0, temp=300.0, tol=1e-7, cwd='',\n base_tape9=origen22.BASE_TAPE9, xscache=None, \n o2exe='o2_therm_linux.exe', *args, **kwargs):\n \"\"\"Parameters\n ----------\n t : float\n Transmutations time [sec].\n phi : float or array of floats\n Neutron flux vector [n/cm^2/sec]. Currently this must either be \n a scalar or match the group structure of EAF.\n temp : float, optional\n Temperature [K] of material, defaults to 300.0.\n tol : float\n Tolerance level for chain truncation.\n cwd : str, optional\n Current working directory for origen runs. Defaults to this dir.\n base_tape9 : str or dict, optional\n A base TAPE9.INP file. If this is a str it is interpreted as a path \n to a file, which is then read in and parsed. If this is a dict, it is\n assumed to be in the format described in the main origen22 module.\n xscache : XSCache, optional\n A cross section cache to generate cross sections with.\n o2exe : str, optional\n Name or path to ORIGEN 2.2 executable.\n args : tuple, optional\n Other arguments ignored for compatibility with other Transmuters.\n kwargs : dict, optional\n Other keyword arguments ignored for compatibility with other Transmuters.\n \"\"\"\n if not isinstance(base_tape9, Mapping):\n base_tape9 = origen22.parse_tape9(tape9=base_tape9)\n self.base_tape9 = base_tape9\n\n if xscache is None:\n eafds = EAFDataSource()\n eafds.load(temp=temp)\n gs = np.array([eafds.src_group_struct[0], eafds.src_group_struct[-1]])\n eafds.dst_group_struct = gs\n xscache = XSCache(group_struct=gs, data_source_classes=[SimpleDataSource, \n NullDataSource])\n xscache.load(temp=temp)\n xscache.data_sources.insert(0, eafds)\n self.xscache = xscache\n\n self.t = t\n self._phi = None\n self.phi = phi\n self.temp = temp\n self.tol = tol\n self.cwd = os.path.abspath(cwd)\n self.o2exe = o2exe\n\n @property\n def phi(self):\n return self._phi\n\n @phi.setter\n def phi(self, flux):\n \"\"\"Ensures that the flux is correctly formatted.\"\"\"\n flux = np.asarray(flux)\n if flux.ndim == 0:\n _ = np.empty(175, float)\n _.fill(flux / 175.0)\n flux = _\n elif flux.ndim == 1 and flux.shape[0] != 175:\n raise ValueError(\"Group structure must match EAF.\")\n elif flux.ndim > 1:\n raise ValueError(\"The flux vector must be 0- or 1-dimensional.\")\n if not np.all(flux >= 0.0):\n raise ValueError(\"Flux entries must be non-negative.\")\n for ds in self.xscache.data_sources:\n ds.src_phi_g = flux\n self.xscache['phi_g'] = np.array([flux.sum()])\n self._phi = flux\n\n def transmute(self, x, t=None, phi=None, tol=None, cwd=None, xscache=None, \n o2exe=None, *args, **kwargs):\n \"\"\"Transmutes a material into its daughters.\n\n Parameters\n ----------\n x : Material or similar\n Input material for transmutation.\n t : float\n 
Transmutations time [sec].\n phi : float or array of floats\n Neutron flux vector [n/cm^2/sec]. Currently this must either be \n a scalar or match the group structure of EAF.\n tol : float\n Tolerance level for chain truncation.\n cwd : str, optional\n Current working directory for origen runs. Defaults to this dir.\n xscache : XSCache, optional\n A cross section cache to generate cross sections with.\n o2exe : str, optional\n Name or path to ORIGEN 2.2 executable.\n\n Returns\n -------\n y : Material\n The output material post-transmutation.\n\n \"\"\"\n if not isinstance(x, Material):\n x = Material(x)\n if t is not None:\n self.t = t\n if phi is not None:\n self.phi = phi\n if tol is not None:\n self.tol = tol\n if cwd is not None:\n self.cwd = os.path.abspath(cwd)\n if xscache is not None:\n self.xscache = xscache\n if o2exe is not None:\n self.o2exe = o2exe\n\n # prepare new tape9\n nucs = set(x.comp.keys())\n base_tape9 = self.base_tape9\n decay_nlb, xsfpy_nlb = origen22.nlbs(base_tape9)\n new_tape9 = origen22.xslibs(nucs=nucs, xscache=self.xscache, nlb=xsfpy_nlb)\n t9 = origen22.merge_tape9([new_tape9, base_tape9])\n\n # write out files\n origen22.write_tape4(x, outfile=os.path.join(self.cwd, 'TAPE4.INP'))\n origen22.write_tape5_irradiation('IRF', self.t/86400.0, self.xscache['phi_g'][0], \n outfile=os.path.join(self.cwd, 'TAPE5.INP'), decay_nlb=decay_nlb, \n xsfpy_nlb=xsfpy_nlb, cut_off=self.tol)\n origen22.write_tape9(t9, outfile=os.path.join(self.cwd, 'TAPE9.INP'))\n\n # run origen & get results\n f = tempfile.NamedTemporaryFile()\n try:\n subprocess.check_call([self.o2exe], cwd=self.cwd, stdout=f, stderr=f)\n except subprocess.CalledProcessError:\n f.seek(0)\n print(\"ORIGEN output:\\n\\n{0}\".format(f.read()))\n raise\n finally:\n f.close()\n t6 = origen22.parse_tape6(tape6=os.path.join(self.cwd, 'TAPE6.OUT'))\n y = t6['materials'][-1]\n return y\n\n"
] | [
[
"numpy.asarray",
"numpy.all",
"numpy.array",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SkyAndCloud/Coursera-AndrewNG-ML-Python | [
"586edffdcc3e0811ac186a00b78897b0a75a07d0",
"586edffdcc3e0811ac186a00b78897b0a75a07d0",
"586edffdcc3e0811ac186a00b78897b0a75a07d0",
"586edffdcc3e0811ac186a00b78897b0a75a07d0",
"586edffdcc3e0811ac186a00b78897b0a75a07d0"
] | [
"ex7/computeCentroids.py",
"ex8/cofiCostFunc.py",
"ex5/validationCurve.py",
"ex6/emailFeatures.py",
"ex3/displayData.py"
] | [
"import numpy as np\n\n\ndef computeCentroids(X, idx, K):\n \"\"\"returns the new centroids by\n computing the means of the data points assigned to each centroid. It is\n given a dataset X where each row is a single data point, a vector\n idx of centroid assignments (i.e. each entry in range [1..K]) for each\n example, and K, the number of centroids. You should return a matrix\n centroids, where each row of centroids is the mean of the data points\n assigned to it.\n \"\"\"\n\n# Useful variables\n m, n = X.shape\n\n# You need to return the following variables correctly.\n centroids = np.zeros((K, X.shape[1]))\n\n# ====================== YOUR CODE HERE ======================\n# Instructions: Go over every centroid and compute mean of all points that\n# belong to it. Concretely, the row vector centroids(i, :)\n# should contain the mean of the data points assigned to\n# centroid i.\n#\n# Note: You can use a for-loop over the centroids to compute this.\n# \n idx_count = np.bincount(idx)\n for k in xrange(K):\n if k < len(idx_count):\n centroids[k] = np.sum(X[idx == k,:], axis=0) / float(idx_count[k])\n\n# =============================================================\n\n return centroids\n\n# if __name__ == '__main__':\n# print computeCentroids(\n# np.array([[1, 2, 3, 4], [4, 3, 2, 1]]),\n# np.array([0, 0]),\n# 2\n# )",
"import numpy as np\n\n\ndef cofiCostFunc(params, Y, R, num_users, num_movies, num_features, Lambda):\n \"\"\"returns the cost and gradient for the\n \"\"\"\n\n # Unfold the U and W matrices from params\n X = np.array(params[:num_movies*num_features]).reshape(num_features, num_movies).T.copy()\n Theta = np.array(params[num_movies*num_features:]).reshape(num_features, num_users).T.copy()\n\n\n # You need to return the following values correctly\n J = 0\n X_grad = np.zeros(X.shape)\n Theta_grad = np.zeros(Theta.shape)\n\n # ====================== YOUR CODE HERE ======================\n # Instructions: Compute the cost function and gradient for collaborative\n # filtering. Concretely, you should first implement the cost\n # function (without regularization) and make sure it is\n # matches our costs. After that, you should implement the\n # gradient and use the checkCostFunction routine to check\n # that the gradient is correct. Finally, you should implement\n # regularization.\n #\n # Notes: X - num_movies x num_features matrix of movie features\n # Theta - num_users x num_features matrix of user features\n # Y - num_movies x num_users matrix of user ratings of movies\n # R - num_movies x num_users matrix, where R(i, j) = 1 if the\n # i-th movie was rated by the j-th user\n #\n # You should set the following variables correctly:\n #\n # X_grad - num_movies x num_features matrix, containing the\n # partial derivatives w.r.t. to each element of X\n # Theta_grad - num_users x num_features matrix, containing the\n # partial derivatives w.r.t. to each element of Theta\n J = np.sum(np.square(X.dot(Theta.T) - Y) * R) / float(2) + \\\n np.sum(np.square(Theta)) * Lambda / float(2) + \\\n np.sum(np.square(X)) * Lambda / float(2)\n X_grad = ((X.dot(Theta.T) - Y) * R).dot(Theta) + Lambda * X\n Theta_grad = ((X.dot(Theta.T) - Y) * R).T.dot(X) + Lambda * Theta\n # =============================================================\n\n grad = np.hstack((X_grad.T.flatten(),Theta_grad.T.flatten()))\n\n return J, grad\n",
"import numpy as np\n\nfrom trainLinearReg import trainLinearReg\nfrom linearRegCostFunction import linearRegCostFunction\n\ndef validationCurve(X, y, Xval, yval):\n \"\"\"returns the train\n and validation errors (in error_train, error_val)\n for different values of lambda. You are given the training set (X,\n y) and validation set (Xval, yval).\n \"\"\"\n\n# Selected values of lambda (you should not change this)\n lambda_vec = np.array([0, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10])\n\n# You need to return these variables correctly.\n error_train = np.zeros(lambda_vec.size)\n error_val = np.zeros(lambda_vec.size)\n\n# ====================== YOUR CODE HERE ======================\n# Instructions: Fill in this function to return training errors in \n# error_train and the validation errors in error_val. The \n# vector lambda_vec contains the different lambda parameters \n# to use for each calculation of the errors, i.e, \n# error_train(i), and error_val(i) should give \n# you the errors obtained after training with \n# lambda = lambda_vec(i)\n#\n# Note: You can loop over lambda_vec with the following:\n#\n# for i = 1:length(lambda_vec)\n# lambda = lambda_vec(i)\n# # Compute train / val errors when training linear \n# # regression with regularization parameter lambda\n# # You should store the result in error_train(i)\n# # and error_val(i)\n# ....\n# \n# end\n#\n#\n for i in xrange(1, lambda_vec.size + 1):\n Lambda = lambda_vec[i - 1]\n theta = trainLinearReg(X, y, Lambda)\n error_train[i - 1] = linearRegCostFunction(X, y, theta, 0)[0]\n error_val[i - 1] = linearRegCostFunction(Xval, yval, theta, 0)[0]\n\n\n# =========================================================================\n\n return lambda_vec, error_train, error_val",
"import numpy as np\n\n\ndef emailFeatures(word_indices):\n \"\"\"takes in a word_indices vector and\n produces a feature vector from the word indices.\n \"\"\"\n\n# Total number of words in the dictionary\n n = 1899\n\n# You need to return the following variables correctly.\n x = np.zeros(n)\n# ====================== YOUR CODE HERE ======================\n# Instructions: Fill in this function to return a feature vector for the\n# given email (word_indices). To help make it easier to \n# process the emails, we have have already pre-processed each\n# email and converted each word in the email into an index in\n# a fixed dictionary (of 1899 words). The variable\n# word_indices contains the list of indices of the words\n# which occur in one email.\n# \n# Concretely, if an email has the text:\n#\n# The quick brown fox jumped over the lazy dog.\n#\n# Then, the word_indices vector for this text might look \n# like:\n# \n# 60 100 33 44 10 53 60 58 5\n#\n# where, we have mapped each word onto a number, for example:\n#\n# the -- 60\n# quick -- 100\n# ...\n#\n# (note: the above numbers are just an example and are not the\n# actual mappings).\n#\n# Your task is take one such word_indices vector and construct\n# a binary feature vector that indicates whether a particular\n# word occurs in the email. That is, x(i) = 1 when word i\n# is present in the email. Concretely, if the word 'the' (say,\n# index 60) appears in the email, then x(60) = 1. The feature\n# vector should look like:\n#\n# x = [ 0 0 0 0 1 0 0 0 ... 0 0 0 0 1 ... 0 0 0 1 0 ..]\n#\n#\n\n for idx in word_indices:\n \t# assign 1 to index idx in x\n \tx[int(idx)] = 1\n\n# =========================================================================\n\n return x",
"import math\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef displayData(X, example_width=None):\n#DISPLAYDATA Display 2D data in a nice grid\n# [h, display_array] = DISPLAYDATA(X, example_width) displays 2D data\n# stored in X in a nice grid. It returns the figure handle h and the \n# displayed array if requested.\n\n\t# closes previously opened figure. preventing a\n\t# warning after opening too many figures\n\tplt.close()\n\n\t# creates new figure \n\tplt.figure()\n\n # turns 1D X array into 2D\n\tif X.ndim == 1:\n\t\tX = np.reshape(X, (-1,X.shape[0]))\n\n\t# Set example_width automatically if not passed in\n\tif not example_width or not 'example_width' in locals():\n\t\texample_width = int(round(math.sqrt(X.shape[1])))\n\n\t# Gray Image\n\tplt.set_cmap(\"gray\")\n\n\t# Compute rows, cols\n\tm, n = X.shape\n\texample_height = n / example_width\n\n\t# Compute number of items to display\n\tdisplay_rows = int(math.floor(math.sqrt(m)))\n\tdisplay_cols = int(math.ceil(m / display_rows))\n\n\t# Between images padding\n\tpad = 1\n\n\t# Setup blank display\n\tdisplay_array = -np.ones((pad + display_rows * (example_height + pad), pad + display_cols * (example_width + pad)))\n\n\t# Copy each example into a patch on the display array\n\tcurr_ex = 1\n\tfor j in xrange(1,display_rows+1):\n\t\tfor i in xrange (1,display_cols+1):\n\t\t\tif curr_ex > m:\n\t\t\t\tbreak\n\t\t\n\t\t\t# Copy the patch\n\t\t\t\n\t\t\t# Get the max value of the patch to normalize all examples\n\t\t\tmax_val = max(abs(X[curr_ex-1, :]))\n\t\t\trows = pad + (j - 1) * (example_height + pad) + np.array(range(example_height))\n\t\t\tcols = pad + (i - 1) * (example_width + pad) + np.array(range(example_width ))\n\n\t\t\t# Basic (vs. advanced) indexing/slicing is necessary so that we look can assign\n\t\t\t# \tvalues directly to display_array and not to a copy of its subarray.\n\t\t\t# \tfrom stackoverflow.com/a/7960811/583834 and \n\t\t\t# \tbytes.com/topic/python/answers/759181-help-slicing-replacing-matrix-sections\n\t\t\t# Also notice the order=\"F\" parameter on the reshape call - this is because python's \n\t\t\t#\tdefault reshape function uses \"C-like index order, with the last axis index \n\t\t\t#\tchanging fastest, back to the first axis index changing slowest\" i.e. \n\t\t\t#\tit first fills out the first row/the first index, then the second row, etc. \n\t\t\t#\tmatlab uses \"Fortran-like index order, with the first index changing fastest, \n\t\t\t#\tand the last index changing slowest\" i.e. it first fills out the first column, \n\t\t\t#\tthen the second column, etc. This latter behaviour is what we want.\n\t\t\t#\tAlternatively, we can keep the deault order=\"C\" and then transpose the result\n\t\t\t#\tfrom the reshape call.\n\t\t\tdisplay_array[rows[0]:rows[-1]+1 , cols[0]:cols[-1]+1] = np.reshape(X[curr_ex-1, :], (example_height, example_width), order=\"F\") / max_val\n\t\t\tcurr_ex += 1\n\t\n\t\tif curr_ex > m:\n\t\t\tbreak\n\n\t# Display Image\n\th = plt.imshow(display_array, vmin=-1, vmax=1)\n\n\t# Do not show axis\n\tplt.axis('off')\n\n\tplt.show(block=False)\n\n\treturn h, display_array\n"
] | [
[
"numpy.sum",
"numpy.zeros",
"numpy.bincount"
],
[
"numpy.square",
"numpy.array",
"numpy.zeros"
],
[
"numpy.array",
"numpy.zeros"
],
[
"numpy.zeros"
],
[
"matplotlib.pyplot.imshow",
"numpy.reshape",
"matplotlib.pyplot.set_cmap",
"numpy.ones",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
YaxinCui/TorchSSL | [
"5603d16dfcb62e558c298f999a613f6f9d2c49de"
] | [
"pimodel.py"
] | [
"# import needed library\nimport os\nimport logging\nimport random\nimport warnings\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\n\nfrom utils import net_builder, get_logger, count_parameters, over_write_args_from_file\nfrom train_utils import TBLog, get_optimizer, get_cosine_schedule_with_warmup\nfrom models.pimodel.pimodel import PiModel\nfrom datasets.ssl_dataset import SSL_Dataset\nfrom datasets.data_utils import get_data_loader\n\n\ndef main(args):\n '''\n For (Distributed)DataParallelism,\n main(args) spawn each process (main_worker) to each GPU.\n '''\n\n save_path = os.path.join(args.save_dir, args.save_name)\n if os.path.exists(save_path) and not args.overwrite:\n raise Exception('already existing model: {}'.format(save_path))\n if args.resume:\n if args.load_path is None:\n raise Exception('Resume of training requires --load_path in the args')\n if os.path.abspath(save_path) == os.path.abspath(args.load_path) and not args.overwrite:\n raise Exception('Saving & Loading pathes are same. \\\n If you want over-write, give --overwrite in the argument.')\n\n if args.seed is not None:\n warnings.warn('You have chosen to seed training. '\n 'This will turn on the CUDNN deterministic setting, '\n 'which can slow down your training considerably! '\n 'You may see unexpected behavior when restarting '\n 'from checkpoints.')\n\n if args.gpu is not None:\n warnings.warn('You have chosen a specific GPU. This will completely '\n 'disable data parallelism.')\n\n if args.dist_url == \"env://\" and args.world_size == -1:\n args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n # distributed: true if manually selected or if world_size > 1\n args.distributed = args.world_size > 1 or args.multiprocessing_distributed\n ngpus_per_node = torch.cuda.device_count() # number of gpus of each node\n\n if args.multiprocessing_distributed:\n # now, args.world_size means num of total processes in all nodes\n args.world_size = ngpus_per_node * args.world_size\n\n # args=(,) means the arguments of main_worker\n mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))\n else:\n main_worker(args.gpu, ngpus_per_node, args)\n\n\ndef main_worker(gpu, ngpus_per_node, args):\n '''\n main_worker is conducted on each GPU.\n '''\n\n global best_acc1\n args.gpu = gpu\n\n # random seed has to be set for the syncronization of labeled data sampling in each process.\n assert args.seed is not None\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n np.random.seed(args.seed)\n cudnn.deterministic = True\n\n # SET UP FOR DISTRIBUTED TRAINING\n if args.distributed:\n if args.dist_url == \"env://\" and args.rank == -1:\n args.rank = int(os.environ[\"RANK\"])\n if args.multiprocessing_distributed:\n args.rank = args.rank * ngpus_per_node + gpu # compute global rank\n\n # set distributed group:\n dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n\n # SET save_path and logger\n save_path = os.path.join(args.save_dir, args.save_name)\n logger_level = \"WARNING\"\n tb_log = None\n if args.rank % ngpus_per_node == 0:\n tb_log = TBLog(save_path, 'tensorboard', use_tensorboard=args.use_tensorboard)\n logger_level = \"INFO\"\n\n logger = get_logger(args.save_name, save_path, logger_level)\n logger.warning(f\"USE GPU: {args.gpu} for training\")\n\n args.bn_momentum = 1.0 - 0.999\n if 'imagenet' in 
args.dataset.lower():\n _net_builder = net_builder('ResNet50', False, None, is_remix=False)\n else:\n _net_builder = net_builder(args.net,\n args.net_from_name,\n {'first_stride': 2 if 'stl' in args.dataset else 1,\n 'depth': args.depth,\n 'widen_factor': args.widen_factor,\n 'leaky_slope': args.leaky_slope,\n 'bn_momentum': args.bn_momentum,\n 'dropRate': args.dropout,\n 'use_embed': False,\n 'is_remix': False},\n )\n\n model = PiModel(_net_builder,\n args.num_classes,\n args.ulb_loss_ratio,\n num_eval_iter=args.num_eval_iter,\n tb_log=tb_log,\n ema_m=args.ema_m,\n logger=logger)\n\n logger.info(f'Number of Trainable Params: {count_parameters(model.model)}')\n\n # SET Optimizer & LR Scheduler\n ## construct SGD and cosine lr scheduler\n optimizer = get_optimizer(model.model, args.optim, args.lr, args.momentum, args.weight_decay)\n scheduler = get_cosine_schedule_with_warmup(optimizer,\n args.num_train_iter,\n num_warmup_steps=args.num_train_iter * 0)\n ## set SGD and cosine lr\n model.set_optimizer(optimizer, scheduler)\n\n # SET Devices for (Distributed) DataParallel\n if not torch.cuda.is_available():\n raise Exception('ONLY GPU TRAINING IS SUPPORTED')\n elif args.distributed:\n if args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n\n '''\n batch_size: batch_size per node -> batch_size per gpu\n workers: workers per node -> workers per gpu\n '''\n args.batch_size = int(args.batch_size / ngpus_per_node)\n model.model.cuda(args.gpu)\n model.model = torch.nn.parallel.DistributedDataParallel(model.model,\n device_ids=[args.gpu],\n broadcast_buffers=False,\n find_unused_parameters=True)\n\n else:\n # if arg.gpu is None, DDP will divide and allocate batch_size\n # to all available GPUs if device_ids are not set.\n model.cuda()\n model = torch.nn.parallel.DistributedDataParallel(model)\n\n elif args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model.model = model.model.cuda(args.gpu)\n\n else:\n model.model = torch.nn.DataParallel(model.model).cuda()\n\n logger.info(f\"model_arch: {model}\")\n logger.info(f\"Arguments: {args}\")\n\n cudnn.benchmark = True\n\n # Construct Dataset & DataLoader\n train_dset = SSL_Dataset(args, alg='pimodel', name=args.dataset, train=True,\n num_classes=args.num_classes, data_dir=args.data_dir)\n lb_dset, ulb_dset = train_dset.get_ssl_dset(args.num_labels)\n\n _eval_dset = SSL_Dataset(args, alg='pimodel', name=args.dataset, train=False,\n num_classes=args.num_classes, data_dir=args.data_dir)\n eval_dset = _eval_dset.get_dset()\n\n loader_dict = {}\n dset_dict = {'train_lb': lb_dset, 'train_ulb': ulb_dset, 'eval': eval_dset}\n\n loader_dict['train_lb'] = get_data_loader(dset_dict['train_lb'],\n args.batch_size,\n data_sampler=args.train_sampler,\n num_iters=args.num_train_iter,\n num_workers=args.num_workers,\n distributed=args.distributed)\n\n loader_dict['train_ulb'] = get_data_loader(dset_dict['train_ulb'],\n args.batch_size * args.uratio,\n data_sampler=args.train_sampler,\n num_iters=args.num_train_iter,\n num_workers=4 * args.num_workers,\n distributed=args.distributed)\n\n loader_dict['eval'] = get_data_loader(dset_dict['eval'],\n args.eval_batch_size,\n num_workers=args.num_workers,\n drop_last=False)\n\n ## set DataLoader\n model.set_data_loader(loader_dict)\n\n # If args.resume, load checkpoints from args.load_path\n if args.resume:\n model.load_model(args.load_path)\n\n # START TRAINING\n trainer = model.train\n for epoch in range(args.epoch):\n trainer(args)\n\n if not args.multiprocessing_distributed or \\\n 
(args.multiprocessing_distributed and args.rank % ngpus_per_node == 0):\n model.save_model('latest_model.pth', save_path)\n\n logging.warning(f\"GPU {args.rank} training is FINISHED\")\n\n\ndef str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser(description='')\n\n '''\n Saving & loading of the model.\n '''\n parser.add_argument('--save_dir', type=str, default='./saved_models')\n parser.add_argument('--save_name', type=str, default='pimodel')\n parser.add_argument('--resume', type=str2bool, default=False)\n parser.add_argument('--load_path', type=str, default=None)\n parser.add_argument('--overwrite', type=str2bool, default=False)\n parser.add_argument('--use_tensorboard', action='store_true', help='Use tensorboard to plot and save curves, otherwise save the curves locally.')\n\n '''\n Training Configuration of PiModel\n '''\n\n parser.add_argument('--epoch', type=int, default=1)\n parser.add_argument('--num_train_iter', type=int, default=2 ** 20,\n help='total number of training iterations')\n parser.add_argument('--num_eval_iter', type=int, default=1000,\n help='evaluation frequency')\n parser.add_argument('--unsup_warmup_pos', type=float, default=0.4,\n help='Relative position at which constraint loss warmup ends.')\n parser.add_argument('--num_labels', type=int, default=4000)\n parser.add_argument('--batch_size', type=int, default=64,\n help='total number of batch size of labeled data')\n parser.add_argument('--uratio', type=int, default=1,\n help='the ratio of unlabeled data to labeld data in each mini-batch')\n parser.add_argument('--eval_batch_size', type=int, default=1024,\n help='batch size of evaluation data loader (it does not affect the accuracy)')\n\n parser.add_argument('--ema_m', type=float, default=0.999)\n parser.add_argument('--ulb_loss_ratio', type=float, default=10.0)\n '''\n Optimizer configurations\n '''\n parser.add_argument('--optim', type=str, default='SGD')\n parser.add_argument('--lr', type=float, default=3e-2)\n parser.add_argument('--momentum', type=float, default=0.9)\n parser.add_argument('--weight_decay', type=float, default=5e-4)\n parser.add_argument('--amp', type=str2bool, default=False, help='use mixed precision training or not')\n parser.add_argument('--clip', type=float, default=0)\n\n '''\n Backbone Net Configurations\n '''\n parser.add_argument('--net', type=str, default='WideResNet')\n parser.add_argument('--net_from_name', type=str2bool, default=False)\n parser.add_argument('--depth', type=int, default=28)\n parser.add_argument('--widen_factor', type=int, default=2)\n parser.add_argument('--leaky_slope', type=float, default=0.1)\n parser.add_argument('--dropout', type=float, default=0.0)\n\n '''\n Data Configurations\n '''\n\n parser.add_argument('--data_dir', type=str, default='./data')\n parser.add_argument('--dataset', type=str, default='cifar10')\n parser.add_argument('--train_sampler', type=str, default='RandomSampler')\n parser.add_argument('--num_classes', type=int, default=10)\n parser.add_argument('--num_workers', type=int, default=1)\n\n '''\n multi-GPUs & Distrbitued Training\n '''\n\n ## args for distributed training (from https://github.com/pytorch/examples/blob/master/imagenet/main.py)\n parser.add_argument('--world-size', default=1, type=int,\n 
help='number of nodes for distributed training')\n parser.add_argument('--rank', default=0, type=int,\n help='**node rank** for distributed training')\n parser.add_argument('--dist-url', default='tcp://127.0.0.1:10002', type=str,\n help='url used to set up distributed training')\n parser.add_argument('--dist-backend', default='nccl', type=str,\n help='distributed backend')\n parser.add_argument('--seed', default=0, type=int,\n help='seed for initializing training. ')\n parser.add_argument('--gpu', default=None, type=int,\n help='GPU id to use.')\n parser.add_argument('--multiprocessing-distributed', type=str2bool, default=True,\n help='Use multi-processing distributed training to launch '\n 'N processes per node, which has N GPUs. This is the '\n 'fastest way to use PyTorch for either single node or '\n 'multi node data parallel training')\n\n # config file\n parser.add_argument('--c', type=str, default='')\n\n args = parser.parse_args()\n over_write_args_from_file(args, args.c)\n main(args)\n"
] | [
[
"torch.distributed.init_process_group",
"torch.multiprocessing.spawn",
"numpy.random.seed",
"torch.cuda.set_device",
"torch.manual_seed",
"torch.nn.DataParallel",
"torch.cuda.is_available",
"torch.cuda.device_count",
"torch.nn.parallel.DistributedDataParallel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
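The record above only contains the distributed launcher for a Pi-Model run; the consistency objective that `--ulb_loss_ratio` and `--unsup_warmup_pos` configure lives in `models/pimodel/pimodel.py` and is not included here. Below is a minimal, hedged sketch of that objective, assuming the usual Pi-Model formulation (supervised cross-entropy plus an MSE consistency term between two stochastic forward passes, with a linear warm-up of the unsupervised weight); the function name and the linear ramp are illustrative assumptions, not taken from the repository.

```python
import torch
import torch.nn.functional as F

def pimodel_loss(logits_lb, labels, logits_ulb_1, logits_ulb_2,
                 it, num_train_iter, ulb_loss_ratio=10.0, unsup_warmup_pos=0.4):
    # Supervised cross-entropy on the labeled mini-batch
    sup_loss = F.cross_entropy(logits_lb, labels)
    # Consistency: MSE between softmax outputs of two noisy/augmented passes
    unsup_loss = F.mse_loss(torch.softmax(logits_ulb_1, dim=-1),
                            torch.softmax(logits_ulb_2, dim=-1))
    # Assumed linear ramp-up of the unsupervised weight until
    # `unsup_warmup_pos` of the total training iterations has elapsed
    warmup = min(1.0, it / (unsup_warmup_pos * num_train_iter))
    return sup_loss + ulb_loss_ratio * warmup * unsup_loss

# Toy call with random tensors, using the CLI defaults shown above
loss = pimodel_loss(torch.randn(4, 10), torch.randint(0, 10, (4,)),
                    torch.randn(8, 10), torch.randn(8, 10),
                    it=5000, num_train_iter=2 ** 20)
```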
ioam/topographica | [
"1e097e2df9938a6ce9f48cefbf25672cbbf9a4db"
] | [
"topo/tests/unit/testplot.py"
] | [
"\"\"\"\nTest for the Plot class.\n\"\"\"\n\n\nimport unittest\nfrom topo.base.sheet import *\n#from testsheetview import ImageGenerator\n\nSHOW_PLOTS = False\n\n### JC: My new imports\nfrom topo.plotting.plot import make_template_plot\nimport numpy as np\n\nimport param\n\nfrom holoviews.core import BoundingBox, NdMapping\nfrom holoviews.interface.collector import AttrDict\nfrom holoviews import Image\n\n\n### This function is defined here, where it might be useful for testing\n### Plot\ndef matrix_hsv_to_rgb(hMapArray,sMapArray,vMapArray):\n \"\"\"\n First matrix sets the Hue (Color).\n Second marix sets the Sauration (How much color)\n Third matrix sets the Value (How bright the pixel will be)\n\n The three input matrices should all be the same size, and have\n been normalized to 1. There should be no side-effects on the\n original input matrices.\n \"\"\"\n\n shape = hMapArray.shape\n rmat = np.array(hMapArray,dtype=np.float)\n gmat = np.array(sMapArray,dtype=np.float)\n bmat = np.array(vMapArray,dtype=np.float)\n\n ## This code should never be seen. It means that calling code did\n ## not take the precaution of clipping the input matrices.\n if max(rmat.ravel()) > 1 or max(gmat.ravel()) > 1 or max(bmat.ravel()) > 1:\n param.Parameterized().warning('HSVBitmap inputs exceed 1. Clipping to 1.0')\n if max(rmat.ravel()) > 0: rmat = clip(rmat,0.0,1.0)\n if max(gmat.ravel()) > 0: gmat = clip(gmat,0.0,1.0)\n if max(bmat.ravel()) > 0: bmat = clip(bmat,0.0,1.0)\n\n # List comprehensions were not used because they were slower.\n for j in range(shape[0]):\n for i in range(shape[1]):\n rgb = hsv_to_rgb(rmat[j,i],gmat[j,i],bmat[j,i])\n rmat[j,i] = rgb[0]\n gmat[j,i] = rgb[1]\n bmat[j,i] = rgb[2]\n\n return (rmat, gmat, bmat)\n\n\n\n### JCALERT !he file should be rewritten according to new changes in Plot.\n\nclass TestPlot(unittest.TestCase):\n\n def setUp(self):\n ### Simple case: we only pass a dictionary to Plot()\n ### that does not belong to a Sheet:\n views = {}\n\n time = 0\n metadata = AttrDict(timestamp=time)\n\n ### SheetView1:\n ### Find a way to assign randomly the matrix.\n self.matrix1 = np.zeros((10,10),dtype=np.float) + np.random.random((10,10))\n self.bounds1 = BoundingBox(points=((-0.5,-0.5),(0.5,0.5)))\n im = Image(self.matrix1, self.bounds1)\n im.metadata=metadata\n self.sheet_view1 = NdMapping((None, im))\n self.sheet_view1.metadata = AttrDict(src_name='TestInputParam',\n precedence=0.1, row_precedence=0.1,\n cyclic_range=None, timestamp=time)\n self.key1 = 'IM1'\n views[self.key1] = self.sheet_view1\n\n ### SheetView2:\n ### Find a way to assign randomly the matrix.\n self.matrix2 = np.zeros((10,10),dtype=np.float) + 0.3\n self.bounds2 = BoundingBox(points=((-0.5,-0.5),(0.5,0.5)))\n im = Image(self.matrix2, self.bounds2)\n im.metadata=metadata\n self.sheet_view2 = NdMapping((None, im))\n self.sheet_view2.metadata = AttrDict(src_name='TestInputParam',\n precedence=0.2, row_precedence=0.2,\n cyclic_range=None, timestamp=time)\n self.key2 = 'IM2'\n views[self.key2] = self.sheet_view2\n\n ### SheetView3:\n ### Find a way to assign randomly the matrix.\n self.matrix3 = np.zeros((10,10),dtype=np.float) + np.random.random((10,10))\n self.bounds3 = BoundingBox(points=((-0.5,-0.5),(0.5,0.5)))\n im = Image(self.matrix3, self.bounds3)\n im.metadata=metadata\n self.sheet_view3 = NdMapping((None, im))\n self.sheet_view3.metadata = AttrDict(src_name='TestInputParam',\n precedence=0.3, row_precedence=0.3,\n cyclic_range=None, timestamp=time)\n self.key3 = 'IM3'\n views[self.key3] = 
self.sheet_view3\n\n ### SheetView4: for testing clipping + different bounding box\n ### Find a way to assign randomly the matrix.\n self.matrix4 = np.zeros((10,10),dtype=np.float) + 1.6\n self.bounds4 = BoundingBox(points=((-0.7,-0.7),(0.7,0.7)))\n im = Image(self.matrix4, self.bounds4)\n im.metadata=metadata\n self.sheet_view4 = NdMapping((None, im))\n self.sheet_view4.metadata = AttrDict(src_name='TestInputParam',\n precedence=0.4, row_precedence=0.4,\n cyclic_range=None, timestamp=time)\n self.key4 = 'IM4'\n views[self.key4] = self.sheet_view4\n\n self.view_dict = {'Strength': views, 'Hue': views, 'Confidence': views}\n\n ### JCALERT! for the moment we can only pass a triple when creating plot\n ### adding more sheetView to test when plot will be fixed for accepting\n ### as much as you want.\n\n # plot0: empty plot + no sheetviewdict passed: error or empty plot?\n ### JCALERT! It has to be fixed what to do in this case in plot..\n ### disabled test for the moment.\n #self.plot0 = Plot((None,None,None),None,name='plot0')\n ### CATCH EXCEPTION\n\n plot_channels1 = {'Strength':None,'Hue':None,'Confidence':None}\n # plot1: empty plot\n self.plot1 = make_template_plot(plot_channels1,self.view_dict,density=10.0,name='plot1')\n\n plot_channels2 = {'Strength':self.key1,'Hue':None,'Confidence':None}\n # plot2: sheetView 1, no normalize, no clipping\n self.plot2 = make_template_plot(plot_channels2,self.view_dict,density=10.0,name='plot2')\n\n plot_channels3 = {'Strength':self.key1,'Hue':self.key2,'Confidence':None}\n # plot3: sheetView 1+2, no normalize, no clipping\n self.plot3 = make_template_plot(plot_channels3,self.view_dict,density=10.0,name='plot3')\n\n plot_channels4 = {'Strength':self.key1,'Hue':self.key2,'Confidence':self.key3}\n # plot4: sheetView 1+2+3, no normalize , no clipping\n self.plot4 = make_template_plot(plot_channels4,self.view_dict,density=10.0,name='plot4')\n\n plot_channels5 = {'Strength':self.key1,'Hue':None,'Confidence':self.key3}\n # plot5: sheetView 1+3, no normalize, no clipping\n self.plot5 = make_template_plot(plot_channels5,self.view_dict,density=10.0,name='plot5')\n\n plot_channels6 = {'Strength':None,'Hue':self.key2,'Confidence':self.key3}\n # plot6: sheetView 2+3, no normalize , no clipping\n self.plot6 = make_template_plot(plot_channels6,self.view_dict,density=10.0,name='plot6')\n\n plot_channels7 = {'Strength':self.key4,'Hue':self.key2,'Confidence':self.key3}\n # plot7: sheetView 1+2+3, no normalize , clipping\n self.plot7 = make_template_plot(plot_channels7,self.view_dict,density=10.0,name='plot7')\n\n plot_channels8 = {'Strength':self.key1,'Hue':self.key2,'Confidence':self.key3}\n # plot8: sheetView 1+2+3, normalize , no clipping\n self.plot8 = make_template_plot(plot_channels8,self.view_dict,density=10.0,normalize=True,name='plot8')\n\n ### JCALERT! FOR THE MOMENT I TAKE THE DEFAULT FOR NORMALIZE.\n ### WE WILL SEE IF IT REMAINS IN PLOT FIRST.\n\n ### also makes a sheet to test realease_sheetviews\n\n self.sheet = Sheet()\n self.sheet.views.Maps[self.key1]=self.sheet_view1\n self.sheet.views.Maps[self.key2]=self.sheet_view2\n self.sheet.views.Maps[self.key3]=self.sheet_view3\n self.sheet.views.Maps[self.key4]=self.sheet_view4\n\n plot_channels9 = {'Strength':self.key1,'Hue':self.key2,'Confidence':self.key3}\n self.plot9 = make_template_plot(plot_channels9,self.sheet.views.Maps,density=10.0,name='plot9')\n\n\n\n\n def test_plot(self):\n pass\n\n# ### JCALERT! 
make a test for plot0\n\n# # plot 1\n# test = None\n# self.assertEqual(self.plot1,test)\n\n# # plot 2\n# sat = np.zeros((10,10),dtype=np.float)\n# hue = np.zeros((10,10),dtype=np.float)\n# val = self.matrix1\n\n# test = matrix_hsv_to_rgb(hue,sat,val)\n# for each1,each2 in zip(self.plot2.rgb_matrices,test):\n# for each3,each4 in zip(each1.ravel(),each2.ravel()):\n# self.assertAlmostEqual(each3,each4)\n\n# # plot 3\n# sat = ones((10,10),dtype=np.float)\n# hue = np.zeros((10,10),dtype=np.float) + 0.3\n# val = self.matrix1\n\n# test = matrix_hsv_to_rgb(hue,sat,val)\n# for each1,each2 in zip(self.plot3.rgb_matrices,test):\n# for each3,each4 in zip(each1.ravel(),each2.ravel()):\n# self.assertAlmostEqual(each3,each4)\n\n# # plot 4\n# sat = self.matrix3\n# hue = np.zeros((10,10),dtype=np.float) + 0.3\n# val = self.matrix1\n\n# test = matrix_hsv_to_rgb(hue,sat,val)\n# for each1,each2 in zip(self.plot4.rgb_matrices,test):\n# for each3,each4 in zip(each1.ravel(),each2.ravel()):\n# self.assertAlmostEqual(each3,each4)\n\n# # plot 5\n# sat = np.zeros((10,10),dtype=np.float)\n# hue = np.zeros((10,10),dtype=np.float)\n# val = self.matrix1\n\n# test = matrix_hsv_to_rgb(hue,sat,val)\n# for each1,each2 in zip(self.plot5.rgb_matrices,test):\n# for each3,each4 in zip(each1.ravel(),each2.ravel()):\n# self.assertAlmostEqual(each3,each4)\n\n# # plot 6\n# sat = self.matrix3\n# hue = np.zeros((10,10),dtype=np.float) + 0.3\n# val = ones((10,10),dtype=np.float)\n\n# test = matrix_hsv_to_rgb(hue,sat,val)\n# for each1,each2 in zip(self.plot6.rgb_matrices,test):\n# for each3,each4 in zip(each1.ravel(),each2.ravel()):\n# self.assertAlmostEqual(each3,each4)\n\n# # plot 7\n# sat = self.matrix3\n# hue = np.zeros((10,10),dtype=np.float) + 0.3\n# val = self.matrix4\n\n# val = MLab.clip(val,0.0,1.0)\n\n# test = matrix_hsv_to_rgb(hue,sat,val)\n# for each1,each2 in zip(self.plot7.rgb_matrices,test):\n# for each3,each4 in zip(each1.ravel(),each2.ravel()):\n# self.assertAlmostEqual(each3,each4)\n\n\n# # plot 8\n# sat = self.matrix3\n# hue = np.zeros((10,10),dtype=np.float) + 0.3\n# val = self.matrix1\n\n# val = divide(val,float(max(val.ravel())))\n\n# test = matrix_hsv_to_rgb(hue,sat,val)\n\n# for each1,each2 in zip(self.plot8.rgb_matrices,test):\n# for each3,each4 in zip(each1.ravel(),each2.ravel()):\n# self.assertAlmostEqual(each3,each4)\n\n\n\n\n# # plot 9\n# sat = self.matrix3\n# hue = np.zeros((10,10),dtype=np.float) + 0.3\n# val = self.matrix1\n\n# test = matrix_hsv_to_rgb(hue,sat,val)\n# for each1,each2 in zip(self.plot9.rgb_matrices,test):\n# for each3,each4 in zip(each1.ravel(),each2.ravel()):\n# self.assertAlmostEqual(each3,each4)\n\n# #### Think about doing a plot test using sheet_dict and a sheet?\n# ### Ask Jim if it is really necessary...\n\n# def test_release_sheetviews(self):\n\n# self.plot9.release_sheetviews()\n\n# test=self.sheet.sheet_views.get(self.key1,None)\n# self.assertEqual(test,None)\n# test=self.sheet.sheet_views.get(self.key2,None)\n# self.assertEqual(test,None)\n# test=self.sheet.sheet_views.get(self.key3,None)\n# self.assertEqual(test,None)\n# test=self.sheet.sheet_views.get(self.key4,None)\n# self.assertEqual(test,self.sheet_view4)\n\n\n# def test_matrix_hsv_to_rgb(self):\n# a = [j for i in range(256) for j in range(256)]\n# b = [i for i in range(256) for j in range(256)]\n# c = [max(i,j) for i in range(256) for j in range(256)]\n# a = Numeric.reshape(a,(256,256)) / 255.0\n# b = Numeric.reshape(b,(256,256)) / 255.0\n# c = Numeric.reshape(c,(256,256)) / 255.0\n# (h,s,v) = matrix_hsv_to_rgb(a,b,c)\n# 
rgb = RGBMap(h,s,v)\n# # rgb.show()\n\n# def test_matrix_hsv_to_rgb2(self):\n# h = Numeric.np.array([[0.0,0.0],[0.0,0.0]])\n# s = Numeric.np.array([[0.0,0.0],[0.0,0.0]])\n# v = Numeric.np.array([[0.5,0.5],[0.5,0.5]])\n# h_orig = Numeric.np.array(h)\n# s_orig = Numeric.np.array(s)\n# v_orig = Numeric.np.array(v)\n# r,g,b = matrix_hsv_to_rgb(h,s,v)\n# rgb_target = Numeric.np.array([[0.5,0.5],[0.5,0.5]])\n# self.assertEqual(h,h_orig)\n# self.assertEqual(s,s_orig)\n# self.assertEqual(v,v_orig)\n\n\n### JC: THIS CODE IS LEFT TEMPORARY IN CASE IT IS OF ANY USE IN NEAR FUTURE\n\n# x = plot.Plot(('Activity',None,None),plot.COLORMAP,self.s2)\n# for o in dir():\n# # pprint(o)\n# if isinstance(o,plot.Plot):\n# o.warning('Found ', o.name)\n\n# input = ImageGenerator(filename='topo/tests/testsheetview.ppm',\n# density=100,\n# bounds=BoundingBox(points=((-0.8,-0.8),(0.8,0.8))))\n# sv = input.sheet_view('Activity')\n\n# # Defined sheetview in the R channel\n# plot1 = plot.Plot((None,None,sv),plot.COLORMAP)\n# p_tuple = plot1.plot()\n# (r, g, b) = p_tuple.matrices\n# map = RGBMap(r,g,b)\n# if SHOW_PLOTS: map.show()\n\n\n# def test_HSV_plot(self):\n# input = ImageGenerator(filename='topo/tests/testsheetview.ppm',\n# density=100,\n# bounds=BoundingBox(points=((-0.8,-0.8),(0.8,0.8))))\n# sv = input.sheet_view('Activity')\n\n# # Defined sheetview in the R channel\n# plot1 = plot.Plot((sv,sv,sv),plot.HSV)\n# p_tuple = plot1.plot()\n# (r, g, b) = p_tuple.matrices\n# map = HSVMap(r,g,b)\n# if SHOW_PLOTS: map.show()\n\n# def test_plottemplate(self):\n# pt = plot.PlotTemplate()\n# pt = plot.PlotTemplate({'Strength' : None,\n# 'Hue' : 'HueP',\n# 'Confidence' : None})\n# pt = plot.PlotTemplate(channels={'Strength' : None,\n# 'Hue' : 'HueP',\n# 'Confidence' : None})\n\nif __name__ == \"__main__\":\n\timport nose\n\tnose.runmodule()\n\n"
] | [
[
"numpy.random.random",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
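The `matrix_hsv_to_rgb` helper in the test file above converts pixels in a nested loop (its comment notes that list comprehensions were slower) and clips any channel that exceeds 1. For reference, a vectorized equivalent is sketched below; it assumes `matplotlib` is available, which the test itself does not import, so treat it as an illustration rather than a drop-in replacement.

```python
import numpy as np
from matplotlib.colors import hsv_to_rgb

def matrix_hsv_to_rgb_vectorized(h_map, s_map, v_map):
    """Hue/saturation/value matrices (same shape, values in [0, 1]) -> RGB planes."""
    hsv = np.clip(np.stack([h_map, s_map, v_map], axis=-1), 0.0, 1.0)  # (H, W, 3)
    rgb = hsv_to_rgb(hsv)                                              # (H, W, 3)
    return rgb[..., 0], rgb[..., 1], rgb[..., 2]

r, g, b = matrix_hsv_to_rgb_vectorized(np.random.random((10, 10)),
                                       np.random.random((10, 10)),
                                       np.random.random((10, 10)))
```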
workprinond/Anti-Alignment | [
"12783fcc7f3c208007bf15d750a3659fd92506f0"
] | [
"anti_alignment/algo/graph_construction/search_tree.py"
] | [
"import numpy as np\nimport networkx as nx\n\n\nclass SearchTree:\n\n def __init__(self,net,i_m, f_m):\n self.net = net\n self.i_m = i_m\n self.f_m = f_m\n\n def compute_incidence_matrix(self,net):\n \"\"\"\n We compute the incidence matrix of a Petri Net. It provides us with the firing requirements of a transition and its\n outcome. The matrix has rows equals to the number of places and columns equals to the the number of transition.\n As a result, the columns represent the firing information for each transition.\n :return: Numpy Matrix representing the incidence matrix.\n \"\"\"\n incidence_matrix=np.zeros((len(self.net.places), len(self.net.transitions)))\n transitions=list(self.net.transitions)\n places=list(self.net.places)\n i=0\n while i<len(transitions):\n transition=transitions[i]\n for ingoing_arc in transition.in_arcs:\n #A transition consumes a token from its input places. Therefore, we have to subtract 1.\n incidence_matrix[places.index(ingoing_arc.source), i] -= ingoing_arc.weight\n for outgoing_arc in transition.out_arcs:\n #A transition produces one token for each of its \"destination\" places. Therefore, we have to add 1.\n incidence_matrix[places.index(outgoing_arc.target), i] +=outgoing_arc.weight\n i+=1\n return incidence_matrix\n\n def requirement_firing(self,net):\n \"\"\"\n Computes how many tokens in which place are needed for a transition to fire.\n :param net: PM4Py Petri Net object\n :return: dictionary, whereby the keys are the transition, the values are a np.array which only marks where tokens are consumed for the respective transtion.\n \"\"\"\n place_list=list(net.places)\n transition_dict={}\n for transition in net.transitions:\n temp=np.zeros(len(place_list))\n for arc in transition.in_arcs:\n temp[place_list.index(arc.source)]-=arc.weight\n transition_dict[transition]=temp\n return transition_dict\n\n def transition_firing_information(self,incidence_matrix, net):\n \"\"\"\n We transform the information that is available in the incidence in a more readable form. 
This means that we\n contruct a dictionary, whereby a key is the name of a\n :param incidence_matrix: Incidence Matrix of a Petri Net\n :param net: Petri Net\n :return: Dictionary\n \"\"\"\n firing_dict = {}\n i=0\n transitions=list(net.transitions)\n while i < len(transitions):\n firing_dict[transitions[i]] = incidence_matrix[: , i]\n i+=1\n return firing_dict\n\n def get_post_firing_marking(self,marking, firing_dict, requirement_dict):\n \"\"\"\n We compute all possible markings after all fireable transition have been fired.\n :param marking: current marking\n :param firing_dict: Dictionary which provides us with the firing infomation whereby the key is the transition\n :param requirement_dict: Dict, whereby keys are transitions, key np.array that marks from which places amount of tokens is needed to fire\n :return: List of tuples, whereby the first element is the new marking, the second element is the transition that\n was used to reach the new marking.\n \"\"\"\n firing_result = []\n for transition,requirement in requirement_dict.items():\n if all(np.greater_equal(marking, requirement.copy()*-1)):\n firing_result.append((marking+firing_dict[transition], transition))\n return firing_result\n \n def convert_marking(self, net, marking):\n \"\"\"\n Since we are working with numpy arrays as representation of marking, we need no transform the initial marking\n to such a representation\n :param marking: Marking which is returned from discovery algorithms of PM4Py\n :return: numpy array that represents the initial marking\n \"\"\"\n places=list(net.places)\n conv=np.zeros(len(places))\n i=0\n while i<len(places):\n if places[i] in marking:\n conv[i]=marking[places[i]]\n i+=1\n return conv\n\n def construct_reachability_graph(self,initial_marking, net, n):\n \"\"\"\n We construct a reachability graph. Important to note is that we only expand nodes/marking, which can be reached in\n at most n not-tau transitions.\n :param initial_marking: inital marking of Petri Net, already coverted into a np.array representation\n :param net: Petri Net Object\n :param n: How many not tau transitions are considered\n :return: Networkx Multi-Directed Graph object\n \"\"\"\n #compute incidence matrix\n incidence_matrix=self.compute_incidence_matrix(net)\n \n #compute requirment_dict\n requirement_dict=self.requirement_firing(net)\n\n #compute firing dict\n firing_dict=self.transition_firing_information(incidence_matrix,net)\n\n #output graph is a MulitDiGraph to represent self-loops and parallel edges\n graph=nx.MultiDiGraph()\n #the inital marking is node 0. 
We add the marking as data to the node\n j=0\n graph.add_node(j, marking=initial_marking, distance_from_source=0)\n\n #the reference table reveals the node-number a particular marking has\n reference_table={}\n reference_table[np.array2string(initial_marking)]=j\n\n #our set of nodes which has to be extended in the reachability graph\n work=set()\n work.add(j)\n j+=1\n while len(work)>0:\n #This set contains nodes that are during the computation of the graph faster reachable\n updated_nodes = set()\n #select a random marking and remove it from the work set\n mark=work.pop()\n #a list of marking that are reachble by firing transition which are currently available\n reachable_markings=self.get_post_firing_marking(graph.nodes[mark]['marking'], firing_dict, requirement_dict)\n for marking in reachable_markings:\n if np.array2string(marking[0]) not in reference_table:\n #the first element of marking represent the numpy representation of that marking, the second the\n # transition that was taken\n #If the marking is not in the graph yet, we add it\n reference_table[np.array2string(marking[0])]=j\n graph.add_node(j, marking=marking[0])\n if marking[1].label == None:\n #if the transition that fires, we need an edge of weight 0. This is done because we only care about\n # the projected run/trace at the end\n graph.add_edge(mark, j, transition=marking[1], weight=0)\n graph.nodes[j]['distance_from_source'] = graph.nodes[mark]['distance_from_source']\n else:\n #If a non-tau transition fires, we need a weight of 1\n graph.add_edge(mark,j, transition=marking[1], weight=1)\n graph.nodes[j]['distance_from_source'] = graph.nodes[mark]['distance_from_source']+1\n if graph.nodes[j]['distance_from_source']<=n:\n #only if the distance is not exceed, we add the node to our work set to expand it\n work.add(j)\n j+=1\n else:\n #The marking is in the graph. However, we have to add the edge.\n observerable_marking=reference_table[np.array2string(marking[0])]\n if marking[1].label == None:\n graph.add_edge(mark, observerable_marking, transition=marking[1], weight=0)\n else:\n graph.add_edge(mark, observerable_marking, transition=marking[1], weight=1)\n min=np.inf\n #since there might be multiple arcs beetween states, we want to get the minimum weight to have a minimum distance between these\n for arc in graph.get_edge_data(mark, observerable_marking):\n if graph.get_edge_data(mark, observerable_marking)[arc]['weight']<min:\n min=graph.get_edge_data(mark, observerable_marking)[arc]['weight']\n distance_from_source_over_m=graph.nodes[mark]['distance_from_source']+min\n if distance_from_source_over_m<graph.nodes[observerable_marking]['distance_from_source']:\n graph.nodes[observerable_marking]['distance_from_source']=distance_from_source_over_m\n updated_nodes.add((observerable_marking, graph.nodes[observerable_marking]['distance_from_source']-distance_from_source_over_m))\n for el in updated_nodes:\n #since we have an directed graph, bfs returns us every successor and even their sucessors. 
These\n #are the nodes which distance have to be updated due a shorter path\n edges = nx.bfs_edges(graph, el[0])\n for (v,w) in edges:\n min = np.inf\n #since there might be multiple arcs beetween states, we want to get the minimum weight to have a minimum distance between these\n for arc in graph.get_edge_data(v, w):\n if graph.get_edge_data(v, w)[arc]['weight'] < min:\n min = graph.get_edge_data(v, w)[arc]['weight']\n if graph.nodes[v]['distance_from_source']+min<graph.nodes[w]['distance_from_source']:\n #if there is shorter path than before, we have to chek if the sucessor had previously a distance greater than n\n if graph.nodes[w]['distance_from_source'] > n and graph.nodes[v]['distance_from_source']+min<n:\n # If the \"new\" distance after eding edges/nodes is smaller and the old distance exceed our limit,\n # we need to consider the node in the work set for expansion\n work.add(w)\n #We have to update the distance value due a shorter path\n graph.nodes[w]['distance_from_source'] =graph.nodes[v]['distance_from_source']+min\n return graph\n\n def annotate_distance_to_sink(self,graph,final_marking):\n \"\"\"\n Each node gets annotated with the the distance to the final marking. Distance means the number of non-tau\n transitions to reach the final marking\n :param graph: Reachability graph, networkx MultiDiGraph object\n :return: Networkx MultiDiGraph whereby each node gets an additional attribute 'distance_to_sink'\n \"\"\"\n for node in graph.nodes: \n if np.array_equal(graph.nodes[node]['marking'],final_marking):\n sink=node\n break\n for node in graph.nodes:\n #it is possible that the final marking cannot be reached from a node\n try:\n graph.nodes[node]['distance_to_sink'] = nx.shortest_path_length(graph, source=node, target=sink, weight='weight')\n except nx.NetworkXNoPath:\n #if the final marking cannot be reached, we remove the node from the reachbility graph\n graph.remove_node(node)\n return graph\n\n def apply(self,depth):#,net, i_m, f_m, \n \"\"\"\n Apply method. Use from outside to get an reachbility graph.\n The reachability graph consist of the following attributes:\n For nodes:\n marking: np.arrray representation of the marking\n distance_from_source: number of non-tau transitions to get to the marking of the node, starting from the initial\n distance_to_sink: number of non-tau transitions to get to the final marking\n For edges:\n transition: transition object of the given Petri Net that is fired\n weight: If a tau-tranisiton has fired, the weight is 0; else 1. Since we are interested in the projected trace,\n we need to distinguish.\n :param net: PM4Py Petri Net object\n :param i_m: initial marking of the Petri Net object\n :param f_m: final marking of the Petri Net object\n :param depth: Depth of the reachability graph, meaning, maximum of non-tau transitions to the final marking\n :return: 3-tupel, consisting of two nodes and a networkx MultiDigraph object. 
The first node contains the initial\n marking, the second node contains the final marking\n \"\"\"\n initial_marking = self.convert_marking(self.net,self.i_m)\n final_marking = self.convert_marking(self.net,self.f_m)\n reachability_graph =self.construct_reachability_graph(initial_marking, self.net, depth)\n reachability_graph = self.annotate_distance_to_sink(reachability_graph, final_marking)\n #Due the construction of the graph, we know that the initial marking is observerable in node 0\n inital_marking_node = 0\n for node in reachability_graph.nodes:\n if np.array_equal(reachability_graph.nodes[node]['marking'], final_marking):\n final_marking_node = node\n return (inital_marking_node, final_marking_node, reachability_graph)\n"
] | [
[
"numpy.array2string",
"numpy.array_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
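The `SearchTree` class above builds its reachability graph from an incidence matrix: each column records how many tokens a transition consumes (negative entries) and produces (positive entries), and a transition may fire only when the current marking covers its consumption vector. The toy sketch below illustrates just that firing arithmetic on a hypothetical two-transition net; the matrices and names are made up for the example, not taken from the repository.

```python
import numpy as np

# Toy net p0 --t0--> p1 --t1--> p2; columns of the incidence matrix are transitions.
incidence = np.array([[-1,  0],
                      [ 1, -1],
                      [ 0,  1]])
consumption = np.minimum(incidence, 0)   # tokens each transition needs (negative entries)
marking = np.array([1, 0, 0])            # one token in p0

for t in range(incidence.shape[1]):
    if np.all(marking + consumption[:, t] >= 0):   # enabled: marking covers the requirement
        print(f"t{t} fires ->", marking + incidence[:, t])
    else:
        print(f"t{t} is not enabled under", marking)
```

With the marking `[1, 0, 0]`, only `t0` is enabled and firing it yields `[0, 1, 0]`, which is the same update that `get_post_firing_marking` performs with its `firing_dict` columns.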
jkulhanek/viewformer | [
"9ad2c5a2f7abe4b7ff490ced0132bf3d2f07e29c",
"9ad2c5a2f7abe4b7ff490ced0132bf3d2f07e29c"
] | [
"viewformer/evaluate/evaluate_transformer_multictx_allimg.py",
"viewformer/commands/generate_codes.py"
] | [
"from aparse import click, ConditionalType\nimport os\nimport tqdm\nimport json\nimport numpy as np\nfrom PIL import Image\nfrom typing import Optional\nfrom typing import List\nfrom viewformer.utils.tensorflow import load_model\nfrom viewformer.data.loaders import get_loaders\nimport tensorflow as tf\nfrom viewformer.evaluate.evaluate_transformer_multictx import MultiContextEvaluator, print_metrics, to_relative_cameras, resize_tf, normalize_cameras, from_relative_cameras\n\n\ndef transformer_predict(cameras, codes, *, transformer_model):\n if transformer_model.config.augment_poses == 'relative':\n # Rotate poses for relative augment\n cameras, transform = to_relative_cameras(cameras)\n cameras = normalize_cameras(cameras)\n\n # Generate image tokens\n with tf.name_scope('transformer_generate_images'):\n # Remove prediction information\n input_ids = tf.concat([codes[:, :-1], tf.fill(tf.shape(codes[:, :1]),\n tf.constant(transformer_model.mask_token, dtype=codes.dtype))], 1)\n context_cameras = tf.concat([cameras[:, :-1], tf.zeros_like(cameras[:, :1])], 1)\n\n # Task specific outputs\n image_generation_query_cameras = tf.tile(cameras[:, -1:], [1, tf.shape(cameras)[1], 1])\n localization_query_tokens = tf.tile(codes[:, -1:], [1, tf.shape(cameras)[1], 1, 1])\n\n # Generate outputs\n output = transformer_model(dict(input_ids=input_ids,\n poses=context_cameras,\n localization_tokens=localization_query_tokens,\n output_poses=image_generation_query_cameras), training=False)\n\n # Format output\n generated_codes = tf.argmax(output['logits'], -1)\n generated_cameras = transformer_model.reduce_cameras(output['pose_prediction'], -2)\n\n # Erase relative transform\n if transformer_model.config.augment_poses == 'relative':\n generated_cameras = from_relative_cameras(generated_cameras, transform)\n return generated_cameras, generated_codes\n\n\ndef run_with_batchsize(fn, batch_size, *args, **kwargs):\n total = len(args[0])\n splits = [min(batch_size, (total - i * batch_size)) for i in range((total + batch_size - 1) // batch_size)]\n outs = []\n for i, bs in enumerate(splits):\n largs = [x[i * batch_size: (i+1) * batch_size] for x in args]\n louts = fn(*largs, **kwargs)\n outs.append(louts)\n if isinstance(outs[0], tuple):\n return tuple(tf.concat([x[i] for x in outs], 0) for i in range(len(outs[0])))\n else:\n return tf.concat(outs, 0)\n\n\ndef encode_images(frames, *, codebook_model):\n with tf.name_scope('encode'):\n def encode(images):\n fimages = resize_tf(images, codebook_model.config.image_size)\n fimages = tf.image.convert_image_dtype(fimages, tf.float32)\n fimages = fimages * 2 - 1\n codes = codebook_model.encode(fimages)[-1] # [N, H', W']\n codes = tf.cast(codes, dtype=tf.int32)\n return codes\n\n # codes = tf.ragged.map_flat_values(encode, codes)\n batch_size, seq_len, *im_dim = tf.unstack(tf.shape(frames), 5)\n codes = encode(tf.reshape(frames, [batch_size * seq_len] + list(im_dim)))\n code_len = tf.shape(codes)[-1]\n codes = tf.reshape(codes, [batch_size, seq_len, code_len, code_len])\n return codes\n\n\ndef decode_code(generated_codes, *, codebook_model):\n with tf.name_scope('decode_images'):\n batch_size, seq_len, token_image_shape = tf.split(tf.shape(generated_codes), (1, 1, 2), 0)\n generated_images = codebook_model.decode_code(tf.reshape(generated_codes, tf.concat((batch_size * seq_len, token_image_shape), 0)))\n generated_images = tf.clip_by_value(generated_images, -1, 1)\n generated_images = tf.image.convert_image_dtype(generated_images / 2 + 0.5, tf.uint8)\n generated_images = 
tf.reshape(generated_images, tf.concat((batch_size, seq_len, tf.shape(generated_images)[-3:]), 0))\n return generated_images\n\n\n#\n# Types used in argument parsing\n#\ndef _loader_switch_cls(cls):\n class Loader(cls):\n # Disable arguments in loader classes\n def __init__(self, *args, image_size=None, shuffle=None, shuffle_sequence_items=None, shuffle_sequences=None, **kwargs):\n raise NotImplementedError()\n\n def __new__(_cls, *args, **kwargs):\n # Return callback to construct Loader on the Fly\n return lambda image_size: cls(*args, **kwargs, image_size=image_size, shuffle_sequences=False, shuffle_sequence_items=False)\n return Loader\n\n\nLoaderSwitch = ConditionalType('Loader', {k: _loader_switch_cls(v) for k, v in get_loaders().items()}, default='dataset')\n\n\[email protected]('evaluate-allimg')\ndef main(loader: LoaderSwitch,\n transformer_model: str,\n codebook_model: str,\n job_dir: str,\n context_views: List[int] = None,\n pose_multiplier: Optional[float] = None,\n image_size: Optional[int] = None):\n transformer_config = dict()\n if pose_multiplier is not None:\n transformer_config['pose_multiplier'] = pose_multiplier\n transformer_model = load_model(transformer_model, **transformer_config)\n codebook_model = load_model(codebook_model)\n loader = loader(codebook_model.config.image_size)\n n_context_views = len(context_views) if context_views is not None else (transformer_model.config.sequence_size - 1)\n evaluator = MultiContextEvaluator(n_context_views + 1, image_size=image_size)\n rng = np.random.default_rng(42)\n\n with tqdm.tqdm(total=len(loader)) as progress:\n for seq in loader:\n c_context_views = context_views\n if c_context_views is None:\n c_context_views = list(rng.choice(len(seq['frames']), (n_context_views,), replace=False))\n frames = np.array(seq['frames'])[np.newaxis, ...]\n cameras = np.stack(seq['cameras'])[np.newaxis, ...].astype('float32')\n frames, cameras = tf.convert_to_tensor(frames), tf.convert_to_tensor(cameras)\n codes = encode_images(frames, codebook_model=codebook_model)\n generated_cameras, generated_codes = [], []\n tcodes = np.concatenate([np.stack([codes[:, j] for j in c_context_views + [i]], 1) for i in range(len(seq['frames']))], 0)\n tcameras = np.concatenate([np.stack([cameras[:, j] for j in c_context_views + [i]], 1) for i in range(len(seq['frames']))], 0)\n generated_cameras, generated_codes = run_with_batchsize(transformer_predict, 128, tcameras, tcodes, transformer_model=transformer_model)\n\n # Decode images\n generated_images = run_with_batchsize(decode_code, 64, generated_codes, codebook_model=codebook_model)\n eval_frames = [x for x in range(len(generated_images)) if x not in c_context_views]\n evaluator.update_state(\n ground_truth_cameras=tf.stack([cameras[0, x] for x in eval_frames], 0),\n ground_truth_images=tf.stack([frames[0, x] for x in eval_frames], 0),\n generated_images=tf.stack([generated_images[x] for x in eval_frames], 0),\n generated_cameras=tf.stack([generated_cameras[x] for x in eval_frames], 0))\n for i in range(0, 1 + len(c_context_views)):\n os.makedirs(os.path.join(job_dir, 'gen_images', seq['sequence_id'], f'gen-{i:02}'), exist_ok=True)\n os.makedirs(os.path.join(job_dir, 'gen_images', seq['sequence_id'], 'gt'), exist_ok=True)\n os.makedirs(os.path.join(job_dir, 'gen_images', seq['sequence_id'], 'ctx'), exist_ok=True)\n for i, c in enumerate(c_context_views):\n Image.fromarray(frames[0, c].numpy()).save(os.path.join(job_dir, 'gen_images', seq['sequence_id'], 'ctx', f'{i:02}-{c:03}.png'))\n for i, c in 
enumerate(frames[0]):\n Image.fromarray(c.numpy()).save(os.path.join(job_dir, 'gen_images', seq['sequence_id'], 'gt', f'{i:03}.png'))\n for i, c in enumerate(generated_images):\n for j, d in enumerate(c):\n Image.fromarray(d.numpy()).save(os.path.join(job_dir, 'gen_images', seq['sequence_id'], f'gen-{j:02}', f'{i:03}.png'))\n progress.set_postfix(evaluator.get_progress_bar_info())\n progress.update()\n\n result = evaluator.result()\n with open(os.path.join(job_dir, 'results.json'), 'w+') as f:\n json.dump(result, f)\n print('Results:')\n print_metrics(result)\n\n\nif __name__ == '__main__':\n main()\n",
"#!/usr/bin/env python\nfrom aparse import click\nfrom typing import List\nfrom viewformer.utils import SplitIndices\nfrom viewformer.data import transform_dataset\n\n# Use memory growth for tf\ntry:\n import tensorflow as tf\n gpus = tf.config.list_physical_devices('GPU')\n if gpus:\n # Currently, memory growth needs to be the same across GPUs\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\nexcept ImportError:\n pass\n\n\nclass LatentCodeTransformer:\n def _convert_image_type(self, image):\n if image.dtype == 'uint8':\n image = (image.astype('float32') / 255.) * 2. - 1.\n if image.shape[-1] == 3:\n image = image.transpose((0, 3, 1, 2))\n return image\n\n def update_dataset_info(self, dataset_info):\n dataset_info['token_image_size'] = self.image_size // self.model.config.stride\n self.dataset_info = dataset_info\n return dataset_info\n\n def __init__(self, model, batch_size: int = None, device=None):\n if device is not None:\n model = model.to(device)\n self.model = model\n self.image_size = model.config.image_size\n self.batch_size = batch_size if batch_size is not None else model.config.batch_size\n self.device = device\n\n def output_features(self, features):\n if features is not None and 'cameras-gqn' in features:\n return ['codes', 'cameras-gqn']\n else:\n return ['codes', 'cameras']\n\n def __call__(self, split, dataset):\n import torch\n import webdataset as wds\n\n with torch.no_grad():\n dataset = wds.filters.map_(dataset, lambda x: (torch.from_numpy(x['cameras']), torch.from_numpy(self._convert_image_type(x['frames'])), [len(x['frames'])] * len(x['frames'])))\n dataset = wds.filters.unbatched_(dataset)\n dataset = wds.filters.batched_(dataset, self.batch_size)\n\n past_cameras = None\n past_codes = None\n\n def update_cummulative_variable(past, value, sequence_sizes):\n sequence_sizes = list(sequence_sizes)\n output = []\n if past is not None:\n value = torch.cat([past, value], 0)\n sequence_sizes = ([sequence_sizes[0]] * len(past)) + sequence_sizes\n while len(sequence_sizes) > 0 and len(value) >= sequence_sizes[0]:\n output.append(value[:sequence_sizes[0]])\n value = value[sequence_sizes[0]:]\n sequence_sizes = sequence_sizes[sequence_sizes[0]:]\n past = value\n return past, output\n\n if hasattr(self.model, 'encode'):\n predict_step = lambda x: self.model.encode(x.to(self.device))[-1].detach().cpu()\n else:\n predict_step = lambda x: self.model(x.to(self.device))[-1].detach().cpu()\n for batch_id, (cameras, frames, sequence_sizes) in enumerate(dataset):\n codes = predict_step(frames)\n past_codes, codes = update_cummulative_variable(past_codes, codes, sequence_sizes)\n past_cameras, cameras = update_cummulative_variable(past_cameras, cameras, sequence_sizes)\n for cur_cameras, cur_codes in zip(cameras, codes):\n yield dict(cameras=cur_cameras, codes=cur_codes)\n\n\[email protected]('generate-codes')\ndef main(dataset: str, output: str, model: str,\n shards: SplitIndices = None,\n batch_size: int = None,\n splits: List[str] = None,\n profile_batch_id: int = None, use_gpu: bool = True):\n import torch\n from viewformer.utils.torch import load_model\n device = 'cpu' if not use_gpu or torch.cuda.device_count() == 0 else 'cuda'\n device = torch.device(device)\n model = load_model(model)\n transformer = LatentCodeTransformer(model, batch_size=batch_size, device=device)\n transform_dataset(dataset, output, transformer,\n splits=splits,\n shards=shards)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"tensorflow.clip_by_value",
"tensorflow.convert_to_tensor",
"tensorflow.concat",
"tensorflow.constant",
"tensorflow.shape",
"tensorflow.stack",
"tensorflow.reshape",
"tensorflow.cast",
"numpy.stack",
"tensorflow.zeros_like",
"tensorflow.name_scope",
"tensorflow.image.convert_image_dtype",
"tensorflow.argmax",
"numpy.array",
"numpy.random.default_rng"
],
[
"tensorflow.config.experimental.set_memory_growth",
"torch.cat",
"torch.from_numpy",
"torch.no_grad",
"tensorflow.config.list_physical_devices",
"torch.device",
"torch.cuda.device_count"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
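`run_with_batchsize` in the evaluation script above chops a long sequence of context/query combinations into fixed-size chunks before calling `transformer_predict` and `decode_code`, then concatenates the per-chunk outputs. A stripped-down, hedged sketch of that pattern follows; tuple outputs and keyword arguments are omitted, and `run_chunked` with its lambda stand-in for the model are illustrative names rather than the repository's API.

```python
import tensorflow as tf

def run_chunked(model_fn, batch_size, inputs):
    # Run `model_fn` over `inputs` in fixed-size chunks and concatenate the results.
    total = inputs.shape[0]
    outs = [model_fn(inputs[i:i + batch_size]) for i in range(0, total, batch_size)]
    return tf.concat(outs, axis=0)

x = tf.random.uniform([300, 8])
y = run_chunked(lambda batch: batch * 2.0, 128, x)   # toy stand-in for the real models
assert int(tf.shape(y)[0]) == 300
```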
kdorichev/text2speech | [
"082ed9c222fa346f6c5ad6375477807df44ed45a",
"082ed9c222fa346f6c5ad6375477807df44ed45a"
] | [
"fastpitch/transformer_jit.py",
"waveglow/model.py"
] | [
"# Adapted from\n# https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/FastPitch\n\n# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import List, Optional\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom common.utils import mask_from_lens\nfrom common.text.symbols import pad_idx, symbols\n\n\nclass NoOp(nn.Module):\n def forward(self, x):\n return x\n\n\nclass PositionalEmbedding(nn.Module):\n def __init__(self, demb):\n super(PositionalEmbedding, self).__init__()\n self.demb = demb\n inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))\n self.register_buffer('inv_freq', inv_freq)\n\n def forward(self, pos_seq, bsz: Optional[int] = None):\n sinusoid_inp = torch.ger(pos_seq, self.inv_freq)\n pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=1)\n if bsz is not None:\n return pos_emb[None, :, :].expand(bsz, -1, -1)\n else:\n return pos_emb[None, :, :]\n\n\nclass PositionwiseFF(nn.Module):\n def __init__(self, d_model, d_inner, dropout, pre_lnorm=False):\n super(PositionwiseFF, self).__init__()\n\n self.d_model = d_model\n self.d_inner = d_inner\n self.dropout = dropout\n\n self.CoreNet = nn.Sequential(\n nn.Linear(d_model, d_inner), nn.ReLU(),\n nn.Dropout(dropout),\n nn.Linear(d_inner, d_model),\n nn.Dropout(dropout),\n )\n\n self.layer_norm = nn.LayerNorm(d_model)\n self.pre_lnorm = pre_lnorm\n\n def forward(self, inp):\n if self.pre_lnorm:\n # layer normalization + positionwise feed-forward\n core_out = self.CoreNet(self.layer_norm(inp))\n\n # residual connection\n output = core_out + inp\n else:\n # positionwise feed-forward\n core_out = self.CoreNet(inp)\n\n # residual connection + layer normalization\n output = self.layer_norm(inp + core_out)\n\n return output\n\n\nclass PositionwiseConvFF(nn.Module):\n def __init__(self, d_model, d_inner, kernel_size, dropout, pre_lnorm=False):\n super(PositionwiseConvFF, self).__init__()\n\n self.d_model = d_model\n self.d_inner = d_inner\n self.dropout = dropout\n\n self.CoreNet = nn.Sequential(\n nn.Conv1d(d_model, d_inner, kernel_size, 1, (kernel_size // 2)),\n nn.ReLU(),\n # nn.Dropout(dropout), # worse convergence\n nn.Conv1d(d_inner, d_model, kernel_size, 1, (kernel_size // 2)),\n nn.Dropout(dropout),\n )\n self.layer_norm = nn.LayerNorm(d_model)\n self.pre_lnorm = pre_lnorm\n\n def forward(self, inp):\n if self.pre_lnorm:\n # layer normalization + positionwise feed-forward\n core_out = inp.transpose(1, 2)\n core_out = self.CoreNet(self.layer_norm(core_out))\n core_out = core_out.transpose(1, 2)\n\n # residual connection\n output = core_out + inp\n else:\n # positionwise feed-forward\n core_out = inp.transpose(1, 2)\n core_out = self.CoreNet(core_out)\n core_out = core_out.transpose(1, 2)\n\n # residual connection + layer normalization\n output = self.layer_norm(inp + core_out)\n\n return output\n\n\nclass MultiHeadAttn(nn.Module):\n def __init__(self, n_head, d_model, d_head, 
dropout, dropatt=0.1,\n pre_lnorm=False):\n super(MultiHeadAttn, self).__init__()\n\n self.n_head = n_head\n self.d_model = d_model\n self.d_head = d_head\n self.scale = 1 / (d_head ** 0.5)\n self.dropout = dropout\n self.pre_lnorm = pre_lnorm\n\n self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head)\n self.drop = nn.Dropout(dropout)\n self.dropatt = nn.Dropout(dropatt)\n self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)\n self.layer_norm = nn.LayerNorm(d_model)\n\n\n def forward(self, inp, attn_mask: Optional[torch.Tensor] = None):\n residual = inp\n\n if self.pre_lnorm:\n # layer normalization\n inp = self.layer_norm(inp)\n\n n_head, d_head = self.n_head, self.d_head\n\n head_q, head_k, head_v = torch.chunk(self.qkv_net(inp), 3, dim=-1)\n head_q = head_q.view(inp.size(0), inp.size(1), n_head, d_head)\n head_k = head_k.view(inp.size(0), inp.size(1), n_head, d_head)\n head_v = head_v.view(inp.size(0), inp.size(1), n_head, d_head)\n\n q = head_q.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)\n k = head_k.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)\n v = head_v.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)\n\n attn_score = torch.bmm(q, k.transpose(1, 2))\n attn_score.mul_(self.scale)\n\n if attn_mask is not None:\n attn_mask = attn_mask.unsqueeze(1)\n attn_mask = attn_mask.repeat(n_head, attn_mask.size(2), 1)\n attn_score.masked_fill_(attn_mask, -float('inf'))\n\n attn_prob = F.softmax(attn_score, dim=2)\n attn_prob = self.dropatt(attn_prob)\n attn_vec = torch.bmm(attn_prob, v)\n\n attn_vec = attn_vec.view(n_head, inp.size(0), inp.size(1), d_head)\n attn_vec = attn_vec.permute(1, 2, 0, 3).contiguous().view(\n inp.size(0), inp.size(1), n_head * d_head)\n\n # linear projection\n attn_out = self.o_net(attn_vec)\n attn_out = self.drop(attn_out)\n\n if self.pre_lnorm:\n # residual connection\n output = residual + attn_out\n else:\n # residual connection + layer normalization\n\n # XXX Running TorchScript on 20.02 and 20.03 containers crashes here\n # XXX Works well with 20.01-py3 container.\n # XXX dirty fix is:\n # XXX output = self.layer_norm(residual + attn_out).half()\n output = self.layer_norm(residual + attn_out)\n\n return output\n\n # disabled; slower\n def forward_einsum(self, h, attn_mask=None):\n # multihead attention\n # [hlen x bsz x n_head x d_head]\n\n c = h\n\n if self.pre_lnorm:\n # layer normalization\n c = self.layer_norm(c)\n\n head_q = self.q_net(h)\n head_k, head_v = torch.chunk(self.kv_net(c), 2, -1)\n\n head_q = head_q.view(h.size(0), h.size(1), self.n_head, self.d_head)\n head_k = head_k.view(c.size(0), c.size(1), self.n_head, self.d_head)\n head_v = head_v.view(c.size(0), c.size(1), self.n_head, self.d_head)\n\n # [bsz x n_head x qlen x klen]\n # attn_score = torch.einsum('ibnd,jbnd->bnij', (head_q, head_k))\n attn_score = torch.einsum('bind,bjnd->bnij', (head_q, head_k))\n attn_score.mul_(self.scale)\n if attn_mask is not None and attn_mask.any().item():\n attn_score.masked_fill_(attn_mask[:, None, None, :], -float('inf'))\n\n # [bsz x qlen x klen x n_head]\n attn_prob = F.softmax(attn_score, dim=3)\n attn_prob = self.dropatt(attn_prob)\n\n # [bsz x n_head x qlen x klen] * [klen x bsz x n_head x d_head] \n # -> [qlen x bsz x n_head x d_head]\n attn_vec = torch.einsum('bnij,bjnd->bind', (attn_prob, head_v))\n attn_vec = attn_vec.contiguous().view(\n attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)\n\n # linear projection\n attn_out = self.o_net(attn_vec)\n attn_out = self.drop(attn_out)\n\n if self.pre_lnorm:\n # residual 
connection\n output = h + attn_out\n else:\n # residual connection + layer normalization\n output = self.layer_norm(h + attn_out)\n\n return output\n\n\nclass TransformerLayer(nn.Module):\n def __init__(self, n_head, d_model, d_head, d_inner, kernel_size, dropout,\n **kwargs):\n super(TransformerLayer, self).__init__()\n\n self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs)\n self.pos_ff = PositionwiseConvFF(d_model, d_inner, kernel_size, dropout,\n pre_lnorm=kwargs.get('pre_lnorm'))\n\n def forward(self, dec_inp, mask):\n output = self.dec_attn(dec_inp, attn_mask=~mask.squeeze(2))\n output *= mask\n output = self.pos_ff(output)\n output *= mask\n return output\n\n\nclass FFTransformer(nn.Module):\n pad_idx = 0 # XXX\n\n def __init__(self, n_layer, n_head, d_model, d_head, d_inner, kernel_size,\n dropout, dropatt, dropemb=0.0, embed_input=True, d_embed=None,\n pre_lnorm=False):\n super(FFTransformer, self).__init__()\n self.d_model = d_model\n self.n_head = n_head\n self.d_head = d_head\n\n self.embed_input = embed_input\n if embed_input:\n self.word_emb = nn.Embedding(len(symbols), d_embed or d_model,\n padding_idx=FFTransformer.pad_idx)\n else:\n self.word_emb = NoOp()\n\n self.pos_emb = PositionalEmbedding(self.d_model)\n self.drop = nn.Dropout(dropemb)\n self.layers = nn.ModuleList()\n\n for _ in range(n_layer):\n self.layers.append(\n TransformerLayer(\n n_head, d_model, d_head, d_inner, kernel_size, dropout,\n dropatt=dropatt, pre_lnorm=pre_lnorm)\n )\n\n def forward(self, dec_inp, seq_lens: Optional[torch.Tensor] = None):\n if self.embed_input:\n inp = self.word_emb(dec_inp)\n # [bsz x L x 1]\n # mask = (dec_inp != FFTransformer.pad_idx).unsqueeze(2)\n mask = (dec_inp != 0).unsqueeze(2)\n else:\n inp = dec_inp\n assert seq_lens is not None\n mask = mask_from_lens(seq_lens).unsqueeze(2)\n pos_seq = torch.arange(inp.size(1), device=inp.device, dtype=inp.dtype)\n pos_emb = self.pos_emb(pos_seq) * mask\n out = self.drop(inp + pos_emb)\n\n for layer in self.layers:\n out = layer(out, mask=mask)\n\n # out = self.drop(out)\n return out, mask\n",
"# Adapted from\n# https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/FastPitch\n\n# *****************************************************************************\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the NVIDIA CORPORATION nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# *****************************************************************************\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\n\[email protected]\ndef fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):\n n_channels_int = n_channels[0]\n in_act = input_a + input_b\n t_act = torch.tanh(in_act[:, :n_channels_int, :])\n s_act = torch.sigmoid(in_act[:, n_channels_int:, :])\n acts = t_act * s_act\n return acts\n\n\nclass Invertible1x1Conv(torch.nn.Module):\n \"\"\"\n The layer outputs both the convolution, and the log determinant\n of its weight matrix. 
If reverse=True it does convolution with\n inverse\n \"\"\"\n\n def __init__(self, c):\n super(Invertible1x1Conv, self).__init__()\n self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0,\n bias=False)\n\n # Sample a random orthonormal matrix to initialize weights\n W = torch.qr(torch.FloatTensor(c, c).normal_())[0]\n\n # Ensure determinant is 1.0 not -1.0\n if torch.det(W) < 0:\n W[:, 0] = -1 * W[:, 0]\n W = W.view(c, c, 1)\n self.conv.weight.data = W\n\n def forward(self, z):\n # shape\n batch_size, group_size, n_of_groups = z.size()\n\n W = self.conv.weight.squeeze()\n\n # Forward computation\n log_det_W = batch_size * n_of_groups * torch.logdet(W.unsqueeze(0).float()).squeeze()\n z = self.conv(z)\n return z, log_det_W\n\n\n def infer(self, z):\n # shape\n batch_size, group_size, n_of_groups = z.size()\n\n W = self.conv.weight.squeeze()\n\n if not hasattr(self, 'W_inverse'):\n # Reverse computation\n W_inverse = W.float().inverse()\n W_inverse = Variable(W_inverse[..., None])\n if z.type() == 'torch.cuda.HalfTensor' or z.type() == 'torch.HalfTensor':\n W_inverse = W_inverse.half()\n self.W_inverse = W_inverse\n z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)\n return z\n\n\nclass WN(torch.nn.Module):\n \"\"\"\n This is the WaveNet like layer for the affine coupling. The primary\n difference from WaveNet is the convolutions need not be causal. There is\n also no dilation size reset. The dilation only doubles on each layer\n \"\"\"\n\n def __init__(self, n_in_channels, n_mel_channels, n_layers, n_channels,\n kernel_size):\n super(WN, self).__init__()\n assert(kernel_size % 2 == 1)\n assert(n_channels % 2 == 0)\n self.n_layers = n_layers\n self.n_channels = n_channels\n self.in_layers = torch.nn.ModuleList()\n self.res_skip_layers = torch.nn.ModuleList()\n self.cond_layers = torch.nn.ModuleList()\n\n start = torch.nn.Conv1d(n_in_channels, n_channels, 1)\n start = torch.nn.utils.weight_norm(start, name='weight')\n self.start = start\n\n # Initializing last layer to 0 makes the affine coupling layers\n # do nothing at first. 
This helps with training stability\n end = torch.nn.Conv1d(n_channels, 2 * n_in_channels, 1)\n end.weight.data.zero_()\n end.bias.data.zero_()\n self.end = end\n\n for i in range(n_layers):\n dilation = 2 ** i\n padding = int((kernel_size * dilation - dilation) / 2)\n in_layer = torch.nn.Conv1d(n_channels, 2 * n_channels, kernel_size,\n dilation=dilation, padding=padding)\n in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')\n self.in_layers.append(in_layer)\n\n cond_layer = torch.nn.Conv1d(n_mel_channels, 2 * n_channels, 1)\n cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')\n self.cond_layers.append(cond_layer)\n\n # last one is not necessary\n if i < n_layers - 1:\n res_skip_channels = 2 * n_channels\n else:\n res_skip_channels = n_channels\n res_skip_layer = torch.nn.Conv1d(n_channels, res_skip_channels, 1)\n res_skip_layer = torch.nn.utils.weight_norm(\n res_skip_layer, name='weight')\n self.res_skip_layers.append(res_skip_layer)\n\n def forward(self, forward_input):\n audio, spect = forward_input\n audio = self.start(audio)\n\n for i in range(self.n_layers):\n acts = fused_add_tanh_sigmoid_multiply(\n self.in_layers[i](audio),\n self.cond_layers[i](spect),\n torch.IntTensor([self.n_channels]))\n\n res_skip_acts = self.res_skip_layers[i](acts)\n if i < self.n_layers - 1:\n audio = res_skip_acts[:, :self.n_channels, :] + audio\n skip_acts = res_skip_acts[:, self.n_channels:, :]\n else:\n skip_acts = res_skip_acts\n\n if i == 0:\n output = skip_acts\n else:\n output = skip_acts + output\n return self.end(output)\n\n\nclass WaveGlow(torch.nn.Module):\n def __init__(self, n_mel_channels: int, n_flows: int, n_group: int, n_early_every: int,\n n_early_size: int, WN_config):\n \"\"\"Initialize WaveGlow object.\n\n Args:\n n_mel_channels (int): Number of bins in mel-spectrograms\n n_flows (int): Number of steps of flow\n n_group (int): Number of samples in a group processed by the steps of flow\n n_early_every (int): Determines how often (i.e., after how many coupling layers)\n a number of channels (defined by --early-size parameter) are output\n to the loss function\n n_early_size (int): Number of channels output to the loss function\n WN_config ([type]): [description]\n \"\"\"\n\n super(WaveGlow, self).__init__()\n\n self.upsample = torch.nn.ConvTranspose1d(n_mel_channels,\n n_mel_channels,\n 1024, stride=256)\n assert(n_group % 2 == 0)\n self.n_flows = n_flows\n self.n_group = n_group\n self.n_early_every = n_early_every\n self.n_early_size = n_early_size\n self.WN = torch.nn.ModuleList()\n self.convinv = torch.nn.ModuleList()\n\n n_half = int(n_group / 2)\n\n # Set up layers with the right sizes based on how many dimensions\n # have been output already\n n_remaining_channels = n_group\n for k in range(n_flows):\n if k % self.n_early_every == 0 and k > 0:\n n_half = n_half - int(self.n_early_size / 2)\n n_remaining_channels = n_remaining_channels - self.n_early_size\n self.convinv.append(Invertible1x1Conv(n_remaining_channels))\n self.WN.append(WN(n_half, n_mel_channels * n_group, **WN_config))\n self.n_remaining_channels = n_remaining_channels\n\n def forward(self, forward_input):\n \"\"\"\n forward_input[0] = mel_spectrogram: batch x n_mel_channels x frames\n forward_input[1] = audio: batch x time\n \"\"\"\n spect, audio = forward_input\n\n # Upsample spectrogram to size of audio\n spect = self.upsample(spect)\n assert(spect.size(2) >= audio.size(1))\n if spect.size(2) > audio.size(1):\n spect = spect[:, :, :audio.size(1)]\n\n spect = spect.unfold(2, 
self.n_group, self.n_group).permute(0, 2, 1, 3)\n spect = spect.contiguous().view(spect.size(0), spect.size(1), -1)\n spect = spect.permute(0, 2, 1)\n\n audio = audio.unfold(1, self.n_group, self.n_group).permute(0, 2, 1)\n output_audio = []\n log_s_list = []\n log_det_W_list = []\n\n for k in range(self.n_flows):\n if k % self.n_early_every == 0 and k > 0:\n output_audio.append(audio[:, :self.n_early_size, :])\n audio = audio[:, self.n_early_size:, :]\n\n audio, log_det_W = self.convinv[k](audio)\n log_det_W_list.append(log_det_W)\n\n n_half = int(audio.size(1) / 2)\n audio_0 = audio[:, :n_half, :]\n audio_1 = audio[:, n_half:, :]\n\n output = self.WN[k]((audio_0, spect))\n log_s = output[:, n_half:, :]\n b = output[:, :n_half, :]\n audio_1 = torch.exp(log_s) * audio_1 + b\n log_s_list.append(log_s)\n\n audio = torch.cat([audio_0, audio_1], 1)\n\n output_audio.append(audio)\n return torch.cat(output_audio, 1), log_s_list, log_det_W_list\n\n def infer(self, spect, sigma=1.0):\n \"\"\"[summary]\n\n Args:\n spect ([type]): [description]\n sigma (float, optional): [description]. Defaults to 1.0.\n\n Returns:\n [type]: [description]\n \"\"\"\n\n spect = self.upsample(spect)\n # trim conv artifacts. maybe pad spec to kernel multiple\n time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0]\n spect = spect[:, :, :-time_cutoff]\n\n spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)\n spect = spect.contiguous().view(spect.size(0), spect.size(1), -1)\n spect = spect.permute(0, 2, 1)\n\n audio = torch.randn(spect.size(0),\n self.n_remaining_channels,\n spect.size(2), device=spect.device).to(spect.dtype)\n\n audio = torch.autograd.Variable(sigma * audio)\n\n for k in reversed(range(self.n_flows)):\n n_half = int(audio.size(1) / 2)\n audio_0 = audio[:, :n_half, :]\n audio_1 = audio[:, n_half:, :]\n\n output = self.WN[k]((audio_0, spect))\n s = output[:, n_half:, :]\n b = output[:, :n_half, :]\n audio_1 = (audio_1 - b) / torch.exp(s)\n audio = torch.cat([audio_0, audio_1], 1)\n\n audio = self.convinv[k].infer(audio)\n\n if k % self.n_early_every == 0 and k > 0:\n z = torch.randn(spect.size(0), self.n_early_size, spect.size(\n 2), device=spect.device).to(spect.dtype)\n audio = torch.cat((sigma * z, audio), 1)\n\n audio = audio.permute(\n 0, 2, 1).contiguous().view(\n audio.size(0), -1).data\n return audio\n\n\n @staticmethod\n def remove_weightnorm(model):\n waveglow = model\n for WN in waveglow.WN:\n WN.start = torch.nn.utils.remove_weight_norm(WN.start)\n WN.in_layers = remove(WN.in_layers)\n WN.cond_layers = remove(WN.cond_layers)\n WN.res_skip_layers = remove(WN.res_skip_layers)\n return waveglow\n\n\ndef remove(conv_list):\n new_conv_list = torch.nn.ModuleList()\n for old_conv in conv_list:\n old_conv = torch.nn.utils.remove_weight_norm(old_conv)\n new_conv_list.append(old_conv)\n return new_conv_list\n"
] | [
[
"torch.nn.Dropout",
"torch.nn.functional.softmax",
"torch.einsum",
"torch.nn.ModuleList",
"torch.arange",
"torch.nn.LayerNorm",
"torch.nn.Linear",
"torch.bmm",
"torch.nn.Conv1d",
"torch.nn.ReLU",
"torch.ger"
],
[
"torch.sigmoid",
"torch.cat",
"torch.nn.utils.weight_norm",
"torch.nn.ModuleList",
"torch.nn.functional.conv1d",
"torch.det",
"torch.IntTensor",
"torch.nn.utils.remove_weight_norm",
"torch.tanh",
"torch.exp",
"torch.FloatTensor",
"torch.nn.Conv1d",
"torch.nn.ConvTranspose1d",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
a-chumagin/great_expectations | [
"969dacbd4e0fce74ce515e5f1dcd0918785bcd21",
"969dacbd4e0fce74ce515e5f1dcd0918785bcd21"
] | [
"great_expectations/expectations/metrics/map_metric_provider.py",
"great_expectations/execution_engine/pandas_execution_engine.py"
] | [
"import logging\nfrom functools import wraps\nfrom typing import Any, Callable, Dict, List, Optional, Type, Union\n\nimport numpy as np\n\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations.core import ExpectationConfiguration\nfrom great_expectations.core.util import convert_to_json_serializable\nfrom great_expectations.execution_engine import (\n ExecutionEngine,\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.execution_engine.execution_engine import (\n MetricDomainTypes,\n MetricFunctionTypes,\n MetricPartialFunctionTypes,\n)\nfrom great_expectations.execution_engine.sqlalchemy_execution_engine import (\n OperationalError,\n)\nfrom great_expectations.expectations.metrics import MetaMetricProvider\nfrom great_expectations.expectations.metrics.import_manager import F, Window, sa\nfrom great_expectations.expectations.metrics.metric_provider import (\n MetricProvider,\n metric_partial,\n)\nfrom great_expectations.expectations.metrics.util import Engine, Insert, Label, Select\nfrom great_expectations.expectations.registry import (\n get_metric_provider,\n register_metric,\n)\nfrom great_expectations.util import generate_temporary_table_name\nfrom great_expectations.validator.metric_configuration import MetricConfiguration\n\nlogger = logging.getLogger(__name__)\n\n\ndef column_function_partial(\n engine: Type[ExecutionEngine],\n partial_fn_type: str = None,\n **kwargs,\n):\n \"\"\"Provides engine-specific support for authoring a metric_fn with a simplified signature.\n\n A metric function that is decorated as a column_function_partial will be called with the engine-specific column type\n and any value_kwargs associated with the Metric for which the provider function is being declared.\n\n Args:\n engine:\n partial_fn_type:\n **kwargs:\n\n Returns:\n An annotated metric_function which will be called with a simplified signature.\n\n \"\"\"\n domain_type = MetricDomainTypes.COLUMN\n if issubclass(engine, PandasExecutionEngine):\n if partial_fn_type is None:\n partial_fn_type = MetricPartialFunctionTypes.MAP_SERIES\n partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)\n if partial_fn_type != MetricPartialFunctionTypes.MAP_SERIES:\n raise ValueError(\n \"PandasExecutionEngine only supports map_series for column_function_partial partial_fn_type\"\n )\n\n def wrapper(metric_fn: Callable):\n @metric_partial(\n engine=engine,\n partial_fn_type=partial_fn_type,\n domain_type=domain_type,\n **kwargs,\n )\n @wraps(metric_fn)\n def inner_func(\n cls,\n execution_engine: PandasExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n filter_column_isnull = kwargs.get(\n \"filter_column_isnull\", getattr(cls, \"filter_column_isnull\", False)\n )\n\n (\n df,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = execution_engine.get_compute_domain(\n domain_kwargs=metric_domain_kwargs, domain_type=domain_type\n )\n\n column_name = accessor_domain_kwargs[\"column\"]\n\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n if filter_column_isnull:\n df = df[df[column_name].notnull()]\n\n values = metric_fn(\n cls,\n df[column_name],\n **metric_value_kwargs,\n _metrics=metrics,\n )\n return values, compute_domain_kwargs, accessor_domain_kwargs\n\n return inner_func\n\n return 
wrapper\n\n elif issubclass(engine, SqlAlchemyExecutionEngine):\n if partial_fn_type is None:\n partial_fn_type = MetricPartialFunctionTypes.MAP_FN\n partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)\n if partial_fn_type not in [MetricPartialFunctionTypes.MAP_FN]:\n raise ValueError(\n \"SqlAlchemyExecutionEngine only supports map_fn for column_function_partial partial_fn_type\"\n )\n\n def wrapper(metric_fn: Callable):\n @metric_partial(\n engine=engine,\n partial_fn_type=partial_fn_type,\n domain_type=domain_type,\n **kwargs,\n )\n @wraps(metric_fn)\n def inner_func(\n cls,\n execution_engine: SqlAlchemyExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n filter_column_isnull = kwargs.get(\n \"filter_column_isnull\", getattr(cls, \"filter_column_isnull\", False)\n )\n if filter_column_isnull:\n compute_domain_kwargs = execution_engine.add_column_row_condition(\n metric_domain_kwargs\n )\n else:\n # We do not copy here because if compute domain is different, it will be copied by get_compute_domain\n compute_domain_kwargs = metric_domain_kwargs\n (\n selectable,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = execution_engine.get_compute_domain(\n domain_kwargs=compute_domain_kwargs, domain_type=domain_type\n )\n\n column_name = accessor_domain_kwargs[\"column\"]\n\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n dialect = execution_engine.dialect_module\n column_function = metric_fn(\n cls,\n sa.column(column_name),\n **metric_value_kwargs,\n _dialect=dialect,\n _table=selectable,\n _metrics=metrics,\n )\n return column_function, compute_domain_kwargs, accessor_domain_kwargs\n\n return inner_func\n\n return wrapper\n\n elif issubclass(engine, SparkDFExecutionEngine):\n if partial_fn_type is None:\n partial_fn_type = MetricPartialFunctionTypes.MAP_FN\n partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)\n if partial_fn_type not in [\n MetricPartialFunctionTypes.MAP_FN,\n MetricPartialFunctionTypes.WINDOW_FN,\n ]:\n raise ValueError(\n \"SparkDFExecutionEngine only supports map_fn and window_fn for column_function_partial partial_fn_type\"\n )\n\n def wrapper(metric_fn: Callable):\n @metric_partial(\n engine=engine,\n partial_fn_type=partial_fn_type,\n domain_type=domain_type,\n **kwargs,\n )\n @wraps(metric_fn)\n def inner_func(\n cls,\n execution_engine: SparkDFExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n filter_column_isnull = kwargs.get(\n \"filter_column_isnull\", getattr(cls, \"filter_column_isnull\", False)\n )\n\n if filter_column_isnull:\n compute_domain_kwargs = execution_engine.add_column_row_condition(\n metric_domain_kwargs\n )\n else:\n # We do not copy here because if compute domain is different, it will be copied by get_compute_domain\n compute_domain_kwargs = metric_domain_kwargs\n\n (\n data,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = execution_engine.get_compute_domain(\n domain_kwargs=compute_domain_kwargs, domain_type=domain_type\n )\n\n column_name = accessor_domain_kwargs[\"column\"]\n\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n column 
= data[column_name]\n column_function = metric_fn(\n cls,\n column=column,\n **metric_value_kwargs,\n _metrics=metrics,\n _compute_domain_kwargs=compute_domain_kwargs,\n )\n return column_function, compute_domain_kwargs, accessor_domain_kwargs\n\n return inner_func\n\n return wrapper\n\n else:\n raise ValueError(\"Unsupported engine for column_function_partial\")\n\n\ndef column_condition_partial(\n engine: Type[ExecutionEngine],\n partial_fn_type: Optional[Union[str, MetricPartialFunctionTypes]] = None,\n **kwargs,\n):\n \"\"\"Provides engine-specific support for authoring a metric_fn with a simplified signature.\n\n A column_condition_partial must provide a map function that evaluates to a boolean value; it will be used to provide\n supplemental metrics, such as the unexpected_value count, unexpected_values, and unexpected_rows.\n\n A metric function that is decorated as a column_condition_partial will be called with the engine-specific column\n type and any value_kwargs associated with the Metric for which the provider function is being declared.\n\n\n\n Args:\n engine:\n partial_fn_type:\n **kwargs:\n\n Returns:\n An annotated metric_function which will be called with a simplified signature.\n\n \"\"\"\n domain_type = MetricDomainTypes.COLUMN\n if issubclass(engine, PandasExecutionEngine):\n if partial_fn_type is None:\n partial_fn_type = MetricPartialFunctionTypes.MAP_CONDITION_SERIES\n partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)\n if partial_fn_type not in [MetricPartialFunctionTypes.MAP_CONDITION_SERIES]:\n raise ValueError(\n \"PandasExecutionEngine only supports map_condition_series for column_condition_partial partial_fn_type\"\n )\n\n def wrapper(metric_fn: Callable):\n @metric_partial(\n engine=engine,\n partial_fn_type=partial_fn_type,\n domain_type=domain_type,\n **kwargs,\n )\n @wraps(metric_fn)\n def inner_func(\n cls,\n execution_engine: PandasExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n filter_column_isnull = kwargs.get(\n \"filter_column_isnull\", getattr(cls, \"filter_column_isnull\", True)\n )\n\n (\n df,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = execution_engine.get_compute_domain(\n domain_kwargs=metric_domain_kwargs, domain_type=domain_type\n )\n\n column_name = accessor_domain_kwargs[\"column\"]\n\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n if filter_column_isnull:\n df = df[df[column_name].notnull()]\n\n meets_expectation_series = metric_fn(\n cls,\n df[column_name],\n **metric_value_kwargs,\n _metrics=metrics,\n )\n return (\n ~meets_expectation_series,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n )\n\n return inner_func\n\n return wrapper\n\n elif issubclass(engine, SqlAlchemyExecutionEngine):\n if partial_fn_type is None:\n partial_fn_type = MetricPartialFunctionTypes.MAP_CONDITION_FN\n partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)\n if partial_fn_type not in [\n MetricPartialFunctionTypes.MAP_CONDITION_FN,\n MetricPartialFunctionTypes.WINDOW_CONDITION_FN,\n ]:\n raise ValueError(\n \"SqlAlchemyExecutionEngine only supports map_condition_fn and window_condition_fn for column_condition_partial partial_fn_type\"\n )\n\n def wrapper(metric_fn: Callable):\n @metric_partial(\n engine=engine,\n partial_fn_type=partial_fn_type,\n domain_type=domain_type,\n 
**kwargs,\n )\n @wraps(metric_fn)\n def inner_func(\n cls,\n execution_engine: SqlAlchemyExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n filter_column_isnull = kwargs.get(\n \"filter_column_isnull\", getattr(cls, \"filter_column_isnull\", True)\n )\n\n (\n selectable,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = execution_engine.get_compute_domain(\n metric_domain_kwargs, domain_type=domain_type\n )\n\n column_name = accessor_domain_kwargs[\"column\"]\n\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n sqlalchemy_engine: Engine = execution_engine.engine\n\n dialect = execution_engine.dialect_module\n expected_condition = metric_fn(\n cls,\n sa.column(column_name),\n **metric_value_kwargs,\n _dialect=dialect,\n _table=selectable,\n _sqlalchemy_engine=sqlalchemy_engine,\n _metrics=metrics,\n )\n if filter_column_isnull:\n # If we \"filter\" (ignore) nulls then we allow null as part of our new expected condition\n unexpected_condition = sa.and_(\n sa.not_(sa.column(column_name).is_(None)),\n sa.not_(expected_condition),\n )\n else:\n unexpected_condition = sa.not_(expected_condition)\n return (\n unexpected_condition,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n )\n\n return inner_func\n\n return wrapper\n\n elif issubclass(engine, SparkDFExecutionEngine):\n if partial_fn_type is None:\n partial_fn_type = MetricPartialFunctionTypes.MAP_CONDITION_FN\n partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)\n if partial_fn_type not in [\n MetricPartialFunctionTypes.MAP_CONDITION_FN,\n MetricPartialFunctionTypes.WINDOW_CONDITION_FN,\n ]:\n raise ValueError(\n \"SparkDFExecutionEngine only supports map_condition_fn and window_condition_fn for column_condition_partial partial_fn_type\"\n )\n\n def wrapper(metric_fn: Callable):\n @metric_partial(\n engine=engine,\n partial_fn_type=partial_fn_type,\n domain_type=domain_type,\n **kwargs,\n )\n @wraps(metric_fn)\n def inner_func(\n cls,\n execution_engine: SparkDFExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n filter_column_isnull = kwargs.get(\n \"filter_column_isnull\", getattr(cls, \"filter_column_isnull\", True)\n )\n\n (\n data,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = execution_engine.get_compute_domain(\n domain_kwargs=metric_domain_kwargs, domain_type=domain_type\n )\n\n column_name = accessor_domain_kwargs[\"column\"]\n\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n column = data[column_name]\n expected_condition = metric_fn(\n cls,\n column,\n **metric_value_kwargs,\n _table=data,\n _metrics=metrics,\n _compute_domain_kwargs=compute_domain_kwargs,\n _accessor_domain_kwargs=accessor_domain_kwargs,\n )\n if partial_fn_type == MetricPartialFunctionTypes.WINDOW_CONDITION_FN:\n if filter_column_isnull:\n compute_domain_kwargs = (\n execution_engine.add_column_row_condition(\n compute_domain_kwargs, column_name=column_name\n )\n )\n unexpected_condition = ~expected_condition\n else:\n if filter_column_isnull:\n unexpected_condition = column.isNotNull() & ~expected_condition\n else:\n unexpected_condition = 
~expected_condition\n return (\n unexpected_condition,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n )\n\n return inner_func\n\n return wrapper\n else:\n raise ValueError(\"Unsupported engine for column_condition_partial\")\n\n\ndef column_pair_function_partial(\n engine: Type[ExecutionEngine], partial_fn_type: str = None, **kwargs\n):\n \"\"\"Provides engine-specific support for authoring a metric_fn with a simplified signature.\n\n A metric function that is decorated as a column_pair_function_partial will be called with the engine-specific\n column_list type and any value_kwargs associated with the Metric for which the provider function is being declared.\n\n Args:\n engine:\n partial_fn_type:\n **kwargs:\n\n Returns:\n An annotated metric_function which will be called with a simplified signature.\n\n \"\"\"\n domain_type = MetricDomainTypes.COLUMN_PAIR\n if issubclass(engine, PandasExecutionEngine):\n if partial_fn_type is None:\n partial_fn_type = MetricPartialFunctionTypes.MAP_SERIES\n partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)\n if partial_fn_type != MetricPartialFunctionTypes.MAP_SERIES:\n raise ValueError(\n \"PandasExecutionEngine only supports map_series for column_pair_function_partial partial_fn_type\"\n )\n\n def wrapper(metric_fn: Callable):\n @metric_partial(\n engine=engine,\n partial_fn_type=partial_fn_type,\n domain_type=domain_type,\n **kwargs,\n )\n @wraps(metric_fn)\n def inner_func(\n cls,\n execution_engine: PandasExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n (\n df,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = execution_engine.get_compute_domain(\n domain_kwargs=metric_domain_kwargs, domain_type=domain_type\n )\n\n # noinspection PyPep8Naming\n column_A_name = accessor_domain_kwargs[\"column_A\"]\n # noinspection PyPep8Naming\n column_B_name = accessor_domain_kwargs[\"column_B\"]\n\n column_list = [column_A_name, column_B_name]\n\n for column_name in column_list:\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n values = metric_fn(\n cls,\n df[column_A_name],\n df[column_B_name],\n **metric_value_kwargs,\n _metrics=metrics,\n )\n return values, compute_domain_kwargs, accessor_domain_kwargs\n\n return inner_func\n\n return wrapper\n\n elif issubclass(engine, SqlAlchemyExecutionEngine):\n if partial_fn_type is None:\n partial_fn_type = MetricPartialFunctionTypes.MAP_FN\n partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)\n if partial_fn_type != MetricPartialFunctionTypes.MAP_FN:\n raise ValueError(\n \"SqlAlchemyExecutionEngine only supports map_fn for column_pair_function_partial partial_fn_type\"\n )\n\n def wrapper(metric_fn: Callable):\n @metric_partial(\n engine=engine,\n partial_fn_type=partial_fn_type,\n domain_type=domain_type,\n **kwargs,\n )\n @wraps(metric_fn)\n def inner_func(\n cls,\n execution_engine: SqlAlchemyExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n (\n selectable,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = execution_engine.get_compute_domain(\n domain_kwargs=metric_domain_kwargs, domain_type=domain_type\n )\n\n # noinspection PyPep8Naming\n column_A_name = accessor_domain_kwargs[\"column_A\"]\n # noinspection PyPep8Naming\n column_B_name = 
accessor_domain_kwargs[\"column_B\"]\n\n column_list = [column_A_name, column_B_name]\n\n for column_name in column_list:\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n column_pair_function = metric_fn(\n cls,\n sa.column(column_A_name),\n sa.column(column_B_name),\n **metric_value_kwargs,\n _metrics=metrics,\n )\n return (\n column_pair_function,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n )\n\n return inner_func\n\n return wrapper\n\n elif issubclass(engine, SparkDFExecutionEngine):\n if partial_fn_type is None:\n partial_fn_type = MetricPartialFunctionTypes.MAP_FN\n partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)\n if partial_fn_type != MetricPartialFunctionTypes.MAP_FN:\n raise ValueError(\n \"SparkDFExecutionEngine only supports map_fn for column_pair_function_partial partial_fn_type\"\n )\n\n def wrapper(metric_fn: Callable):\n @metric_partial(\n engine=engine,\n partial_fn_type=partial_fn_type,\n domain_type=domain_type,\n **kwargs,\n )\n @wraps(metric_fn)\n def inner_func(\n cls,\n execution_engine: SparkDFExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n (\n data,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = execution_engine.get_compute_domain(\n domain_kwargs=metric_domain_kwargs, domain_type=domain_type\n )\n\n # noinspection PyPep8Naming\n column_A_name = accessor_domain_kwargs[\"column_A\"]\n # noinspection PyPep8Naming\n column_B_name = accessor_domain_kwargs[\"column_B\"]\n\n column_list = [column_A_name, column_B_name]\n\n for column_name in column_list:\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n column_pair_function = metric_fn(\n cls,\n data[column_A_name],\n data[column_B_name],\n **metric_value_kwargs,\n _metrics=metrics,\n )\n return (\n column_pair_function,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n )\n\n return inner_func\n\n return wrapper\n\n else:\n raise ValueError(\"Unsupported engine for column_pair_function_partial\")\n\n\ndef column_pair_condition_partial(\n engine: Type[ExecutionEngine],\n partial_fn_type: Optional[Union[str, MetricPartialFunctionTypes]] = None,\n **kwargs,\n):\n \"\"\"Provides engine-specific support for authoring a metric_fn with a simplified signature. 
A\n column_pair_condition_partial must provide a map function that evaluates to a boolean value; it will be used to\n provide supplemental metrics, such as the unexpected_value count, unexpected_values, and unexpected_rows.\n\n A metric function that is decorated as a column_pair_condition_partial will be called with the engine-specific\n column_list type and any value_kwargs associated with the Metric for which the provider function is being declared.\n\n Args:\n engine:\n partial_fn_type:\n **kwargs:\n\n Returns:\n An annotated metric_function which will be called with a simplified signature.\n\n \"\"\"\n domain_type = MetricDomainTypes.COLUMN_PAIR\n if issubclass(engine, PandasExecutionEngine):\n if partial_fn_type is None:\n partial_fn_type = MetricPartialFunctionTypes.MAP_CONDITION_SERIES\n partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)\n if partial_fn_type not in [MetricPartialFunctionTypes.MAP_CONDITION_SERIES]:\n raise ValueError(\n \"PandasExecutionEngine only supports map_condition_series for column_pair_condition_partial partial_fn_type\"\n )\n\n def wrapper(metric_fn: Callable):\n @metric_partial(\n engine=engine,\n partial_fn_type=partial_fn_type,\n domain_type=domain_type,\n **kwargs,\n )\n @wraps(metric_fn)\n def inner_func(\n cls,\n execution_engine: PandasExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n (\n df,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = execution_engine.get_compute_domain(\n domain_kwargs=metric_domain_kwargs, domain_type=domain_type\n )\n\n # noinspection PyPep8Naming\n column_A_name = accessor_domain_kwargs[\"column_A\"]\n # noinspection PyPep8Naming\n column_B_name = accessor_domain_kwargs[\"column_B\"]\n\n column_list = [column_A_name, column_B_name]\n\n for column_name in column_list:\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n meets_expectation_series = metric_fn(\n cls,\n df[column_A_name],\n df[column_B_name],\n **metric_value_kwargs,\n _metrics=metrics,\n )\n return (\n ~meets_expectation_series,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n )\n\n return inner_func\n\n return wrapper\n\n elif issubclass(engine, SqlAlchemyExecutionEngine):\n if partial_fn_type is None:\n partial_fn_type = MetricPartialFunctionTypes.MAP_CONDITION_FN\n partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)\n if partial_fn_type not in [\n MetricPartialFunctionTypes.MAP_CONDITION_FN,\n MetricPartialFunctionTypes.WINDOW_CONDITION_FN,\n ]:\n raise ValueError(\n \"SqlAlchemyExecutionEngine only supports map_condition_fn and window_condition_fn for column_pair_condition_partial partial_fn_type\"\n )\n\n def wrapper(metric_fn: Callable):\n @metric_partial(\n engine=engine,\n partial_fn_type=partial_fn_type,\n domain_type=domain_type,\n **kwargs,\n )\n @wraps(metric_fn)\n def inner_func(\n cls,\n execution_engine: SqlAlchemyExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n (\n selectable,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = execution_engine.get_compute_domain(\n domain_kwargs=metric_domain_kwargs, domain_type=domain_type\n )\n\n # noinspection PyPep8Naming\n column_A_name = accessor_domain_kwargs[\"column_A\"]\n # noinspection PyPep8Naming\n column_B_name = 
accessor_domain_kwargs[\"column_B\"]\n\n column_list = [column_A_name, column_B_name]\n\n for column_name in column_list:\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n sqlalchemy_engine: Engine = execution_engine.engine\n\n dialect = execution_engine.dialect_module\n expected_condition = metric_fn(\n cls,\n sa.column(column_A_name),\n sa.column(column_B_name),\n **metric_value_kwargs,\n _dialect=dialect,\n _table=selectable,\n _sqlalchemy_engine=sqlalchemy_engine,\n _metrics=metrics,\n )\n\n unexpected_condition = sa.not_(expected_condition)\n return (\n unexpected_condition,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n )\n\n return inner_func\n\n return wrapper\n\n elif issubclass(engine, SparkDFExecutionEngine):\n if partial_fn_type is None:\n partial_fn_type = MetricPartialFunctionTypes.MAP_CONDITION_FN\n partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)\n if partial_fn_type not in [\n MetricPartialFunctionTypes.MAP_CONDITION_FN,\n MetricPartialFunctionTypes.WINDOW_CONDITION_FN,\n ]:\n raise ValueError(\n \"SparkDFExecutionEngine only supports map_condition_fn and window_condition_fn for column_pair_condition_partial partial_fn_type\"\n )\n\n def wrapper(metric_fn: Callable):\n @metric_partial(\n engine=engine,\n partial_fn_type=partial_fn_type,\n domain_type=domain_type,\n **kwargs,\n )\n @wraps(metric_fn)\n def inner_func(\n cls,\n execution_engine: SparkDFExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n (\n data,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = execution_engine.get_compute_domain(\n domain_kwargs=metric_domain_kwargs, domain_type=domain_type\n )\n\n # noinspection PyPep8Naming\n column_A_name = accessor_domain_kwargs[\"column_A\"]\n # noinspection PyPep8Naming\n column_B_name = accessor_domain_kwargs[\"column_B\"]\n\n column_list = [column_A_name, column_B_name]\n\n for column_name in column_list:\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n expected_condition = metric_fn(\n cls,\n data[column_A_name],\n data[column_B_name],\n **metric_value_kwargs,\n _metrics=metrics,\n )\n return (\n ~expected_condition,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n )\n\n return inner_func\n\n return wrapper\n\n else:\n raise ValueError(\"Unsupported engine for column_pair_condition_partial\")\n\n\ndef multicolumn_function_partial(\n engine: Type[ExecutionEngine], partial_fn_type: str = None, **kwargs\n):\n \"\"\"Provides engine-specific support for authoring a metric_fn with a simplified signature.\n\n A metric function that is decorated as a multicolumn_function_partial will be called with the engine-specific\n column_list type and any value_kwargs associated with the Metric for which the provider function is being declared.\n\n Args:\n engine:\n partial_fn_type:\n **kwargs:\n\n Returns:\n An annotated metric_function which will be called with a simplified signature.\n\n \"\"\"\n domain_type = MetricDomainTypes.MULTICOLUMN\n if issubclass(engine, PandasExecutionEngine):\n if partial_fn_type is None:\n partial_fn_type = MetricPartialFunctionTypes.MAP_SERIES\n partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)\n if partial_fn_type != 
MetricPartialFunctionTypes.MAP_SERIES:\n raise ValueError(\n \"PandasExecutionEngine only supports map_series for multicolumn_function_partial partial_fn_type\"\n )\n\n def wrapper(metric_fn: Callable):\n @metric_partial(\n engine=engine,\n partial_fn_type=partial_fn_type,\n domain_type=domain_type,\n **kwargs,\n )\n @wraps(metric_fn)\n def inner_func(\n cls,\n execution_engine: PandasExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n (\n df,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = execution_engine.get_compute_domain(\n domain_kwargs=metric_domain_kwargs, domain_type=domain_type\n )\n\n column_list = accessor_domain_kwargs[\"column_list\"]\n\n for column_name in column_list:\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n values = metric_fn(\n cls,\n df[column_list],\n **metric_value_kwargs,\n _metrics=metrics,\n )\n return values, compute_domain_kwargs, accessor_domain_kwargs\n\n return inner_func\n\n return wrapper\n\n elif issubclass(engine, SqlAlchemyExecutionEngine):\n if partial_fn_type is None:\n partial_fn_type = MetricPartialFunctionTypes.MAP_FN\n partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)\n if partial_fn_type != MetricPartialFunctionTypes.MAP_FN:\n raise ValueError(\n \"SqlAlchemyExecutionEngine only supports map_fn for multicolumn_function_partial partial_fn_type\"\n )\n\n def wrapper(metric_fn: Callable):\n @metric_partial(\n engine=engine,\n partial_fn_type=partial_fn_type,\n domain_type=domain_type,\n **kwargs,\n )\n @wraps(metric_fn)\n def inner_func(\n cls,\n execution_engine: SqlAlchemyExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n (\n selectable,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = execution_engine.get_compute_domain(\n domain_kwargs=metric_domain_kwargs, domain_type=domain_type\n )\n\n column_list = accessor_domain_kwargs[\"column_list\"]\n\n table_columns = metrics[\"table.columns\"]\n\n for column_name in column_list:\n if column_name not in table_columns:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n sqlalchemy_engine: Engine = execution_engine.engine\n\n column_selector = [\n sa.column(column_name) for column_name in column_list\n ]\n dialect = execution_engine.dialect_module\n multicolumn_function = metric_fn(\n cls,\n column_selector,\n **metric_value_kwargs,\n _column_names=column_list,\n _table_columns=table_columns,\n _dialect=dialect,\n _table=selectable,\n _sqlalchemy_engine=sqlalchemy_engine,\n _metrics=metrics,\n )\n return (\n multicolumn_function,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n )\n\n return inner_func\n\n return wrapper\n\n elif issubclass(engine, SparkDFExecutionEngine):\n if partial_fn_type is None:\n partial_fn_type = MetricPartialFunctionTypes.MAP_FN\n partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)\n if partial_fn_type != MetricPartialFunctionTypes.MAP_FN:\n raise ValueError(\n \"SparkDFExecutionEngine only supports map_fn for multicolumn_function_partial partial_fn_type\"\n )\n\n def wrapper(metric_fn: Callable):\n @metric_partial(\n engine=engine,\n partial_fn_type=partial_fn_type,\n domain_type=domain_type,\n **kwargs,\n 
)\n @wraps(metric_fn)\n def inner_func(\n cls,\n execution_engine: SparkDFExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n (\n data,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = execution_engine.get_compute_domain(\n domain_kwargs=metric_domain_kwargs, domain_type=domain_type\n )\n\n column_list = accessor_domain_kwargs[\"column_list\"]\n\n for column_name in column_list:\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n multicolumn_function = metric_fn(\n cls,\n data[column_list],\n **metric_value_kwargs,\n _metrics=metrics,\n )\n return (\n multicolumn_function,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n )\n\n return inner_func\n\n return wrapper\n\n else:\n raise ValueError(\"Unsupported engine for multicolumn_function_partial\")\n\n\ndef multicolumn_condition_partial(\n engine: Type[ExecutionEngine],\n partial_fn_type: Optional[Union[str, MetricPartialFunctionTypes]] = None,\n **kwargs,\n):\n \"\"\"Provides engine-specific support for authoring a metric_fn with a simplified signature. A\n multicolumn_condition_partial must provide a map function that evaluates to a boolean value; it will be used to\n provide supplemental metrics, such as the unexpected_value count, unexpected_values, and unexpected_rows.\n\n A metric function that is decorated as a multicolumn_condition_partial will be called with the engine-specific\n column_list type and any value_kwargs associated with the Metric for which the provider function is being declared.\n\n Args:\n engine:\n partial_fn_type:\n **kwargs:\n\n Returns:\n An annotated metric_function which will be called with a simplified signature.\n\n \"\"\"\n domain_type = MetricDomainTypes.MULTICOLUMN\n if issubclass(engine, PandasExecutionEngine):\n if partial_fn_type is None:\n partial_fn_type = MetricPartialFunctionTypes.MAP_CONDITION_SERIES\n partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)\n if partial_fn_type not in [MetricPartialFunctionTypes.MAP_CONDITION_SERIES]:\n raise ValueError(\n \"PandasExecutionEngine only supports map_condition_series for multicolumn_condition_partial partial_fn_type\"\n )\n\n def wrapper(metric_fn: Callable):\n @metric_partial(\n engine=engine,\n partial_fn_type=partial_fn_type,\n domain_type=domain_type,\n **kwargs,\n )\n @wraps(metric_fn)\n def inner_func(\n cls,\n execution_engine: PandasExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n (\n df,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = execution_engine.get_compute_domain(\n domain_kwargs=metric_domain_kwargs, domain_type=domain_type\n )\n\n column_list = accessor_domain_kwargs[\"column_list\"]\n\n for column_name in column_list:\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n meets_expectation_series = metric_fn(\n cls,\n df[column_list],\n **metric_value_kwargs,\n _metrics=metrics,\n )\n return (\n ~meets_expectation_series,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n )\n\n return inner_func\n\n return wrapper\n\n elif issubclass(engine, SqlAlchemyExecutionEngine):\n if partial_fn_type is None:\n partial_fn_type = 
MetricPartialFunctionTypes.MAP_CONDITION_FN\n partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)\n if partial_fn_type not in [\n MetricPartialFunctionTypes.MAP_CONDITION_FN,\n MetricPartialFunctionTypes.WINDOW_CONDITION_FN,\n ]:\n raise ValueError(\n \"SqlAlchemyExecutionEngine only supports map_condition_fn and window_condition_fn for multicolumn_condition_partial partial_fn_type\"\n )\n\n def wrapper(metric_fn: Callable):\n @metric_partial(\n engine=engine,\n partial_fn_type=partial_fn_type,\n domain_type=domain_type,\n **kwargs,\n )\n @wraps(metric_fn)\n def inner_func(\n cls,\n execution_engine: SqlAlchemyExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n (\n selectable,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = execution_engine.get_compute_domain(\n domain_kwargs=metric_domain_kwargs, domain_type=domain_type\n )\n\n column_list = accessor_domain_kwargs[\"column_list\"]\n\n for column_name in column_list:\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n sqlalchemy_engine: Engine = execution_engine.engine\n\n column_selector = [\n sa.column(column_name) for column_name in column_list\n ]\n dialect = execution_engine.dialect_module\n expected_condition = metric_fn(\n cls,\n column_selector,\n **metric_value_kwargs,\n _dialect=dialect,\n _table=selectable,\n _sqlalchemy_engine=sqlalchemy_engine,\n _metrics=metrics,\n )\n\n unexpected_condition = sa.not_(expected_condition)\n return (\n unexpected_condition,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n )\n\n return inner_func\n\n return wrapper\n\n elif issubclass(engine, SparkDFExecutionEngine):\n if partial_fn_type is None:\n partial_fn_type = MetricPartialFunctionTypes.MAP_CONDITION_FN\n partial_fn_type = MetricPartialFunctionTypes(partial_fn_type)\n if partial_fn_type not in [\n MetricPartialFunctionTypes.MAP_CONDITION_FN,\n MetricPartialFunctionTypes.WINDOW_CONDITION_FN,\n ]:\n raise ValueError(\n \"SparkDFExecutionEngine only supports map_condition_fn and window_condition_fn for multicolumn_condition_partial partial_fn_type\"\n )\n\n def wrapper(metric_fn: Callable):\n @metric_partial(\n engine=engine,\n partial_fn_type=partial_fn_type,\n domain_type=domain_type,\n **kwargs,\n )\n @wraps(metric_fn)\n def inner_func(\n cls,\n execution_engine: SparkDFExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n runtime_configuration: Dict,\n ):\n (\n data,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = execution_engine.get_compute_domain(\n domain_kwargs=metric_domain_kwargs, domain_type=domain_type\n )\n\n column_list = accessor_domain_kwargs[\"column_list\"]\n\n for column_name in column_list:\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n expected_condition = metric_fn(\n cls,\n data[column_list],\n **metric_value_kwargs,\n _metrics=metrics,\n )\n return (\n ~expected_condition,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n )\n\n return inner_func\n\n return wrapper\n\n else:\n raise ValueError(\"Unsupported engine for multicolumn_condition_partial\")\n\n\ndef _pandas_map_condition_unexpected_count(\n cls,\n execution_engine: 
PandasExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n \"\"\"Returns unexpected count for MapExpectations\"\"\"\n return np.count_nonzero(metrics[\"unexpected_condition\"][0])\n\n\ndef _pandas_column_map_condition_values(\n cls,\n execution_engine: PandasExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n \"\"\"Return values from the specified domain that match the map-style metric in the metrics dictionary.\"\"\"\n (\n boolean_mapped_unexpected_values,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = metrics[\"unexpected_condition\"]\n df = execution_engine.get_domain_records(\n domain_kwargs=compute_domain_kwargs,\n )\n\n ###\n # NOTE: 20201111 - JPC - in the map_series / map_condition_series world (pandas), we\n # currently handle filter_column_isnull differently than other map_fn / map_condition\n # cases.\n ###\n filter_column_isnull = kwargs.get(\n \"filter_column_isnull\", getattr(cls, \"filter_column_isnull\", False)\n )\n\n if \"column\" not in accessor_domain_kwargs:\n raise ValueError(\n \"\"\"No \"column\" found in provided metric_domain_kwargs, but it is required for a column map metric\n(_pandas_column_map_condition_values).\n\"\"\"\n )\n\n column_name = accessor_domain_kwargs[\"column\"]\n\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n if filter_column_isnull:\n df = df[df[column_name].notnull()]\n\n domain_values = df[column_name]\n\n domain_values = domain_values[boolean_mapped_unexpected_values == True]\n\n result_format = metric_value_kwargs[\"result_format\"]\n\n if result_format[\"result_format\"] == \"COMPLETE\":\n return list(domain_values)\n else:\n return list(domain_values[: result_format[\"partial_unexpected_count\"]])\n\n\ndef _pandas_column_pair_map_condition_values(\n cls,\n execution_engine: PandasExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n \"\"\"Return values from the specified domain that match the map-style metric in the metrics dictionary.\"\"\"\n (\n boolean_mapped_unexpected_values,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = metrics[\"unexpected_condition\"]\n \"\"\"\n In order to invoke the \"ignore_row_if\" filtering logic, \"execution_engine.get_domain_records()\" must be supplied\n with all of the available \"domain_kwargs\" keys.\n \"\"\"\n domain_kwargs = dict(**compute_domain_kwargs, **accessor_domain_kwargs)\n df = execution_engine.get_domain_records(\n domain_kwargs=domain_kwargs,\n )\n\n if not (\"column_A\" in domain_kwargs and \"column_B\" in domain_kwargs):\n raise ValueError(\n \"\"\"No \"column_A\" and \"column_B\" found in provided metric_domain_kwargs, but it is required for a column pair map metric\n(_pandas_column_pair_map_condition_values).\n\"\"\"\n )\n\n # noinspection PyPep8Naming\n column_A_name = accessor_domain_kwargs[\"column_A\"]\n # noinspection PyPep8Naming\n column_B_name = accessor_domain_kwargs[\"column_B\"]\n\n column_list = [column_A_name, column_B_name]\n\n for column_name in column_list:\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n domain_values = df[column_list]\n\n 
domain_values = domain_values[boolean_mapped_unexpected_values == True]\n\n result_format = metric_value_kwargs[\"result_format\"]\n\n unexpected_list = [\n value_pair\n for value_pair in zip(\n domain_values[column_A_name].values, domain_values[column_B_name].values\n )\n ]\n if result_format[\"result_format\"] == \"COMPLETE\":\n return unexpected_list\n else:\n return unexpected_list[: result_format[\"partial_unexpected_count\"]]\n\n\ndef _pandas_column_pair_map_condition_filtered_row_count(\n cls,\n execution_engine: PandasExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n \"\"\"Return record counts from the specified domain that match the map-style metric in the metrics dictionary.\"\"\"\n _, compute_domain_kwargs, accessor_domain_kwargs = metrics[\"unexpected_condition\"]\n \"\"\"\n In order to invoke the \"ignore_row_if\" filtering logic, \"execution_engine.get_domain_records()\" must be supplied\n with all of the available \"domain_kwargs\" keys.\n \"\"\"\n domain_kwargs = dict(**compute_domain_kwargs, **accessor_domain_kwargs)\n df = execution_engine.get_domain_records(\n domain_kwargs=domain_kwargs,\n )\n\n if not (\"column_A\" in domain_kwargs and \"column_B\" in domain_kwargs):\n raise ValueError(\n \"\"\"No \"column_A\" and \"column_B\" found in provided metric_domain_kwargs, but it is required for a column pair map metric\n(_pandas_column_pair_map_condition_filtered_row_count).\n\"\"\"\n )\n\n # noinspection PyPep8Naming\n column_A_name = accessor_domain_kwargs[\"column_A\"]\n # noinspection PyPep8Naming\n column_B_name = accessor_domain_kwargs[\"column_B\"]\n\n column_list = [column_A_name, column_B_name]\n\n for column_name in column_list:\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n return df.shape[0]\n\n\ndef _pandas_multicolumn_map_condition_values(\n cls,\n execution_engine: PandasExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n \"\"\"Return values from the specified domain that match the map-style metric in the metrics dictionary.\"\"\"\n (\n boolean_mapped_unexpected_values,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = metrics[\"unexpected_condition\"]\n \"\"\"\n In order to invoke the \"ignore_row_if\" filtering logic, \"execution_engine.get_domain_records()\" must be supplied\n with all of the available \"domain_kwargs\" keys.\n \"\"\"\n domain_kwargs = dict(**compute_domain_kwargs, **accessor_domain_kwargs)\n df = execution_engine.get_domain_records(\n domain_kwargs=domain_kwargs,\n )\n\n if \"column_list\" not in accessor_domain_kwargs:\n raise ValueError(\n \"\"\"No \"column_list\" found in provided metric_domain_kwargs, but it is required for a multicolumn map metric\n(_pandas_multicolumn_map_condition_values).\n\"\"\"\n )\n\n column_list = accessor_domain_kwargs[\"column_list\"]\n\n for column_name in column_list:\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n domain_values = df[column_list]\n\n domain_values = domain_values[boolean_mapped_unexpected_values == True]\n\n result_format = metric_value_kwargs[\"result_format\"]\n\n if result_format[\"result_format\"] == \"COMPLETE\":\n return 
domain_values.to_dict(\"records\")\n else:\n return domain_values[: result_format[\"partial_unexpected_count\"]].to_dict(\n \"records\"\n )\n\n\ndef _pandas_multicolumn_map_condition_filtered_row_count(\n cls,\n execution_engine: PandasExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n \"\"\"Return record counts from the specified domain that match the map-style metric in the metrics dictionary.\"\"\"\n _, compute_domain_kwargs, accessor_domain_kwargs = metrics[\"unexpected_condition\"]\n \"\"\"\n In order to invoke the \"ignore_row_if\" filtering logic, \"execution_engine.get_domain_records()\" must be supplied\n with all of the available \"domain_kwargs\" keys.\n \"\"\"\n domain_kwargs = dict(**compute_domain_kwargs, **accessor_domain_kwargs)\n df = execution_engine.get_domain_records(\n domain_kwargs=domain_kwargs,\n )\n\n if \"column_list\" not in accessor_domain_kwargs:\n raise ValueError(\n \"\"\"No \"column_list\" found in provided metric_domain_kwargs, but it is required for a multicolumn map metric\n(_pandas_multicolumn_map_condition_filtered_row_count).\n\"\"\"\n )\n\n column_list = accessor_domain_kwargs[\"column_list\"]\n\n for column_name in column_list:\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n return df.shape[0]\n\n\ndef _pandas_column_map_series_and_domain_values(\n cls,\n execution_engine: PandasExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n \"\"\"Return values from the specified domain that match the map-style metric in the metrics dictionary.\"\"\"\n (\n boolean_mapped_unexpected_values,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = metrics[\"unexpected_condition\"]\n (\n map_series,\n compute_domain_kwargs_2,\n accessor_domain_kwargs_2,\n ) = metrics[\"metric_partial_fn\"]\n assert (\n compute_domain_kwargs == compute_domain_kwargs_2\n ), \"map_series and condition must have the same compute domain\"\n assert (\n accessor_domain_kwargs == accessor_domain_kwargs_2\n ), \"map_series and condition must have the same accessor kwargs\"\n df = execution_engine.get_domain_records(\n domain_kwargs=compute_domain_kwargs,\n )\n\n ###\n # NOTE: 20201111 - JPC - in the map_series / map_condition_series world (pandas), we\n # currently handle filter_column_isnull differently than other map_fn / map_condition\n # cases.\n ###\n filter_column_isnull = kwargs.get(\n \"filter_column_isnull\", getattr(cls, \"filter_column_isnull\", False)\n )\n\n if \"column\" not in accessor_domain_kwargs:\n raise ValueError(\n \"\"\"No \"column\" found in provided metric_domain_kwargs, but it is required for a column map metric\n(_pandas_column_map_series_and_domain_values).\n\"\"\"\n )\n\n column_name = accessor_domain_kwargs[\"column\"]\n\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n if filter_column_isnull:\n df = df[df[column_name].notnull()]\n\n domain_values = df[column_name]\n\n domain_values = domain_values[boolean_mapped_unexpected_values == True]\n map_series = map_series[boolean_mapped_unexpected_values == True]\n\n result_format = metric_value_kwargs[\"result_format\"]\n\n if result_format[\"result_format\"] == \"COMPLETE\":\n 
return (\n list(domain_values),\n list(map_series),\n )\n else:\n return (\n list(domain_values[: result_format[\"partial_unexpected_count\"]]),\n list(map_series[: result_format[\"partial_unexpected_count\"]]),\n )\n\n\ndef _pandas_map_condition_index(\n cls,\n execution_engine: PandasExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n (\n boolean_mapped_unexpected_values,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = metrics.get(\"unexpected_condition\")\n \"\"\"\n In order to invoke the \"ignore_row_if\" filtering logic, \"execution_engine.get_domain_records()\" must be supplied\n with all of the available \"domain_kwargs\" keys.\n \"\"\"\n domain_kwargs = dict(**compute_domain_kwargs, **accessor_domain_kwargs)\n df = execution_engine.get_domain_records(\n domain_kwargs=domain_kwargs,\n )\n\n ###\n # NOTE: 20201111 - JPC - in the map_series / map_condition_series world (pandas), we\n # currently handle filter_column_isnull differently than other map_fn / map_condition\n # cases.\n ###\n filter_column_isnull = kwargs.get(\n \"filter_column_isnull\", getattr(cls, \"filter_column_isnull\", False)\n )\n\n if \"column\" in accessor_domain_kwargs:\n column_name = accessor_domain_kwargs[\"column\"]\n\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n if filter_column_isnull:\n df = df[df[column_name].notnull()]\n\n elif \"column_list\" in accessor_domain_kwargs:\n column_list = accessor_domain_kwargs[\"column_list\"]\n\n for column_name in column_list:\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n result_format = metric_value_kwargs[\"result_format\"]\n\n df = df[boolean_mapped_unexpected_values]\n\n if result_format[\"result_format\"] == \"COMPLETE\":\n return list(df.index)\n\n return list(df.index[: result_format[\"partial_unexpected_count\"]])\n\n\ndef _pandas_column_map_condition_value_counts(\n cls,\n execution_engine: PandasExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n \"\"\"Returns respective value counts for distinct column values\"\"\"\n (\n boolean_mapped_unexpected_values,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = metrics.get(\"unexpected_condition\")\n df = execution_engine.get_domain_records(\n domain_kwargs=compute_domain_kwargs,\n )\n\n ###\n # NOTE: 20201111 - JPC - in the map_series / map_condition_series world (pandas), we\n # currently handle filter_column_isnull differently than other map_fn / map_condition\n # cases.\n ###\n filter_column_isnull = kwargs.get(\n \"filter_column_isnull\", getattr(cls, \"filter_column_isnull\", False)\n )\n\n column_name = accessor_domain_kwargs[\"column\"]\n\n if \"column\" not in accessor_domain_kwargs:\n raise ValueError(\n \"\"\"No \"column\" found in provided metric_domain_kwargs, but it is required for a column map metric\n(_pandas_column_map_condition_value_counts).\n\"\"\"\n )\n\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n if filter_column_isnull:\n df = df[df[column_name].notnull()]\n\n domain_values = 
df[column_name]\n\n result_format = metric_value_kwargs[\"result_format\"]\n value_counts = None\n try:\n value_counts = domain_values[boolean_mapped_unexpected_values].value_counts()\n except ValueError:\n try:\n value_counts = (\n domain_values[boolean_mapped_unexpected_values]\n .apply(tuple)\n .value_counts()\n )\n except ValueError:\n pass\n\n if not value_counts:\n raise ge_exceptions.MetricComputationError(\"Unable to compute value counts\")\n\n if result_format[\"result_format\"] == \"COMPLETE\":\n return value_counts\n else:\n return value_counts[result_format[\"partial_unexpected_count\"]]\n\n\ndef _pandas_map_condition_rows(\n cls,\n execution_engine: PandasExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n \"\"\"Return values from the specified domain (ignoring the column constraint) that match the map-style metric in the metrics dictionary.\"\"\"\n (\n boolean_mapped_unexpected_values,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = metrics.get(\"unexpected_condition\")\n \"\"\"\n In order to invoke the \"ignore_row_if\" filtering logic, \"execution_engine.get_domain_records()\" must be supplied\n with all of the available \"domain_kwargs\" keys.\n \"\"\"\n domain_kwargs = dict(**compute_domain_kwargs, **accessor_domain_kwargs)\n df = execution_engine.get_domain_records(\n domain_kwargs=domain_kwargs,\n )\n\n ###\n # NOTE: 20201111 - JPC - in the map_series / map_condition_series world (pandas), we\n # currently handle filter_column_isnull differently than other map_fn / map_condition\n # cases.\n ###\n filter_column_isnull = kwargs.get(\n \"filter_column_isnull\", getattr(cls, \"filter_column_isnull\", False)\n )\n\n if \"column\" in accessor_domain_kwargs:\n column_name = accessor_domain_kwargs[\"column\"]\n\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n if filter_column_isnull:\n df = df[df[column_name].notnull()]\n\n elif \"column_list\" in accessor_domain_kwargs:\n column_list = accessor_domain_kwargs[\"column_list\"]\n\n for column_name in column_list:\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n result_format = metric_value_kwargs[\"result_format\"]\n\n df = df[boolean_mapped_unexpected_values]\n\n if result_format[\"result_format\"] == \"COMPLETE\":\n return df\n\n return df.iloc[: result_format[\"partial_unexpected_count\"]]\n\n\ndef _sqlalchemy_map_condition_unexpected_count_aggregate_fn(\n cls,\n execution_engine: SqlAlchemyExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n \"\"\"Returns unexpected count for MapExpectations\"\"\"\n unexpected_condition, compute_domain_kwargs, accessor_domain_kwargs = metrics.get(\n \"unexpected_condition\"\n )\n\n return (\n sa.func.sum(\n sa.case(\n [(unexpected_condition, 1)],\n else_=0,\n )\n ),\n compute_domain_kwargs,\n accessor_domain_kwargs,\n )\n\n\ndef _sqlalchemy_map_condition_unexpected_count_value(\n cls,\n execution_engine: SqlAlchemyExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n \"\"\"Returns unexpected count for MapExpectations. 
This is a *value* metric, which is useful for\n when the unexpected_condition is a window function.\n \"\"\"\n unexpected_condition, compute_domain_kwargs, accessor_domain_kwargs = metrics.get(\n \"unexpected_condition\"\n )\n \"\"\"\n In order to invoke the \"ignore_row_if\" filtering logic, \"execution_engine.get_domain_records()\" must be supplied\n with all of the available \"domain_kwargs\" keys.\n \"\"\"\n domain_kwargs = dict(**compute_domain_kwargs, **accessor_domain_kwargs)\n selectable = execution_engine.get_domain_records(\n domain_kwargs=domain_kwargs,\n )\n\n # The integral values are cast to SQL Numeric in order to avoid a bug in AWS Redshift (converted to integer later).\n count_case_statement: List[Label] = sa.case(\n [\n (\n unexpected_condition,\n sa.sql.expression.cast(1, sa.Numeric),\n )\n ],\n else_=sa.sql.expression.cast(0, sa.Numeric),\n ).label(\"condition\")\n\n count_selectable: Select = sa.select([count_case_statement])\n if not MapMetricProvider.is_sqlalchemy_metric_selectable(map_metric_provider=cls):\n count_selectable = count_selectable.select_from(selectable)\n\n try:\n if execution_engine.engine.dialect.name.lower() == \"mssql\":\n temp_table_name: str = generate_temporary_table_name(\n default_table_name_prefix=\"#ge_temp_\"\n )\n\n with execution_engine.engine.begin():\n metadata: sa.MetaData = sa.MetaData(execution_engine.engine)\n temp_table_obj: sa.Table = sa.Table(\n temp_table_name,\n metadata,\n sa.Column(\n \"condition\", sa.Integer, primary_key=False, nullable=False\n ),\n )\n temp_table_obj.create(execution_engine.engine, checkfirst=True)\n\n inner_case_query: Insert = temp_table_obj.insert().from_select(\n [count_case_statement],\n count_selectable,\n )\n execution_engine.engine.execute(inner_case_query)\n\n count_selectable = temp_table_obj\n\n unexpected_count_query: Select = (\n sa.select(\n [\n sa.func.sum(sa.column(\"condition\")).label(\"unexpected_count\"),\n ]\n )\n .select_from(count_selectable)\n .alias(\"UnexpectedCountSubquery\")\n )\n\n unexpected_count: Union[float, int] = execution_engine.engine.execute(\n sa.select(\n [\n unexpected_count_query.c.unexpected_count,\n ]\n )\n ).scalar()\n unexpected_count = int(unexpected_count)\n except OperationalError as oe:\n exception_message: str = f\"An SQL execution Exception occurred: {str(oe)}.\"\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=exception_message\n )\n\n return convert_to_json_serializable(unexpected_count)\n\n\ndef _sqlalchemy_column_map_condition_values(\n cls,\n execution_engine: SqlAlchemyExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n \"\"\"\n Particularly for the purpose of finding unexpected values, returns all the metric values which do not meet an\n expected Expectation condition for ColumnMapExpectation Expectations.\n \"\"\"\n unexpected_condition, compute_domain_kwargs, accessor_domain_kwargs = metrics.get(\n \"unexpected_condition\"\n )\n selectable = execution_engine.get_domain_records(\n domain_kwargs=compute_domain_kwargs,\n )\n\n if \"column\" not in accessor_domain_kwargs:\n raise ValueError(\n \"\"\"No \"column\" found in provided metric_domain_kwargs, but it is required for a column map metric\n(_sqlalchemy_column_map_condition_values).\n\"\"\"\n )\n\n column_name = accessor_domain_kwargs[\"column\"]\n\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column 
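# --- Illustrative sketch (not part of the library source) ---------------------
# The mssql branch above materialises the 0/1 "condition" flags into a scratch
# table via INSERT ... FROM SELECT before summing them. A minimal,
# database-agnostic rendering of that insert-from-select step on SQLite; table
# names, column, and condition are hypothetical, SQLAlchemy 1.4-style calls.
import sqlalchemy as sa

engine = sa.create_engine("sqlite://")
metadata = sa.MetaData()
events = sa.Table("events", metadata, sa.Column("age", sa.Integer))
flags = sa.Table("ge_tmp_flags", metadata, sa.Column("condition", sa.Integer))
metadata.create_all(engine)

count_case_statement = sa.case([(events.c.age < 0, 1)], else_=0).label("condition")

with engine.begin() as conn:
    conn.execute(events.insert(), [{"age": 12}, {"age": -3}, {"age": -7}])
    # INSERT INTO ge_tmp_flags (condition) SELECT CASE ... END FROM events
    conn.execute(
        flags.insert().from_select(
            ["condition"],
            sa.select([count_case_statement]).select_from(events),
        )
    )
    print(conn.execute(sa.select([sa.func.sum(flags.c.condition)])).scalar())  # 2
# --- end of sketch -------------------------------------------------------------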
\"{column_name}\" in BatchData does not exist.'\n )\n\n query = sa.select([sa.column(column_name).label(\"unexpected_values\")]).where(\n unexpected_condition\n )\n if not MapMetricProvider.is_sqlalchemy_metric_selectable(map_metric_provider=cls):\n query = query.select_from(selectable)\n\n result_format = metric_value_kwargs[\"result_format\"]\n if result_format[\"result_format\"] != \"COMPLETE\":\n query = query.limit(result_format[\"partial_unexpected_count\"])\n\n return [\n val.unexpected_values\n for val in execution_engine.engine.execute(query).fetchall()\n ]\n\n\ndef _sqlalchemy_column_pair_map_condition_values(\n cls,\n execution_engine: SqlAlchemyExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n \"\"\"Return values from the specified domain that match the map-style metric in the metrics dictionary.\"\"\"\n (\n boolean_mapped_unexpected_values,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = metrics[\"unexpected_condition\"]\n \"\"\"\n In order to invoke the \"ignore_row_if\" filtering logic, \"execution_engine.get_domain_records()\" must be supplied\n with all of the available \"domain_kwargs\" keys.\n \"\"\"\n domain_kwargs = dict(**compute_domain_kwargs, **accessor_domain_kwargs)\n selectable = execution_engine.get_domain_records(\n domain_kwargs=domain_kwargs,\n )\n\n # noinspection PyPep8Naming\n column_A_name = accessor_domain_kwargs[\"column_A\"]\n # noinspection PyPep8Naming\n column_B_name = accessor_domain_kwargs[\"column_B\"]\n\n column_list = [column_A_name, column_B_name]\n\n for column_name in column_list:\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n query = sa.select(\n sa.column(column_A_name).label(\"unexpected_values_A\"),\n sa.column(column_B_name).label(\"unexpected_values_B\"),\n ).where(boolean_mapped_unexpected_values)\n if not MapMetricProvider.is_sqlalchemy_metric_selectable(map_metric_provider=cls):\n query = query.select_from(selectable)\n\n result_format = metric_value_kwargs[\"result_format\"]\n if result_format[\"result_format\"] != \"COMPLETE\":\n query = query.limit(result_format[\"partial_unexpected_count\"])\n\n unexpected_list = [\n (val.unexpected_values_A, val.unexpected_values_B)\n for val in execution_engine.engine.execute(query).fetchall()\n ]\n return unexpected_list\n\n\ndef _sqlalchemy_column_pair_map_condition_filtered_row_count(\n cls,\n execution_engine: SqlAlchemyExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n \"\"\"Return record counts from the specified domain that match the map-style metric in the metrics dictionary.\"\"\"\n _, compute_domain_kwargs, accessor_domain_kwargs = metrics[\"unexpected_condition\"]\n \"\"\"\n In order to invoke the \"ignore_row_if\" filtering logic, \"execution_engine.get_domain_records()\" must be supplied\n with all of the available \"domain_kwargs\" keys.\n \"\"\"\n domain_kwargs = dict(**compute_domain_kwargs, **accessor_domain_kwargs)\n selectable = execution_engine.get_domain_records(\n domain_kwargs=domain_kwargs,\n )\n\n # noinspection PyPep8Naming\n column_A_name = accessor_domain_kwargs[\"column_A\"]\n # noinspection PyPep8Naming\n column_B_name = accessor_domain_kwargs[\"column_B\"]\n\n column_list = [column_A_name, column_B_name]\n\n for column_name in column_list:\n if column_name not in 
metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n return execution_engine.engine.execute(\n sa.select([sa.func.count()]).select_from(selectable)\n ).scalar()\n\n\ndef _sqlalchemy_multicolumn_map_condition_values(\n cls,\n execution_engine: SqlAlchemyExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n \"\"\"Return values from the specified domain that match the map-style metric in the metrics dictionary.\"\"\"\n (\n boolean_mapped_unexpected_values,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = metrics[\"unexpected_condition\"]\n \"\"\"\n In order to invoke the \"ignore_row_if\" filtering logic, \"execution_engine.get_domain_records()\" must be supplied\n with all of the available \"domain_kwargs\" keys.\n \"\"\"\n domain_kwargs = dict(**compute_domain_kwargs, **accessor_domain_kwargs)\n selectable = execution_engine.get_domain_records(\n domain_kwargs=domain_kwargs,\n )\n\n if \"column_list\" not in accessor_domain_kwargs:\n raise ValueError(\n \"\"\"No \"column_list\" found in provided metric_domain_kwargs, but it is required for a multicolumn map metric\n(_sqlalchemy_multicolumn_map_condition_values).\n\"\"\"\n )\n\n column_list = accessor_domain_kwargs[\"column_list\"]\n\n for column_name in column_list:\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n column_selector = [sa.column(column_name) for column_name in column_list]\n\n query = sa.select(column_selector).where(boolean_mapped_unexpected_values)\n if not MapMetricProvider.is_sqlalchemy_metric_selectable(map_metric_provider=cls):\n query = query.select_from(selectable)\n\n result_format = metric_value_kwargs[\"result_format\"]\n if result_format[\"result_format\"] != \"COMPLETE\":\n query = query.limit(result_format[\"partial_unexpected_count\"])\n\n return [dict(val) for val in execution_engine.engine.execute(query).fetchall()]\n\n\ndef _sqlalchemy_multicolumn_map_condition_filtered_row_count(\n cls,\n execution_engine: SqlAlchemyExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n \"\"\"Return record counts from the specified domain that match the map-style metric in the metrics dictionary.\"\"\"\n _, compute_domain_kwargs, accessor_domain_kwargs = metrics[\"unexpected_condition\"]\n \"\"\"\n In order to invoke the \"ignore_row_if\" filtering logic, \"execution_engine.get_domain_records()\" must be supplied\n with all of the available \"domain_kwargs\" keys.\n \"\"\"\n domain_kwargs = dict(**compute_domain_kwargs, **accessor_domain_kwargs)\n selectable = execution_engine.get_domain_records(\n domain_kwargs=domain_kwargs,\n )\n\n if \"column_list\" not in accessor_domain_kwargs:\n raise ValueError(\n \"\"\"No \"column_list\" found in provided metric_domain_kwargs, but it is required for a multicolumn map metric\n(_sqlalchemy_multicolumn_map_condition_filtered_row_count).\n\"\"\"\n )\n\n column_list = accessor_domain_kwargs[\"column_list\"]\n\n for column_name in column_list:\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n return 
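# --- Illustrative sketch (not part of the library source) ---------------------
# Fetching the unexpected *values* (column pair or multicolumn) is a plain
# SELECT of the domain columns with the condition as the WHERE clause, truncated
# by LIMIT unless result_format is COMPLETE. Table, columns, and condition are
# hypothetical; SQLAlchemy 1.4-style calls.
import sqlalchemy as sa

engine = sa.create_engine("sqlite://")
metadata = sa.MetaData()
pairs = sa.Table(
    "pairs", metadata, sa.Column("a", sa.Integer), sa.Column("b", sa.Integer)
)
metadata.create_all(engine)
with engine.begin() as conn:
    conn.execute(pairs.insert(), [{"a": 1, "b": 1}, {"a": 2, "b": 5}, {"a": 3, "b": 9}])

result_format = {"result_format": "BASIC", "partial_unexpected_count": 1}

query = sa.select([pairs.c.a, pairs.c.b]).where(pairs.c.a != pairs.c.b)
if result_format["result_format"] != "COMPLETE":
    query = query.limit(result_format["partial_unexpected_count"])

with engine.connect() as conn:
    # The helpers above return these rows as dicts or (A, B) tuples.
    print([(row.a, row.b) for row in conn.execute(query).fetchall()])  # [(2, 5)]
# --- end of sketch -------------------------------------------------------------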
execution_engine.engine.execute(\n sa.select([sa.func.count()]).select_from(selectable)\n ).scalar()\n\n\ndef _sqlalchemy_column_map_condition_value_counts(\n cls,\n execution_engine: SqlAlchemyExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n \"\"\"\n Returns value counts for all the metric values which do not meet an expected Expectation condition for instances\n of ColumnMapExpectation.\n \"\"\"\n unexpected_condition, compute_domain_kwargs, accessor_domain_kwargs = metrics.get(\n \"unexpected_condition\"\n )\n selectable = execution_engine.get_domain_records(\n domain_kwargs=compute_domain_kwargs,\n )\n\n if \"column\" not in accessor_domain_kwargs:\n raise ValueError(\n \"\"\"No \"column\" found in provided metric_domain_kwargs, but it is required for a column map metric\n(_sqlalchemy_column_map_condition_value_counts).\n\"\"\"\n )\n\n column_name = accessor_domain_kwargs[\"column\"]\n\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n column: sa.Column = sa.column(column_name)\n\n query = (\n sa.select([column, sa.func.count(column)])\n .where(unexpected_condition)\n .group_by(column)\n )\n if not MapMetricProvider.is_sqlalchemy_metric_selectable(map_metric_provider=cls):\n query = query.select_from(selectable)\n\n return execution_engine.engine.execute(query).fetchall()\n\n\ndef _sqlalchemy_map_condition_rows(\n cls,\n execution_engine: SqlAlchemyExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n \"\"\"\n Returns all rows of the metric values which do not meet an expected Expectation condition for instances\n of ColumnMapExpectation.\n \"\"\"\n unexpected_condition, compute_domain_kwargs, accessor_domain_kwargs = metrics.get(\n \"unexpected_condition\"\n )\n \"\"\"\n In order to invoke the \"ignore_row_if\" filtering logic, \"execution_engine.get_domain_records()\" must be supplied\n with all of the available \"domain_kwargs\" keys.\n \"\"\"\n domain_kwargs = dict(**compute_domain_kwargs, **accessor_domain_kwargs)\n selectable = execution_engine.get_domain_records(\n domain_kwargs=domain_kwargs,\n )\n\n table_columns = metrics.get(\"table.columns\")\n column_selector = [sa.column(column_name) for column_name in table_columns]\n query = sa.select(column_selector).where(unexpected_condition)\n if not MapMetricProvider.is_sqlalchemy_metric_selectable(map_metric_provider=cls):\n query = query.select_from(selectable)\n\n result_format = metric_value_kwargs[\"result_format\"]\n if result_format[\"result_format\"] != \"COMPLETE\":\n query = query.limit(result_format[\"partial_unexpected_count\"])\n try:\n return execution_engine.engine.execute(query).fetchall()\n except OperationalError as oe:\n exception_message: str = f\"An SQL execution Exception occurred: {str(oe)}.\"\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=exception_message\n )\n\n\ndef _spark_map_condition_unexpected_count_aggregate_fn(\n cls,\n execution_engine: SparkDFExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n unexpected_condition, compute_domain_kwargs, accessor_domain_kwargs = metrics.get(\n \"unexpected_condition\"\n )\n return (\n F.sum(F.when(unexpected_condition, 1).otherwise(0)),\n compute_domain_kwargs,\n 
accessor_domain_kwargs,\n )\n\n\ndef _spark_map_condition_unexpected_count_value(\n cls,\n execution_engine: SparkDFExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n # fn_domain_kwargs maybe updated to reflect null filtering\n unexpected_condition, compute_domain_kwargs, accessor_domain_kwargs = metrics.get(\n \"unexpected_condition\"\n )\n \"\"\"\n In order to invoke the \"ignore_row_if\" filtering logic, \"execution_engine.get_domain_records()\" must be supplied\n with all of the available \"domain_kwargs\" keys.\n \"\"\"\n domain_kwargs = dict(**compute_domain_kwargs, **accessor_domain_kwargs)\n df = execution_engine.get_domain_records(\n domain_kwargs=domain_kwargs,\n )\n\n # withColumn is required to transform window functions returned by some metrics to boolean mask\n data = df.withColumn(\"__unexpected\", unexpected_condition)\n filtered = data.filter(F.col(\"__unexpected\") == True).drop(F.col(\"__unexpected\"))\n\n return filtered.count()\n\n\ndef _spark_column_map_condition_values(\n cls,\n execution_engine: SparkDFExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n \"\"\"Return values from the specified domain that match the map-style metric in the metrics dictionary.\"\"\"\n unexpected_condition, compute_domain_kwargs, accessor_domain_kwargs = metrics.get(\n \"unexpected_condition\"\n )\n df = execution_engine.get_domain_records(\n domain_kwargs=compute_domain_kwargs,\n )\n\n if \"column\" not in accessor_domain_kwargs:\n raise ValueError(\n \"\"\"No \"column\" found in provided metric_domain_kwargs, but it is required for a column map metric\n(_spark_column_map_condition_values).\n\"\"\"\n )\n\n column_name = accessor_domain_kwargs[\"column\"]\n\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n # withColumn is required to transform window functions returned by some metrics to boolean mask\n data = df.withColumn(\"__unexpected\", unexpected_condition)\n filtered = data.filter(F.col(\"__unexpected\") == True).drop(F.col(\"__unexpected\"))\n\n result_format = metric_value_kwargs[\"result_format\"]\n if result_format[\"result_format\"] == \"COMPLETE\":\n rows = filtered.select(F.col(column_name)).collect()\n else:\n rows = (\n filtered.select(F.col(column_name))\n .limit(result_format[\"partial_unexpected_count\"])\n .collect()\n )\n return [row[column_name] for row in rows]\n\n\ndef _spark_column_map_condition_value_counts(\n cls,\n execution_engine: SparkDFExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n unexpected_condition, compute_domain_kwargs, accessor_domain_kwargs = metrics.get(\n \"unexpected_condition\"\n )\n df = execution_engine.get_domain_records(\n domain_kwargs=compute_domain_kwargs,\n )\n\n if \"column\" not in accessor_domain_kwargs:\n raise ValueError(\n \"\"\"No \"column\" found in provided metric_domain_kwargs, but it is required for a column map metric\n(_spark_column_map_condition_value_counts).\n\"\"\"\n )\n\n column_name = accessor_domain_kwargs[\"column\"]\n\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n # withColumn is required to transform 
window functions returned by some metrics to boolean mask\n data = df.withColumn(\"__unexpected\", unexpected_condition)\n filtered = data.filter(F.col(\"__unexpected\") == True).drop(F.col(\"__unexpected\"))\n\n result_format = metric_value_kwargs[\"result_format\"]\n\n value_counts = filtered.groupBy(F.col(column_name)).count()\n if result_format[\"result_format\"] == \"COMPLETE\":\n rows = value_counts.collect()\n else:\n rows = value_counts.collect()[: result_format[\"partial_unexpected_count\"]]\n return rows\n\n\ndef _spark_map_condition_rows(\n cls,\n execution_engine: SparkDFExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n unexpected_condition, compute_domain_kwargs, accessor_domain_kwargs = metrics.get(\n \"unexpected_condition\"\n )\n \"\"\"\n In order to invoke the \"ignore_row_if\" filtering logic, \"execution_engine.get_domain_records()\" must be supplied\n with all of the available \"domain_kwargs\" keys.\n \"\"\"\n domain_kwargs = dict(**compute_domain_kwargs, **accessor_domain_kwargs)\n df = execution_engine.get_domain_records(\n domain_kwargs=domain_kwargs,\n )\n\n # withColumn is required to transform window functions returned by some metrics to boolean mask\n data = df.withColumn(\"__unexpected\", unexpected_condition)\n filtered = data.filter(F.col(\"__unexpected\") == True).drop(F.col(\"__unexpected\"))\n\n result_format = metric_value_kwargs[\"result_format\"]\n\n if result_format[\"result_format\"] == \"COMPLETE\":\n return filtered.collect()\n else:\n return filtered.limit(result_format[\"partial_unexpected_count\"]).collect()\n\n\ndef _spark_column_pair_map_condition_values(\n cls,\n execution_engine: SparkDFExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n \"\"\"Return values from the specified domain that match the map-style metric in the metrics dictionary.\"\"\"\n (\n unexpected_condition,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = metrics[\"unexpected_condition\"]\n \"\"\"\n In order to invoke the \"ignore_row_if\" filtering logic, \"execution_engine.get_domain_records()\" must be supplied\n with all of the available \"domain_kwargs\" keys.\n \"\"\"\n domain_kwargs = dict(**compute_domain_kwargs, **accessor_domain_kwargs)\n df = execution_engine.get_domain_records(\n domain_kwargs=domain_kwargs,\n )\n\n # noinspection PyPep8Naming\n column_A_name = accessor_domain_kwargs[\"column_A\"]\n # noinspection PyPep8Naming\n column_B_name = accessor_domain_kwargs[\"column_B\"]\n\n column_list = [column_A_name, column_B_name]\n\n for column_name in column_list:\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n # withColumn is required to transform window functions returned by some metrics to boolean mask\n data = df.withColumn(\"__unexpected\", unexpected_condition)\n filtered = data.filter(F.col(\"__unexpected\") == True).drop(F.col(\"__unexpected\"))\n\n result_format = metric_value_kwargs[\"result_format\"]\n if result_format[\"result_format\"] == \"COMPLETE\":\n rows = filtered.select([F.col(column_A_name), F.col(column_B_name)]).collect()\n else:\n rows = (\n filtered.select([F.col(column_A_name), F.col(column_B_name)])\n .limit(result_format[\"partial_unexpected_count\"])\n .collect()\n )\n\n unexpected_list = [(row[column_A_name], row[column_B_name]) for row 
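# --- Illustrative sketch (not part of the library source) ---------------------
# Every Spark helper above uses the same trick: materialise the (possibly
# window-based) condition as a "__unexpected" boolean column, filter on it, then
# drop it before counting or collecting. Requires a local pyspark install; the
# data and condition below are hypothetical.
from pyspark.sql import SparkSession
from pyspark.sql import functions as F

spark = SparkSession.builder.master("local[1]").appName("sketch").getOrCreate()
df = spark.createDataFrame([(12,), (-3,), (-7,)], ["age"])

unexpected_condition = F.col("age") < 0

data = df.withColumn("__unexpected", unexpected_condition)
filtered = data.filter(F.col("__unexpected") == True).drop("__unexpected")

print(filtered.count())                                    # 2  -> unexpected_count
print(filtered.groupBy(F.col("age")).count().collect())    # -> unexpected_value_counts

spark.stop()
# --- end of sketch -------------------------------------------------------------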
in rows]\n return unexpected_list\n\n\ndef _spark_column_pair_map_condition_filtered_row_count(\n cls,\n execution_engine: SparkDFExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n \"\"\"Return record counts from the specified domain that match the map-style metric in the metrics dictionary.\"\"\"\n _, compute_domain_kwargs, accessor_domain_kwargs = metrics[\"unexpected_condition\"]\n \"\"\"\n In order to invoke the \"ignore_row_if\" filtering logic, \"execution_engine.get_domain_records()\" must be supplied\n with all of the available \"domain_kwargs\" keys.\n \"\"\"\n domain_kwargs = dict(**compute_domain_kwargs, **accessor_domain_kwargs)\n df = execution_engine.get_domain_records(\n domain_kwargs=domain_kwargs,\n )\n\n # noinspection PyPep8Naming\n column_A_name = accessor_domain_kwargs[\"column_A\"]\n # noinspection PyPep8Naming\n column_B_name = accessor_domain_kwargs[\"column_B\"]\n\n column_list = [column_A_name, column_B_name]\n\n for column_name in column_list:\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n return df.count()\n\n\ndef _spark_multicolumn_map_condition_values(\n cls,\n execution_engine: SqlAlchemyExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: Dict[str, Any],\n **kwargs,\n):\n \"\"\"Return values from the specified domain that match the map-style metric in the metrics dictionary.\"\"\"\n (\n unexpected_condition,\n compute_domain_kwargs,\n accessor_domain_kwargs,\n ) = metrics[\"unexpected_condition\"]\n \"\"\"\n In order to invoke the \"ignore_row_if\" filtering logic, \"execution_engine.get_domain_records()\" must be supplied\n with all of the available \"domain_kwargs\" keys.\n \"\"\"\n domain_kwargs = dict(**compute_domain_kwargs, **accessor_domain_kwargs)\n df = execution_engine.get_domain_records(\n domain_kwargs=domain_kwargs,\n )\n\n if \"column_list\" not in accessor_domain_kwargs:\n raise ValueError(\n \"\"\"No \"column_list\" found in provided metric_domain_kwargs, but it is required for a multicolumn map metric\n(_spark_multicolumn_map_condition_values).\n\"\"\"\n )\n\n column_list = accessor_domain_kwargs[\"column_list\"]\n\n for column_name in column_list:\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n # withColumn is required to transform window functions returned by some metrics to boolean mask\n data = df.withColumn(\"__unexpected\", unexpected_condition)\n filtered = data.filter(F.col(\"__unexpected\") == True).drop(F.col(\"__unexpected\"))\n\n column_selector = [F.col(column_name) for column_name in column_list]\n\n domain_values = filtered.select(column_selector)\n\n result_format = metric_value_kwargs[\"result_format\"]\n if result_format[\"result_format\"] == \"COMPLETE\":\n domain_values = (\n domain_values.select(column_selector).toPandas().to_dict(\"records\")\n )\n else:\n domain_values = (\n domain_values.select(column_selector)\n .limit(result_format[\"partial_unexpected_count\"])\n .toPandas()\n .to_dict(\"records\")\n )\n\n return domain_values\n\n\ndef _spark_multicolumn_map_condition_filtered_row_count(\n cls,\n execution_engine: SparkDFExecutionEngine,\n metric_domain_kwargs: Dict,\n metric_value_kwargs: Dict,\n metrics: 
Dict[str, Any],\n **kwargs,\n):\n \"\"\"Return record counts from the specified domain that match the map-style metric in the metrics dictionary.\"\"\"\n _, compute_domain_kwargs, accessor_domain_kwargs = metrics[\"unexpected_condition\"]\n \"\"\"\n In order to invoke the \"ignore_row_if\" filtering logic, \"execution_engine.get_domain_records()\" must be supplied\n with all of the available \"domain_kwargs\" keys.\n \"\"\"\n domain_kwargs = dict(**compute_domain_kwargs, **accessor_domain_kwargs)\n df = execution_engine.get_domain_records(\n domain_kwargs=domain_kwargs,\n )\n\n if \"column_list\" not in accessor_domain_kwargs:\n raise ValueError(\n \"\"\"No \"column_list\" found in provided metric_domain_kwargs, but it is required for a multicolumn map metric\n(_spark_multicolumn_map_condition_filtered_row_count).\n\"\"\"\n )\n\n column_list = accessor_domain_kwargs[\"column_list\"]\n\n for column_name in column_list:\n if column_name not in metrics[\"table.columns\"]:\n raise ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError(\n message=f'Error: The column \"{column_name}\" in BatchData does not exist.'\n )\n\n return df.count()\n\n\nclass MapMetricProvider(MetricProvider):\n condition_domain_keys = (\n \"batch_id\",\n \"table\",\n \"row_condition\",\n \"condition_parser\",\n )\n function_domain_keys = (\n \"batch_id\",\n \"table\",\n \"row_condition\",\n \"condition_parser\",\n )\n condition_value_keys = tuple()\n function_value_keys = tuple()\n filter_column_isnull = True\n\n SQLALCHEMY_SELECTABLE_METRICS = {\n \"compound_columns.count\",\n \"compound_columns.unique\",\n }\n\n @classmethod\n def _register_metric_functions(cls):\n if not hasattr(cls, \"function_metric_name\") and not hasattr(\n cls, \"condition_metric_name\"\n ):\n return\n\n for attr, candidate_metric_fn in cls.__dict__.items():\n if not hasattr(candidate_metric_fn, \"metric_engine\"):\n # This is not a metric\n continue\n metric_fn_type = getattr(candidate_metric_fn, \"metric_fn_type\")\n engine = candidate_metric_fn.metric_engine\n if not issubclass(engine, ExecutionEngine):\n raise ValueError(\n \"metric functions must be defined with an Execution Engine\"\n )\n\n if metric_fn_type in [\n MetricPartialFunctionTypes.MAP_CONDITION_SERIES,\n MetricPartialFunctionTypes.MAP_CONDITION_FN,\n MetricPartialFunctionTypes.WINDOW_CONDITION_FN,\n ]:\n if not hasattr(cls, \"condition_metric_name\"):\n raise ValueError(\n \"A MapMetricProvider must have a metric_condition_name to have a decorated column_condition_partial method.\"\n )\n\n condition_provider = candidate_metric_fn\n # noinspection PyUnresolvedReferences\n metric_name = cls.condition_metric_name\n metric_domain_keys = cls.condition_domain_keys\n metric_value_keys = cls.condition_value_keys\n metric_definition_kwargs = getattr(\n condition_provider, \"metric_definition_kwargs\", {}\n )\n domain_type = getattr(\n condition_provider,\n \"domain_type\",\n metric_definition_kwargs.get(\n \"domain_type\", MetricDomainTypes.TABLE\n ),\n )\n if issubclass(engine, PandasExecutionEngine):\n register_metric(\n metric_name=metric_name + \".condition\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=metric_value_keys,\n execution_engine=engine,\n metric_class=cls,\n metric_provider=condition_provider,\n metric_fn_type=metric_fn_type,\n )\n register_metric(\n metric_name=metric_name + \".unexpected_count\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=metric_value_keys,\n execution_engine=engine,\n metric_class=cls,\n 
metric_provider=_pandas_map_condition_unexpected_count,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n register_metric(\n metric_name=metric_name + \".unexpected_index_list\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=(*metric_value_keys, \"result_format\"),\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_pandas_map_condition_index,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n register_metric(\n metric_name=metric_name + \".unexpected_rows\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=(*metric_value_keys, \"result_format\"),\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_pandas_map_condition_rows,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n if domain_type == MetricDomainTypes.COLUMN:\n register_metric(\n metric_name=metric_name + \".unexpected_values\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=(*metric_value_keys, \"result_format\"),\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_pandas_column_map_condition_values,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n register_metric(\n metric_name=metric_name + \".unexpected_value_counts\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=(*metric_value_keys, \"result_format\"),\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_pandas_column_map_condition_value_counts,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n elif domain_type == MetricDomainTypes.COLUMN_PAIR:\n register_metric(\n metric_name=metric_name + \".unexpected_values\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=(*metric_value_keys, \"result_format\"),\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_pandas_column_pair_map_condition_values,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n register_metric(\n metric_name=metric_name + \".filtered_row_count\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=(*metric_value_keys, \"result_format\"),\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_pandas_column_pair_map_condition_filtered_row_count,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n elif domain_type == MetricDomainTypes.MULTICOLUMN:\n register_metric(\n metric_name=metric_name + \".unexpected_values\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=(*metric_value_keys, \"result_format\"),\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_pandas_multicolumn_map_condition_values,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n register_metric(\n metric_name=metric_name + \".filtered_row_count\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=(*metric_value_keys, \"result_format\"),\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_pandas_multicolumn_map_condition_filtered_row_count,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n elif issubclass(engine, SqlAlchemyExecutionEngine):\n register_metric(\n metric_name=metric_name + \".condition\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=metric_value_keys,\n execution_engine=engine,\n metric_class=cls,\n metric_provider=condition_provider,\n metric_fn_type=metric_fn_type,\n )\n register_metric(\n metric_name=metric_name + \".unexpected_rows\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=(*metric_value_keys, \"result_format\"),\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_sqlalchemy_map_condition_rows,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n if metric_fn_type == 
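# --- Illustrative sketch (not part of the library source) ---------------------
# For every decorated condition partial, _register_metric_functions fans out a
# family of metric names built from cls.condition_metric_name plus fixed
# suffixes; which suffixes appear depends on the domain type. The helper below
# is a hypothetical, simplified rendering of that naming scheme.
def registered_metric_names(condition_metric_name: str, domain_type: str) -> list:
    suffixes = [
        ".condition",
        ".unexpected_count",
        ".unexpected_index_list",   # registered in the pandas branch only
        ".unexpected_rows",
    ]
    if domain_type == "column":
        suffixes += [".unexpected_values", ".unexpected_value_counts"]
    elif domain_type in ("column_pair", "multicolumn"):
        suffixes += [".unexpected_values", ".filtered_row_count"]
    return [condition_metric_name + suffix for suffix in suffixes]


print(registered_metric_names("column_values.nonnull", "column"))
# ['column_values.nonnull.condition', 'column_values.nonnull.unexpected_count', ...]
# --- end of sketch -------------------------------------------------------------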
MetricPartialFunctionTypes.MAP_CONDITION_FN:\n if domain_type == MetricDomainTypes.COLUMN:\n register_metric(\n metric_name=metric_name\n + \".unexpected_count.aggregate_fn\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=metric_value_keys,\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_sqlalchemy_map_condition_unexpected_count_aggregate_fn,\n metric_fn_type=MetricPartialFunctionTypes.AGGREGATE_FN,\n )\n register_metric(\n metric_name=metric_name + \".unexpected_count\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=metric_value_keys,\n execution_engine=engine,\n metric_class=cls,\n metric_provider=None,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n else:\n register_metric(\n metric_name=metric_name + \".unexpected_count\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=metric_value_keys,\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_sqlalchemy_map_condition_unexpected_count_value,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n elif (\n metric_fn_type == MetricPartialFunctionTypes.WINDOW_CONDITION_FN\n ):\n register_metric(\n metric_name=metric_name + \".unexpected_count\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=metric_value_keys,\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_sqlalchemy_map_condition_unexpected_count_value,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n if domain_type == MetricDomainTypes.COLUMN:\n register_metric(\n metric_name=metric_name + \".unexpected_values\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=(*metric_value_keys, \"result_format\"),\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_sqlalchemy_column_map_condition_values,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n register_metric(\n metric_name=metric_name + \".unexpected_value_counts\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=(*metric_value_keys, \"result_format\"),\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_sqlalchemy_column_map_condition_value_counts,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n elif domain_type == MetricDomainTypes.COLUMN_PAIR:\n register_metric(\n metric_name=metric_name + \".unexpected_values\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=(*metric_value_keys, \"result_format\"),\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_sqlalchemy_column_pair_map_condition_values,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n register_metric(\n metric_name=metric_name + \".filtered_row_count\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=(*metric_value_keys, \"result_format\"),\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_sqlalchemy_column_pair_map_condition_filtered_row_count,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n elif domain_type == MetricDomainTypes.MULTICOLUMN:\n register_metric(\n metric_name=metric_name + \".unexpected_values\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=(*metric_value_keys, \"result_format\"),\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_sqlalchemy_multicolumn_map_condition_values,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n register_metric(\n metric_name=metric_name + \".filtered_row_count\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=(*metric_value_keys, \"result_format\"),\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_sqlalchemy_multicolumn_map_condition_filtered_row_count,\n 
metric_fn_type=MetricFunctionTypes.VALUE,\n )\n elif issubclass(engine, SparkDFExecutionEngine):\n register_metric(\n metric_name=metric_name + \".condition\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=metric_value_keys,\n execution_engine=engine,\n metric_class=cls,\n metric_provider=condition_provider,\n metric_fn_type=metric_fn_type,\n )\n register_metric(\n metric_name=metric_name + \".unexpected_rows\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=(*metric_value_keys, \"result_format\"),\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_spark_map_condition_rows,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n if metric_fn_type == MetricPartialFunctionTypes.MAP_CONDITION_FN:\n if domain_type == MetricDomainTypes.COLUMN:\n register_metric(\n metric_name=metric_name\n + \".unexpected_count.aggregate_fn\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=metric_value_keys,\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_spark_map_condition_unexpected_count_aggregate_fn,\n metric_fn_type=MetricPartialFunctionTypes.AGGREGATE_FN,\n )\n register_metric(\n metric_name=metric_name + \".unexpected_count\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=metric_value_keys,\n execution_engine=engine,\n metric_class=cls,\n metric_provider=None,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n else:\n register_metric(\n metric_name=metric_name + \".unexpected_count\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=metric_value_keys,\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_spark_map_condition_unexpected_count_value,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n elif (\n metric_fn_type == MetricPartialFunctionTypes.WINDOW_CONDITION_FN\n ):\n register_metric(\n metric_name=metric_name + \".unexpected_count\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=metric_value_keys,\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_spark_map_condition_unexpected_count_value,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n if domain_type == MetricDomainTypes.COLUMN:\n register_metric(\n metric_name=metric_name + \".unexpected_values\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=(*metric_value_keys, \"result_format\"),\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_spark_column_map_condition_values,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n register_metric(\n metric_name=metric_name + \".unexpected_value_counts\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=(*metric_value_keys, \"result_format\"),\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_spark_column_map_condition_value_counts,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n elif domain_type == MetricDomainTypes.COLUMN_PAIR:\n register_metric(\n metric_name=metric_name + \".unexpected_values\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=(*metric_value_keys, \"result_format\"),\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_spark_column_pair_map_condition_values,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n register_metric(\n metric_name=metric_name + \".filtered_row_count\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=(*metric_value_keys, \"result_format\"),\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_spark_column_pair_map_condition_filtered_row_count,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n elif domain_type == 
MetricDomainTypes.MULTICOLUMN:\n register_metric(\n metric_name=metric_name + \".unexpected_values\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=(*metric_value_keys, \"result_format\"),\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_spark_multicolumn_map_condition_values,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n register_metric(\n metric_name=metric_name + \".filtered_row_count\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=(*metric_value_keys, \"result_format\"),\n execution_engine=engine,\n metric_class=cls,\n metric_provider=_spark_multicolumn_map_condition_filtered_row_count,\n metric_fn_type=MetricFunctionTypes.VALUE,\n )\n elif metric_fn_type in [\n MetricPartialFunctionTypes.MAP_SERIES,\n MetricPartialFunctionTypes.MAP_FN,\n MetricPartialFunctionTypes.WINDOW_FN,\n ]:\n if not hasattr(cls, \"function_metric_name\"):\n raise ValueError(\n \"A MapMetricProvider must have a function_metric_name to have a decorated column_function_partial method.\"\n )\n map_function_provider = candidate_metric_fn\n # noinspection PyUnresolvedReferences\n metric_name = cls.function_metric_name\n metric_domain_keys = cls.function_domain_keys\n metric_value_keys = cls.function_value_keys\n register_metric(\n metric_name=metric_name + \".map\",\n metric_domain_keys=metric_domain_keys,\n metric_value_keys=metric_value_keys,\n execution_engine=engine,\n metric_class=cls,\n metric_provider=map_function_provider,\n metric_fn_type=metric_fn_type,\n )\n\n @classmethod\n def _get_evaluation_dependencies(\n cls,\n metric: MetricConfiguration,\n configuration: Optional[ExpectationConfiguration] = None,\n execution_engine: Optional[ExecutionEngine] = None,\n runtime_configuration: Optional[dict] = None,\n ):\n metric_name = metric.metric_name\n base_metric_value_kwargs = {\n k: v for k, v in metric.metric_value_kwargs.items() if k != \"result_format\"\n }\n dependencies = {}\n\n metric_suffix = \".unexpected_count\"\n if metric_name.endswith(metric_suffix):\n try:\n _ = get_metric_provider(metric_name + \".aggregate_fn\", execution_engine)\n has_aggregate_fn = True\n except ge_exceptions.MetricProviderError:\n has_aggregate_fn = False\n if has_aggregate_fn:\n dependencies[\"metric_partial_fn\"] = MetricConfiguration(\n metric_name + \".aggregate_fn\",\n metric.metric_domain_kwargs,\n base_metric_value_kwargs,\n )\n else:\n dependencies[\"unexpected_condition\"] = MetricConfiguration(\n metric_name[: -len(metric_suffix)] + \".condition\",\n metric.metric_domain_kwargs,\n base_metric_value_kwargs,\n )\n\n # MapMetric uses the condition to build unexpected_count.aggregate_fn as well\n metric_suffix = \".unexpected_count.aggregate_fn\"\n if metric_name.endswith(metric_suffix):\n dependencies[\"unexpected_condition\"] = MetricConfiguration(\n metric_name[: -len(metric_suffix)] + \".condition\",\n metric.metric_domain_kwargs,\n base_metric_value_kwargs,\n )\n\n for metric_suffix in [\n \".unexpected_values\",\n \".unexpected_value_counts\",\n \".unexpected_index_list\",\n \".unexpected_rows\",\n \".filtered_row_count\",\n ]:\n if metric_name.endswith(metric_suffix):\n dependencies[\"unexpected_condition\"] = MetricConfiguration(\n metric_name[: -len(metric_suffix)] + \".condition\",\n metric.metric_domain_kwargs,\n base_metric_value_kwargs,\n )\n\n try:\n _ = get_metric_provider(metric_name + \".map\", execution_engine)\n dependencies[\"metric_map_fn\"] = MetricConfiguration(\n metric_name + \".map\",\n metric.metric_domain_kwargs,\n 
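# --- Illustrative sketch (not part of the library source) ---------------------
# _get_evaluation_dependencies maps a derived metric such as
# "<name>.unexpected_values" back to its "<name>.condition" dependency by
# stripping the known suffix. A hypothetical, simplified version of that lookup:
SUFFIXES = [
    ".unexpected_count",
    ".unexpected_values",
    ".unexpected_value_counts",
    ".unexpected_index_list",
    ".unexpected_rows",
    ".filtered_row_count",
]


def condition_dependency(metric_name: str):
    for suffix in SUFFIXES:
        if metric_name.endswith(suffix):
            return metric_name[: -len(suffix)] + ".condition"
    return None  # not a derived map metric


print(condition_dependency("column_values.in_set.unexpected_values"))
# column_values.in_set.condition
# --- end of sketch -------------------------------------------------------------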
metric.metric_value_kwargs,\n )\n except ge_exceptions.MetricProviderError:\n pass\n\n return dependencies\n\n @staticmethod\n def is_sqlalchemy_metric_selectable(\n map_metric_provider: MetaMetricProvider,\n ) -> bool:\n \"\"\"\n :param map_metric_provider: object of type \"MapMetricProvider\", whose SQLAlchemy implementation is inspected\n :return: boolean indicating whether or not the returned value of a method implementing the metric resolves all\n columns -- hence the caller must not use \"select_from\" clause as part of its own SQLAlchemy query; otherwise an\n unwanted selectable (e.g., table) will be added to \"FROM\", leading to duplicated and/or erroneous results.\n \"\"\"\n # noinspection PyUnresolvedReferences\n return (\n hasattr(map_metric_provider, \"condition_metric_name\")\n and map_metric_provider.condition_metric_name\n in MapMetricProvider.SQLALCHEMY_SELECTABLE_METRICS\n ) or (\n hasattr(map_metric_provider, \"function_metric_name\")\n and map_metric_provider.function_metric_name\n in MapMetricProvider.SQLALCHEMY_SELECTABLE_METRICS\n )\n\n\nclass ColumnMapMetricProvider(MapMetricProvider):\n condition_domain_keys = (\n \"batch_id\",\n \"table\",\n \"column\",\n \"row_condition\",\n \"condition_parser\",\n )\n function_domain_keys = (\n \"batch_id\",\n \"table\",\n \"column\",\n \"row_condition\",\n \"condition_parser\",\n )\n condition_value_keys = tuple()\n function_value_keys = tuple()\n\n @classmethod\n def _get_evaluation_dependencies(\n cls,\n metric: MetricConfiguration,\n configuration: Optional[ExpectationConfiguration] = None,\n execution_engine: Optional[ExecutionEngine] = None,\n runtime_configuration: Optional[dict] = None,\n ):\n dependencies: dict = super()._get_evaluation_dependencies(\n metric=metric,\n configuration=configuration,\n execution_engine=execution_engine,\n runtime_configuration=runtime_configuration,\n )\n table_domain_kwargs: dict = {\n k: v for k, v in metric.metric_domain_kwargs.items() if k != \"column\"\n }\n dependencies[\"table.columns\"] = MetricConfiguration(\n metric_name=\"table.columns\",\n metric_domain_kwargs=table_domain_kwargs,\n metric_value_kwargs=None,\n metric_dependencies=None,\n )\n return dependencies\n\n\nclass ColumnPairMapMetricProvider(MapMetricProvider):\n condition_domain_keys = (\n \"batch_id\",\n \"table\",\n \"column_A\",\n \"column_B\",\n \"row_condition\",\n \"condition_parser\",\n \"ignore_row_if\",\n )\n function_domain_keys = (\n \"batch_id\",\n \"table\",\n \"column_A\",\n \"column_B\",\n \"row_condition\",\n \"condition_parser\",\n \"ignore_row_if\",\n )\n condition_value_keys = tuple()\n function_value_keys = tuple()\n\n @classmethod\n def _get_evaluation_dependencies(\n cls,\n metric: MetricConfiguration,\n configuration: Optional[ExpectationConfiguration] = None,\n execution_engine: Optional[ExecutionEngine] = None,\n runtime_configuration: Optional[dict] = None,\n ):\n dependencies: dict = super()._get_evaluation_dependencies(\n metric=metric,\n configuration=configuration,\n execution_engine=execution_engine,\n runtime_configuration=runtime_configuration,\n )\n table_domain_kwargs: dict = {\n k: v\n for k, v in metric.metric_domain_kwargs.items()\n if k not in [\"column_A\", \"column_B\", \"ignore_row_if\"]\n }\n dependencies[\"table.columns\"] = MetricConfiguration(\n metric_name=\"table.columns\",\n metric_domain_kwargs=table_domain_kwargs,\n metric_value_kwargs=None,\n metric_dependencies=None,\n )\n return dependencies\n\n\nclass MulticolumnMapMetricProvider(MapMetricProvider):\n 
condition_domain_keys = (\n \"batch_id\",\n \"table\",\n \"column_list\",\n \"row_condition\",\n \"condition_parser\",\n \"ignore_row_if\",\n )\n function_domain_keys = (\n \"batch_id\",\n \"table\",\n \"column_list\",\n \"row_condition\",\n \"condition_parser\",\n \"ignore_row_if\",\n )\n condition_value_keys = tuple()\n function_value_keys = tuple()\n\n @classmethod\n def _get_evaluation_dependencies(\n cls,\n metric: MetricConfiguration,\n configuration: Optional[ExpectationConfiguration] = None,\n execution_engine: Optional[ExecutionEngine] = None,\n runtime_configuration: Optional[dict] = None,\n ):\n dependencies: dict = super()._get_evaluation_dependencies(\n metric=metric,\n configuration=configuration,\n execution_engine=execution_engine,\n runtime_configuration=runtime_configuration,\n )\n table_domain_kwargs: dict = {\n k: v\n for k, v in metric.metric_domain_kwargs.items()\n if k not in [\"column_list\", \"ignore_row_if\"]\n }\n dependencies[\"table.columns\"] = MetricConfiguration(\n metric_name=\"table.columns\",\n metric_domain_kwargs=table_domain_kwargs,\n metric_value_kwargs=None,\n metric_dependencies=None,\n )\n return dependencies\n",
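# --- Illustrative sketch (not part of the library source) ---------------------
# A concrete map metric is normally defined by subclassing ColumnMapMetricProvider
# and decorating per-engine condition partials; the ".condition",
# ".unexpected_count", ".unexpected_values", etc. metrics are then registered
# automatically by _register_metric_functions above. The metric name and the
# import path follow the usual Great Expectations custom-expectation pattern but
# are not taken from this file, so treat them as assumptions.
from great_expectations.execution_engine import PandasExecutionEngine
from great_expectations.expectations.metrics import (
    ColumnMapMetricProvider,
    column_condition_partial,
)


class ColumnValuesPositive(ColumnMapMetricProvider):
    # Hypothetical metric name; pick something unique for a real custom metric.
    condition_metric_name = "column_values.positive"

    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column, **kwargs):
        # The returned boolean Series marks *expected* values; rows where it is
        # False feed the "unexpected_*" metrics defined above.
        return column > 0
# --- end of sketch -------------------------------------------------------------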
"import copy\nimport datetime\nimport hashlib\nimport logging\nimport os\nimport pickle\nimport random\nimport warnings\nfrom functools import partial\nfrom io import BytesIO\nfrom typing import Any, Callable, Iterable, List, Optional, Tuple, Union\n\nimport pandas as pd\n\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations.core.batch import BatchMarkers\nfrom great_expectations.core.batch_spec import (\n AzureBatchSpec,\n BatchSpec,\n GCSBatchSpec,\n PathBatchSpec,\n RuntimeDataBatchSpec,\n S3BatchSpec,\n)\nfrom great_expectations.core.util import AzureUrl, GCSUrl, S3Url, sniff_s3_compression\nfrom great_expectations.execution_engine import ExecutionEngine\nfrom great_expectations.execution_engine.execution_engine import MetricDomainTypes\nfrom great_expectations.execution_engine.pandas_batch_data import PandasBatchData\n\nlogger = logging.getLogger(__name__)\n\ntry:\n import boto3\nexcept ImportError:\n boto3 = None\n logger.debug(\n \"Unable to load AWS connection object; install optional boto3 dependency for support\"\n )\n\ntry:\n from azure.storage.blob import BlobServiceClient\nexcept ImportError:\n BlobServiceClient = None\n logger.debug(\n \"Unable to load Azure connection object; install optional azure dependency for support\"\n )\n\ntry:\n from google.auth.exceptions import DefaultCredentialsError\n from google.cloud import storage\n from google.oauth2 import service_account\nexcept ImportError:\n storage = None\n service_account = None\n DefaultCredentialsError = None\n logger.debug(\n \"Unable to load GCS connection object; install optional google dependency for support\"\n )\n\n\nHASH_THRESHOLD = 1e9\n\n\nclass PandasExecutionEngine(ExecutionEngine):\n \"\"\"\nPandasExecutionEngine instantiates the great_expectations Expectations API as a subclass of a pandas.DataFrame.\n\nFor the full API reference, please see :func:`Dataset <great_expectations.data_asset.dataset.Dataset>`\n\nNotes:\n 1. Samples and Subsets of PandaDataSet have ALL the expectations of the original \\\n data frame unless the user specifies the ``discard_subset_failing_expectations = True`` \\\n property on the original data frame.\n 2. Concatenations, joins, and merges of PandaDataSets contain NO expectations (since no autoinspection\n is performed by default).\n\n--ge-feature-maturity-info--\n\n id: validation_engine_pandas\n title: Validation Engine - Pandas\n icon:\n short_description: Use Pandas DataFrame to validate data\n description: Use Pandas DataFrame to validate data\n how_to_guide_url:\n maturity: Production\n maturity_details:\n api_stability: Stable\n implementation_completeness: Complete\n unit_test_coverage: Complete\n integration_infrastructure_test_coverage: N/A -> see relevant Datasource evaluation\n documentation_completeness: Complete\n bug_risk: Low\n expectation_completeness: Complete\n\n--ge-feature-maturity-info--\n \"\"\"\n\n recognized_batch_spec_defaults = {\n \"reader_method\",\n \"reader_options\",\n }\n\n def __init__(self, *args, **kwargs):\n self.discard_subset_failing_expectations = kwargs.pop(\n \"discard_subset_failing_expectations\", False\n )\n boto3_options: dict = kwargs.pop(\"boto3_options\", {})\n azure_options: dict = kwargs.pop(\"azure_options\", {})\n gcs_options: dict = kwargs.pop(\"gcs_options\", {})\n\n # Try initializing cloud provider client. 
If unsuccessful, we'll catch it when/if a BatchSpec is passed in.\n try:\n self._s3 = boto3.client(\"s3\", **boto3_options)\n except (TypeError, AttributeError):\n self._s3 = None\n\n try:\n if \"conn_str\" in azure_options:\n self._azure = BlobServiceClient.from_connection_string(**azure_options)\n else:\n self._azure = BlobServiceClient(**azure_options)\n except (TypeError, AttributeError):\n self._azure = None\n\n # Can only configure a GCS connection by 1) seting an env var OR 2) passing explicit credentials\n if os.getenv(\"GOOGLE_APPLICATION_CREDENTIALS\") is None and gcs_options == {}:\n self._gcs = None\n else:\n try:\n credentials = None # If configured with gcloud CLI / env vars\n if \"filename\" in gcs_options:\n credentials = service_account.Credentials.from_service_account_file(\n **gcs_options\n )\n elif \"info\" in gcs_options:\n credentials = service_account.Credentials.from_service_account_info(\n **gcs_options\n )\n self._gcs = storage.Client(credentials=credentials, **gcs_options)\n except (TypeError, AttributeError):\n self._gcs = None\n\n super().__init__(*args, **kwargs)\n\n self._config.update(\n {\n \"discard_subset_failing_expectations\": self.discard_subset_failing_expectations,\n \"boto3_options\": boto3_options,\n \"azure_options\": azure_options,\n \"gcs_options\": gcs_options,\n }\n )\n\n def configure_validator(self, validator):\n super().configure_validator(validator)\n validator.expose_dataframe_methods = True\n\n def load_batch_data(self, batch_id: str, batch_data: Any) -> None:\n if isinstance(batch_data, pd.DataFrame):\n batch_data = PandasBatchData(self, batch_data)\n elif isinstance(batch_data, PandasBatchData):\n pass\n else:\n raise ge_exceptions.GreatExpectationsError(\n \"PandasExecutionEngine requires batch data that is either a DataFrame or a PandasBatchData object\"\n )\n super().load_batch_data(batch_id=batch_id, batch_data=batch_data)\n\n def get_batch_data_and_markers(\n self, batch_spec: BatchSpec\n ) -> Tuple[Any, BatchMarkers]: # batch_data\n # We need to build a batch_markers to be used in the dataframe\n batch_markers: BatchMarkers = BatchMarkers(\n {\n \"ge_load_time\": datetime.datetime.now(datetime.timezone.utc).strftime(\n \"%Y%m%dT%H%M%S.%fZ\"\n )\n }\n )\n\n batch_data: Any\n if isinstance(batch_spec, RuntimeDataBatchSpec):\n # batch_data != None is already checked when RuntimeDataBatchSpec is instantiated\n batch_data = batch_spec.batch_data\n if isinstance(batch_data, str):\n raise ge_exceptions.ExecutionEngineError(\n f\"\"\"PandasExecutionEngine has been passed a string type batch_data, \"{batch_data}\", which is illegal.\nPlease check your config.\"\"\"\n )\n if isinstance(batch_spec.batch_data, pd.DataFrame):\n df = batch_spec.batch_data\n elif isinstance(batch_spec.batch_data, PandasBatchData):\n df = batch_spec.batch_data.dataframe\n else:\n raise ValueError(\n \"RuntimeDataBatchSpec must provide a Pandas DataFrame or PandasBatchData object.\"\n )\n batch_spec.batch_data = \"PandasDataFrame\"\n\n elif isinstance(batch_spec, S3BatchSpec):\n if self._s3 is None:\n raise ge_exceptions.ExecutionEngineError(\n f\"\"\"PandasExecutionEngine has been passed a S3BatchSpec,\n but the ExecutionEngine does not have a boto3 client configured. 
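# --- Illustrative sketch (not part of the library source) ---------------------
# The engine treats each cloud client as optional: the import guard at module
# top sets the symbol to None when the package is absent, and the constructor
# wraps client creation in try/except so a missing or misconfigured client only
# fails later, when a matching BatchSpec is actually used. A stripped-down
# rendering of that pattern with a hypothetical engine class:
try:
    import boto3
except ImportError:
    boto3 = None


class TinyEngine:
    def __init__(self, boto3_options: dict = None):
        try:
            self._s3 = boto3.client("s3", **(boto3_options or {}))
        except (TypeError, AttributeError):
            # boto3 is None, or the options were invalid -> defer the failure.
            self._s3 = None

    def read_from_s3(self, bucket: str, key: str) -> bytes:
        if self._s3 is None:
            raise RuntimeError(
                "Received an S3 batch spec, but no boto3 client is configured."
            )
        return self._s3.get_object(Bucket=bucket, Key=key)["Body"].read()
# --- end of sketch -------------------------------------------------------------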
Please check your config.\"\"\"\n )\n s3_engine = self._s3\n reader_method: str = batch_spec.reader_method\n reader_options: dict = batch_spec.reader_options or {}\n path: str = batch_spec.path\n s3_url = S3Url(path)\n if \"compression\" not in reader_options.keys():\n inferred_compression_param = sniff_s3_compression(s3_url)\n if inferred_compression_param is not None:\n reader_options[\"compression\"] = inferred_compression_param\n s3_object = s3_engine.get_object(Bucket=s3_url.bucket, Key=s3_url.key)\n logger.debug(\n \"Fetching s3 object. Bucket: {} Key: {}\".format(\n s3_url.bucket, s3_url.key\n )\n )\n reader_fn = self._get_reader_fn(reader_method, s3_url.key)\n buf = BytesIO(s3_object[\"Body\"].read())\n buf.seek(0)\n df = reader_fn(buf, **reader_options)\n\n elif isinstance(batch_spec, AzureBatchSpec):\n if self._azure is None:\n raise ge_exceptions.ExecutionEngineError(\n f\"\"\"PandasExecutionEngine has been passed a AzureBatchSpec,\n but the ExecutionEngine does not have an Azure client configured. Please check your config.\"\"\"\n )\n azure_engine = self._azure\n reader_method: str = batch_spec.reader_method\n reader_options: dict = batch_spec.reader_options or {}\n path: str = batch_spec.path\n azure_url = AzureUrl(path)\n blob_client = azure_engine.get_blob_client(\n container=azure_url.container, blob=azure_url.blob\n )\n azure_object = blob_client.download_blob()\n logger.debug(\n f\"Fetching Azure blob. Container: {azure_url.container} Blob: {azure_url.blob}\"\n )\n reader_fn = self._get_reader_fn(reader_method, azure_url.blob)\n buf = BytesIO(azure_object.readall())\n buf.seek(0)\n df = reader_fn(buf, **reader_options)\n\n elif isinstance(batch_spec, GCSBatchSpec):\n if self._gcs is None:\n raise ge_exceptions.ExecutionEngineError(\n f\"\"\"PandasExecutionEngine has been passed a GCSBatchSpec,\n but the ExecutionEngine does not have an GCS client configured. Please check your config.\"\"\"\n )\n gcs_engine = self._gcs\n gcs_url = GCSUrl(batch_spec.path)\n reader_method: str = batch_spec.reader_method\n reader_options: dict = batch_spec.reader_options or {}\n gcs_bucket = gcs_engine.get_bucket(gcs_url.bucket)\n gcs_blob = gcs_bucket.blob(gcs_url.blob)\n logger.debug(\n f\"Fetching GCS blob. 
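# --- Illustrative sketch (not part of the library source) ---------------------
# The S3 branch above boils down to: download the object body, wrap it in
# BytesIO, and hand it to the pandas reader chosen from the path. Bucket, key,
# and compression below are hypothetical placeholders; valid AWS credentials are
# assumed.
from io import BytesIO

import boto3
import pandas as pd

s3 = boto3.client("s3")
obj = s3.get_object(Bucket="my-bucket", Key="data/events.csv.gz")

buf = BytesIO(obj["Body"].read())
buf.seek(0)
df = pd.read_csv(buf, compression="gzip")  # i.e. reader_fn(buf, **reader_options)
print(df.head())
# --- end of sketch -------------------------------------------------------------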
Bucket: {gcs_url.bucket} Blob: {gcs_url.blob}\"\n )\n reader_fn = self._get_reader_fn(reader_method, gcs_url.blob)\n buf = BytesIO(gcs_blob.download_as_bytes())\n buf.seek(0)\n df = reader_fn(buf, **reader_options)\n\n elif isinstance(batch_spec, PathBatchSpec):\n reader_method: str = batch_spec.reader_method\n reader_options: dict = batch_spec.reader_options\n path: str = batch_spec.path\n reader_fn: Callable = self._get_reader_fn(reader_method, path)\n df = reader_fn(path, **reader_options)\n\n else:\n raise ge_exceptions.BatchSpecError(\n f\"batch_spec must be of type RuntimeDataBatchSpec, PathBatchSpec, S3BatchSpec, or AzureBatchSpec, not {batch_spec.__class__.__name__}\"\n )\n\n df = self._apply_splitting_and_sampling_methods(batch_spec, df)\n if df.memory_usage().sum() < HASH_THRESHOLD:\n batch_markers[\"pandas_data_fingerprint\"] = hash_pandas_dataframe(df)\n\n typed_batch_data = PandasBatchData(execution_engine=self, dataframe=df)\n\n return typed_batch_data, batch_markers\n\n def _apply_splitting_and_sampling_methods(self, batch_spec, batch_data):\n if batch_spec.get(\"splitter_method\"):\n splitter_fn = getattr(self, batch_spec.get(\"splitter_method\"))\n splitter_kwargs: dict = batch_spec.get(\"splitter_kwargs\") or {}\n batch_data = splitter_fn(batch_data, **splitter_kwargs)\n\n if batch_spec.get(\"sampling_method\"):\n sampling_fn = getattr(self, batch_spec.get(\"sampling_method\"))\n sampling_kwargs: dict = batch_spec.get(\"sampling_kwargs\") or {}\n batch_data = sampling_fn(batch_data, **sampling_kwargs)\n return batch_data\n\n @property\n def dataframe(self):\n \"\"\"Tests whether or not a Batch has been loaded. If the loaded batch does not exist, raises a\n ValueError Exception\n \"\"\"\n # Changed to is None because was breaking prior\n if self.active_batch_data is None:\n raise ValueError(\n \"Batch has not been loaded - please run load_batch_data() to load a batch.\"\n )\n\n return self.active_batch_data.dataframe\n\n def _get_reader_fn(self, reader_method=None, path=None):\n \"\"\"Static helper for parsing reader types. 
If reader_method is not provided, path will be used to guess the\n correct reader_method.\n\n Args:\n reader_method (str): the name of the reader method to use, if available.\n path (str): the path used to guess\n\n Returns:\n ReaderMethod to use for the filepath\n\n \"\"\"\n if reader_method is None and path is None:\n raise ge_exceptions.ExecutionEngineError(\n \"Unable to determine pandas reader function without reader_method or path.\"\n )\n\n reader_options = {}\n if reader_method is None:\n path_guess = self.guess_reader_method_from_path(path)\n reader_method = path_guess[\"reader_method\"]\n reader_options = path_guess.get(\n \"reader_options\"\n ) # This may not be there; use None in that case\n\n try:\n reader_fn = getattr(pd, reader_method)\n if reader_options:\n reader_fn = partial(reader_fn, **reader_options)\n return reader_fn\n except AttributeError:\n raise ge_exceptions.ExecutionEngineError(\n f'Unable to find reader_method \"{reader_method}\" in pandas.'\n )\n\n # NOTE Abe 20201105: Any reason this shouldn't be a private method?\n @staticmethod\n def guess_reader_method_from_path(path):\n \"\"\"Helper method for deciding which reader to use to read in a certain path.\n\n Args:\n path (str): the to use to guess\n\n Returns:\n ReaderMethod to use for the filepath\n\n \"\"\"\n if path.endswith(\".csv\") or path.endswith(\".tsv\"):\n return {\"reader_method\": \"read_csv\"}\n elif path.endswith(\".parquet\"):\n return {\"reader_method\": \"read_parquet\"}\n elif path.endswith(\".xlsx\") or path.endswith(\".xls\"):\n return {\"reader_method\": \"read_excel\"}\n elif path.endswith(\".json\"):\n return {\"reader_method\": \"read_json\"}\n elif path.endswith(\".pkl\"):\n return {\"reader_method\": \"read_pickle\"}\n elif path.endswith(\".feather\"):\n return {\"reader_method\": \"read_feather\"}\n elif path.endswith(\".csv.gz\") or path.endswith(\".tsv.gz\"):\n return {\n \"reader_method\": \"read_csv\",\n \"reader_options\": {\"compression\": \"gzip\"},\n }\n\n raise ge_exceptions.ExecutionEngineError(\n f'Unable to determine reader method from path: \"{path}\".'\n )\n\n def get_domain_records(\n self,\n domain_kwargs: dict,\n ) -> pd.DataFrame:\n \"\"\"\n Uses the given domain kwargs (which include row_condition, condition_parser, and ignore_row_if directives) to\n obtain and/or query a batch. 
Returns in the format of a Pandas DataFrame.\n\n Args:\n domain_kwargs (dict) - A dictionary consisting of the domain kwargs specifying which data to obtain\n\n Returns:\n A DataFrame (the data on which to compute)\n \"\"\"\n table = domain_kwargs.get(\"table\", None)\n if table:\n raise ValueError(\n \"PandasExecutionEngine does not currently support multiple named tables.\"\n )\n\n batch_id = domain_kwargs.get(\"batch_id\")\n if batch_id is None:\n # We allow no batch id specified if there is only one batch\n if self.active_batch_data_id is not None:\n data = self.active_batch_data.dataframe\n else:\n raise ge_exceptions.ValidationError(\n \"No batch is specified, but could not identify a loaded batch.\"\n )\n else:\n if batch_id in self.loaded_batch_data_dict:\n data = self.loaded_batch_data_dict[batch_id].dataframe\n else:\n raise ge_exceptions.ValidationError(\n f\"Unable to find batch with batch_id {batch_id}\"\n )\n\n # Filtering by row condition.\n row_condition = domain_kwargs.get(\"row_condition\", None)\n if row_condition:\n condition_parser = domain_kwargs.get(\"condition_parser\", None)\n\n # Ensuring proper condition parser has been provided\n if condition_parser not in [\"python\", \"pandas\"]:\n raise ValueError(\n \"condition_parser is required when setting a row_condition,\"\n \" and must be 'python' or 'pandas'\"\n )\n else:\n # Querying row condition\n data = data.query(row_condition, parser=condition_parser).reset_index(\n drop=True\n )\n\n if \"column\" in domain_kwargs:\n return data\n\n if (\n \"column_A\" in domain_kwargs\n and \"column_B\" in domain_kwargs\n and \"ignore_row_if\" in domain_kwargs\n ):\n # noinspection PyPep8Naming\n column_A_name = domain_kwargs[\"column_A\"]\n # noinspection PyPep8Naming\n column_B_name = domain_kwargs[\"column_B\"]\n\n ignore_row_if = domain_kwargs[\"ignore_row_if\"]\n if ignore_row_if == \"both_values_are_missing\":\n data = data.dropna(\n axis=0,\n how=\"all\",\n subset=[column_A_name, column_B_name],\n )\n elif ignore_row_if == \"either_value_is_missing\":\n data = data.dropna(\n axis=0,\n how=\"any\",\n subset=[column_A_name, column_B_name],\n )\n else:\n if ignore_row_if not in [\"neither\", \"never\"]:\n raise ValueError(\n f'Unrecognized value of ignore_row_if (\"{ignore_row_if}\").'\n )\n\n if ignore_row_if == \"never\":\n warnings.warn(\n f\"\"\"The correct \"no-action\" value of the \"ignore_row_if\" directive for the column pair case is \\\n\"neither\" (the use of \"{ignore_row_if}\" will be deprecated). Please update code accordingly.\n\"\"\",\n DeprecationWarning,\n )\n\n return data\n\n if \"column_list\" in domain_kwargs and \"ignore_row_if\" in domain_kwargs:\n column_list = domain_kwargs[\"column_list\"]\n\n ignore_row_if = domain_kwargs[\"ignore_row_if\"]\n if ignore_row_if == \"all_values_are_missing\":\n data = data.dropna(\n axis=0,\n how=\"all\",\n subset=column_list,\n )\n elif ignore_row_if == \"any_value_is_missing\":\n data = data.dropna(\n axis=0,\n how=\"any\",\n subset=column_list,\n )\n else:\n if ignore_row_if != \"never\":\n raise ValueError(\n f'Unrecognized value of ignore_row_if (\"{ignore_row_if}\").'\n )\n\n return data\n\n return data\n\n def get_compute_domain(\n self,\n domain_kwargs: dict,\n domain_type: Union[str, MetricDomainTypes],\n accessor_keys: Optional[Iterable[str]] = None,\n ) -> Tuple[pd.DataFrame, dict, dict]:\n \"\"\"\n Uses the given domain kwargs (which include row_condition, condition_parser, and ignore_row_if directives) to\n obtain and/or query a batch. 
Returns in the format of a Pandas DataFrame. If the domain is a single column,\n this is added to 'accessor domain kwargs' and used for later access\n\n Args:\n domain_kwargs (dict) - A dictionary consisting of the domain kwargs specifying which data to obtain\n domain_type (str or MetricDomainTypes) - an Enum value indicating which metric domain the user would\n like to be using, or a corresponding string value representing it. String types include \"column\",\n \"column_pair\", \"table\", and \"other\". Enum types include capitalized versions of these from the\n class MetricDomainTypes.\n accessor_keys (str iterable) - keys that are part of the compute domain but should be ignored when\n describing the domain and simply transferred with their associated values into accessor_domain_kwargs.\n\n Returns:\n A tuple including:\n - a DataFrame (the data on which to compute)\n - a dictionary of compute_domain_kwargs, describing the DataFrame\n - a dictionary of accessor_domain_kwargs, describing any accessors needed to\n identify the domain within the compute domain\n \"\"\"\n data = self.get_domain_records(\n domain_kwargs=domain_kwargs,\n )\n # Extracting value from enum if it is given for future computation\n domain_type = MetricDomainTypes(domain_type)\n\n compute_domain_kwargs = copy.deepcopy(domain_kwargs)\n accessor_domain_kwargs = {}\n table = domain_kwargs.get(\"table\", None)\n if table:\n raise ValueError(\n \"PandasExecutionEngine does not currently support multiple named tables.\"\n )\n\n # Warning user if accessor keys are in any domain that is not of type table, will be ignored\n if (\n domain_type != MetricDomainTypes.TABLE\n and accessor_keys is not None\n and len(list(accessor_keys)) > 0\n ):\n logger.warning(\n 'Accessor keys ignored since Metric Domain Type is not \"table\"'\n )\n\n # If given table (this is default), get all unexpected accessor_keys (an optional parameters allowing us to\n # modify domain access)\n if domain_type == MetricDomainTypes.TABLE:\n if accessor_keys is not None and len(list(accessor_keys)) > 0:\n for key in accessor_keys:\n accessor_domain_kwargs[key] = compute_domain_kwargs.pop(key)\n if len(compute_domain_kwargs.keys()) > 0:\n # Warn user if kwarg not \"normal\".\n unexpected_keys: set = set(compute_domain_kwargs.keys()).difference(\n {\n \"batch_id\",\n \"table\",\n \"row_condition\",\n \"condition_parser\",\n }\n )\n if len(unexpected_keys) > 0:\n unexpected_keys_str: str = \", \".join(\n map(lambda element: f'\"{element}\"', unexpected_keys)\n )\n logger.warning(\n f'Unexpected key(s) {unexpected_keys_str} found in domain_kwargs for domain type \"{domain_type.value}\".'\n )\n return data, compute_domain_kwargs, accessor_domain_kwargs\n\n elif domain_type == MetricDomainTypes.COLUMN:\n if \"column\" not in compute_domain_kwargs:\n raise ge_exceptions.GreatExpectationsError(\n \"Column not provided in compute_domain_kwargs\"\n )\n\n accessor_domain_kwargs[\"column\"] = compute_domain_kwargs.pop(\"column\")\n\n elif domain_type == MetricDomainTypes.COLUMN_PAIR:\n if not (\"column_A\" in domain_kwargs and \"column_B\" in domain_kwargs):\n raise ge_exceptions.GreatExpectationsError(\n \"column_A or column_B not found within domain_kwargs\"\n )\n\n accessor_domain_kwargs[\"column_A\"] = compute_domain_kwargs.pop(\"column_A\")\n accessor_domain_kwargs[\"column_B\"] = compute_domain_kwargs.pop(\"column_B\")\n\n elif domain_type == MetricDomainTypes.MULTICOLUMN:\n if \"column_list\" not in domain_kwargs:\n raise ge_exceptions.GreatExpectationsError(\n 
\"column_list not found within domain_kwargs\"\n )\n\n column_list = compute_domain_kwargs.pop(\"column_list\")\n\n if len(column_list) < 2:\n raise ge_exceptions.GreatExpectationsError(\n \"column_list must contain at least 2 columns\"\n )\n\n accessor_domain_kwargs[\"column_list\"] = column_list\n\n return data, compute_domain_kwargs, accessor_domain_kwargs\n\n ### Splitter methods for partitioning dataframes ###\n @staticmethod\n def _split_on_whole_table(\n df,\n ) -> pd.DataFrame:\n return df\n\n @staticmethod\n def _split_on_column_value(\n df, column_name: str, batch_identifiers: dict\n ) -> pd.DataFrame:\n return df[df[column_name] == batch_identifiers[column_name]]\n\n @staticmethod\n def _split_on_converted_datetime(\n df,\n column_name: str,\n batch_identifiers: dict,\n date_format_string: str = \"%Y-%m-%d\",\n ):\n \"\"\"Convert the values in the named column to the given date_format, and split on that\"\"\"\n stringified_datetime_series = df[column_name].map(\n lambda x: x.strftime(date_format_string)\n )\n matching_string = batch_identifiers[column_name]\n return df[stringified_datetime_series == matching_string]\n\n @staticmethod\n def _split_on_divided_integer(\n df, column_name: str, divisor: int, batch_identifiers: dict\n ):\n \"\"\"Divide the values in the named column by `divisor`, and split on that\"\"\"\n\n matching_divisor = batch_identifiers[column_name]\n matching_rows = df[column_name].map(\n lambda x: int(x / divisor) == matching_divisor\n )\n\n return df[matching_rows]\n\n @staticmethod\n def _split_on_mod_integer(df, column_name: str, mod: int, batch_identifiers: dict):\n \"\"\"Divide the values in the named column by `divisor`, and split on that\"\"\"\n\n matching_mod_value = batch_identifiers[column_name]\n matching_rows = df[column_name].map(lambda x: x % mod == matching_mod_value)\n\n return df[matching_rows]\n\n @staticmethod\n def _split_on_multi_column_values(\n df, column_names: List[str], batch_identifiers: dict\n ):\n \"\"\"Split on the joint values in the named columns\"\"\"\n\n subset_df = df.copy()\n for column_name in column_names:\n value = batch_identifiers.get(column_name)\n if not value:\n raise ValueError(\n f\"In order for PandasExecution to `_split_on_multi_column_values`, \"\n f\"all values in column_names must also exist in batch_identifiers. 
\"\n f\"{column_name} was not found in batch_identifiers.\"\n )\n subset_df = subset_df[subset_df[column_name] == value]\n return subset_df\n\n @staticmethod\n def _split_on_hashed_column(\n df,\n column_name: str,\n hash_digits: int,\n batch_identifiers: dict,\n hash_function_name: str = \"md5\",\n ):\n \"\"\"Split on the hashed value of the named column\"\"\"\n try:\n hash_method = getattr(hashlib, hash_function_name)\n except (TypeError, AttributeError):\n raise (\n ge_exceptions.ExecutionEngineError(\n f\"\"\"The splitting method used with SparkDFExecutionEngine has a reference to an invalid hash_function_name.\n Reference to {hash_function_name} cannot be found.\"\"\"\n )\n )\n matching_rows = df[column_name].map(\n lambda x: hash_method(str(x).encode()).hexdigest()[-1 * hash_digits :]\n == batch_identifiers[\"hash_value\"]\n )\n return df[matching_rows]\n\n ### Sampling methods ###\n\n @staticmethod\n def _sample_using_random(\n df,\n p: float = 0.1,\n ):\n \"\"\"Take a random sample of rows, retaining proportion p\"\"\"\n return df[df.index.map(lambda x: random.random() < p)]\n\n @staticmethod\n def _sample_using_mod(\n df,\n column_name: str,\n mod: int,\n value: int,\n ):\n \"\"\"Take the mod of named column, and only keep rows that match the given value\"\"\"\n return df[df[column_name].map(lambda x: x % mod == value)]\n\n @staticmethod\n def _sample_using_a_list(\n df,\n column_name: str,\n value_list: list,\n ):\n \"\"\"Match the values in the named column against value_list, and only keep the matches\"\"\"\n return df[df[column_name].isin(value_list)]\n\n @staticmethod\n def _sample_using_hash(\n df,\n column_name: str,\n hash_digits: int = 1,\n hash_value: str = \"f\",\n hash_function_name: str = \"md5\",\n ):\n \"\"\"Hash the values in the named column, and only keep rows that match the given hash_value\"\"\"\n try:\n hash_func = getattr(hashlib, hash_function_name)\n except (TypeError, AttributeError):\n raise (\n ge_exceptions.ExecutionEngineError(\n f\"\"\"The sampling method used with PandasExecutionEngine has a reference to an invalid hash_function_name.\n Reference to {hash_function_name} cannot be found.\"\"\"\n )\n )\n\n matches = df[column_name].map(\n lambda x: hash_func(str(x).encode()).hexdigest()[-1 * hash_digits :]\n == hash_value\n )\n return df[matches]\n\n\ndef hash_pandas_dataframe(df):\n try:\n obj = pd.util.hash_pandas_object(df, index=True).values\n except TypeError:\n # In case of facing unhashable objects (like dict), use pickle\n obj = pickle.dumps(df, pickle.HIGHEST_PROTOCOL)\n\n return hashlib.md5(obj).hexdigest()\n"
] | [
[
"numpy.count_nonzero"
],
[
"pandas.util.hash_pandas_object"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
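Illustrative note (not part of the dataset row above): this entry records `pandas.util.hash_pandas_object` among its APIs, used by the row's `hash_pandas_dataframe` helper to fingerprint a batch. A minimal, self-contained sketch of that pattern on a made-up frame:

```python
import hashlib
import pandas as pd

# Toy frame; any DataFrame is fingerprinted the same way.
df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})

# hash_pandas_object returns one uint64 per row (index included);
# hashing those raw bytes yields a single digest for the whole frame.
row_hashes = pd.util.hash_pandas_object(df, index=True).values
fingerprint = hashlib.md5(row_hashes).hexdigest()
print(fingerprint)
```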
DarseZ/DL_hw5 | [
"6229ef6493e92b93e67e71058f90bc4a6b537796"
] | [
"q_learning/utils/schedule.py"
] | [
"import numpy as np\nfrom utils.test_env import EnvTest\n\n\nclass LinearSchedule(object):\n def __init__(self, eps_begin, eps_end, nsteps):\n \"\"\"\n Args:\n eps_begin: initial exploration\n eps_end: end exploration\n nsteps: number of steps between the two values of eps\n \"\"\"\n self.epsilon = eps_begin\n self.eps_begin = eps_begin\n self.eps_end = eps_end\n self.nsteps = nsteps\n\n\n def update(self, t):\n \"\"\"\n Updates epsilon\n\n Args:\n t: int\n frame number\n \"\"\"\n if t > self.nsteps:\n self.epsilon = self.eps_end\n else:\n alpha = (1.0 * t) / self.nsteps\n self.epsilon = (alpha * self.eps_end) \\\n + ((1-alpha) * self.eps_begin)\n\n\nclass LinearExploration(LinearSchedule):\n def __init__(self, env, eps_begin, eps_end, nsteps):\n \"\"\"\n Args:\n env: gym environment\n eps_begin: float\n initial exploration rate\n eps_end: float\n final exploration rate\n nsteps: int\n number of steps taken to linearly decay eps_begin to eps_end\n \"\"\"\n self.env = env\n super(LinearExploration, self).__init__(eps_begin, eps_end, nsteps)\n\n\n def get_action(self, best_action):\n \"\"\"\n Returns a random action with prob epsilon, otherwise returns the best_action\n\n Args:\n best_action: int\n best action according some policy\n Returns:\n an action\n \"\"\"\n ##############################################################\n if np.random.uniform(0, 1) < self.epsilon:\n return self.env.action_space.sample()\n else:\n return best_action\n"
] | [
[
"numpy.random.uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
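Illustrative note: the tracked API here is `numpy.random.uniform`, which drives the epsilon-greedy draw in `LinearExploration.get_action`. A minimal sketch of the same pattern, with a hypothetical discrete action count standing in for the gym `action_space.sample()` call:

```python
import numpy as np

def epsilon_greedy(best_action: int, epsilon: float, n_actions: int) -> int:
    """Return a random action with probability epsilon, else best_action."""
    if np.random.uniform(0, 1) < epsilon:
        return int(np.random.randint(n_actions))  # hypothetical action space
    return best_action

print(epsilon_greedy(best_action=2, epsilon=0.1, n_actions=4))
```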
dennislwm/dtale-desktop | [
"1a034d505f6b45c1ece4c18b83af6ae367d16824"
] | [
"dtale_desktop/default_sources/dft_csv/get_data.py"
] | [
"import pandas as pd\n\n\ndef main(path: str) -> pd.DataFrame:\n return pd.read_csv(path)\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
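Illustrative note: the only tracked API in this row is `pandas.read_csv`. A self-contained sketch using an in-memory buffer in place of the `path` argument (the CSV text is made up):

```python
import io
import pandas as pd

csv_text = "a,b\n1,x\n2,y\n"          # stand-in for a file on disk
df = pd.read_csv(io.StringIO(csv_text))
print(df.shape)  # (2, 2)
```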
rjplevin/BEST-AIR | [
"40f4bb74f4a8a98b66a452f58596f4c425c9a51b"
] | [
"best_air/bin/extract_EPA_monitor_data_88101_2000_2017.py"
] | [
"import csv\nimport pandas as pd\n\nCALIFORNIA = 6 # State Code to keep\n\ndata_dir = '/Volumes/T7/Box Sync/BEST-AIR/Data/AQ Monitoring/EPA Criteria Pollutants/PM Daily Data/'\n\npathname = data_dir + '2017/daily_88101_2017.csv'\n\n# Create 'Monitor ID' = State Code + County Code + Site Num + Parameter Code\n# Drop rows with 'Sample Duration' of '24-HR BLK AVG' (redundant)\n# Calculate quarterly average of daily means.\n# Calculate annual average of four quarterly averages.\n# Parameter code is always 88101; name is always 'PM2.5 - Local Conditions'\ncols_to_keep = [\n 'State Code',\n 'County Code',\n 'Site Num',\n 'Parameter Code',\n 'POC',\n 'Latitude',\n 'Longitude',\n 'Sample Duration',\n 'Date Local',\n 'Event Type', # ['None', 'Included', 'Excluded']\n 'Method Code', # decide which to keep\n # 'Method Name',\n 'Observation Count',\n 'Observation Percent',\n 'Arithmetic Mean',\n\n # 'Datum', # all are in ['WGS84', 'NAD83']\n # 'Units of Measure', # all are 'Micrograms/cubic meter (LC)'\n]\n\ndef verify_excludes_have_matching_includes(df):\n df = df.copy()\n df.columns = [name.replace(' ', '') for name in df.columns]\n excl = df.query(\"EventType == 'Excluded'\")\n\n count = 0\n for idx, row in excl.iterrows():\n id = row.monitor_id\n date = row.DateLocal\n poc = row.POC\n found = df.query(\"EventType == 'Included' and SampleDuration == '1 HOUR' and monitor_id == @id and POC == @poc and DateLocal == @date\")\n count += len(found)\n\n if len(found) > 1:\n print(\"Multiple matches: \\n\", found)\n\n if count != len(excl):\n raise Exception(f\"Found {count} Included matches for {len(excl)} Excluded\")\n\ndef extract(input_path, output_path):\n print(f\"Reading '{input_path}'\")\n df = pd.read_csv(input_path, index_col=None, usecols=cols_to_keep)\n\n mask = (df['State Code'] == CALIFORNIA) & (df['Sample Duration'] != '24-HR BLK AVG')\n df = df[mask].copy()\n\n # Create fixed width monitor ID from these four numeric columns\n df['monitor_id'] = (df['State Code'].astype(str).str.zfill(2) + '-' +\n df['County Code'].astype(str).str.zfill(3) + '-' +\n df['Site Num'].astype(str).str.zfill(4) + '-' +\n df['Parameter Code'].astype(str).str.zfill(5))\n\n cols_to_drop = ['Parameter Code', 'State Code']\n df.drop(cols_to_drop, axis='columns', inplace=True)\n\n verify_excludes_have_matching_includes(df)\n\n rows_to_keep = (df['Event Type'] != 'Excluded')\n df = df[rows_to_keep]\n\n print(f\"Saving extracted data {df.shape} to '{output_path}'\")\n df.to_csv(output_path, index=None, quoting=csv.QUOTE_NONNUMERIC)\n\nfor year in (2000, 2017):\n input_path = data_dir + f\"{year}/daily_88101_{year}.csv.gz\"\n output_path = data_dir + f\"extracted_monitor_data_88101_{year}.csv\"\n extract(input_path, output_path)\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
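Illustrative note: besides the recorded `pandas.read_csv` call, the row's `extract()` builds a fixed-width monitor ID from zero-padded code columns. A small sketch of that string construction on made-up values:

```python
import pandas as pd

# Hypothetical stand-in for a few rows of a daily_88101_*.csv extract.
df = pd.DataFrame({
    "State Code": [6, 6],
    "County Code": [37, 1],
    "Site Num": [2, 11],
    "Parameter Code": [88101, 88101],
})

# Zero-pad each code to a fixed width, then join with dashes.
df["monitor_id"] = (df["State Code"].astype(str).str.zfill(2) + "-" +
                    df["County Code"].astype(str).str.zfill(3) + "-" +
                    df["Site Num"].astype(str).str.zfill(4) + "-" +
                    df["Parameter Code"].astype(str).str.zfill(5))
print(df["monitor_id"].tolist())  # ['06-037-0002-88101', '06-001-0011-88101']
```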
pierrepo/buildH | [
"4870ffc4fb41deec2c8af5ba5b589795bbb99563"
] | [
"buildh/core.py"
] | [
"\"\"\"Module holding the core functions.\"\"\"\n\nimport pandas as pd\nimport MDAnalysis as mda\nimport MDAnalysis.coordinates.XTC as XTC\n\nfrom . import hydrogens\nfrom . import geometry as geo\nfrom . import writers\n\n\n# For debugging.\n# TODO: Remove it after implement logging feature\nDEBUG=False\n\n\ndef buildHs_on_1C(atom, H_type, helper1, helper2, helper3=None):\n \"\"\"Build 1, 2 or 3 H on a given carbon.\n\n This function is a wrapper which takes the coordinates of the helpers\n and call the function that builds 1, 2 or 3 H.\n\n Parameters\n ----------\n atom : numpy 1D-array\n Central atom on which we want to reconstruct the hydrogen.\n H_type: str\n The type of H to build. It could be 'CH2', 'CH', 'CHdoublebond' or 'CH3'\n see dic_lipds.py\n helper1 : numpy 1D-array\n First neighbor of central atom.\n helper2 : numpy 1D-array\n Second neighbor of central atom.\n helper3 : numpy 1D-array\n Third neighbor of central atom.\n\n Returns\n -------\n tuple of numpy 1D-arrays\n Each element of the tuple is a numpy 1D-array containing 1, 2 or 3\n reconstructed hydrogen(s).\n !!! IMPORTANT !!! This function *should* return a tuple even if\n there's only one H that has been rebuilt.\n \"\"\"\n if H_type == \"CH2\":\n H1_coor, H2_coor = hydrogens.get_CH2(atom, helper1, helper2)\n return (H1_coor, H2_coor)\n elif H_type == \"CH\":\n # If we reconstruct a single H, we have a 3rd helper.\n #helper3_coor = sel(\"name {0}\".format(helper3_name))[0].position\n H1_coor = hydrogens.get_CH(atom, helper1, helper2,\n helper3)\n return (H1_coor,)\n elif H_type == \"CHdoublebond\":\n H1_coor = hydrogens.get_CH_double_bond(atom, helper1,\n helper2)\n return (H1_coor,)\n elif H_type == \"CH3\":\n H1_coor, H2_coor, H3_coor = hydrogens.get_CH3(atom,\n helper1, helper2)\n return (H1_coor, H2_coor, H3_coor)\n else:\n raise UserWarning(\"Wrong code for typeofH2build, expected 'CH2', 'CH'\"\n \", 'CHdoublebond' or 'CH3', got {}.\"\n .format(H_type))\n\n\ndef build_system_hydrogens(universe_woH, dic_lipid, dic_Cname2Hnames, dic_lipid_indexes):\n \"\"\"Build a new system *with* hydrogens.\n\n The function take the MDAnalysis universe *without* hydrogens, reconstruct all hydrogens\n and return a pandas dataframe. This latter will be used later to build a new\n MDAnalysis universe with H.\n\n Notes\n -----\n There is no simple way to create a new MDAnalysis universe directly.\n\n Parameters\n ----------\n universe_woH : MDAnalysis universe instance\n This is the universe *without* hydrogen.\n dic_lipid : dictionary\n Comes from dic_lipids.py. Contains carbon names and helper names needed\n for reconstructing hydrogens.\n dic_Cname2Hnames : dictionary\n This dict gives the correspondance Cname -> Hname. It is a dict of\n tuples. If there is more than 1 H for a given C, they need to be\n *ordered* like in the PDB. e.g. 
for CHARMM POPC :\n {'C13': ('H13A', 'H13B', 'H13C'), ..., 'C33': ('H3X', 'H3Y'),\n ..., 'C216': ('H16R', 'H16S'), ...}\n dic_lipids_with_indexes : dictionary\n The dictionary made in function make_dic_lipids_with_indexes().\n\n Returns\n -------\n pandas dataframe\n contains the system *with* hydrogens.\n \"\"\"\n # The list newrows will be used to store the new molecule *with* H.\n newrows = []\n # Counter for numbering the new mlcs with H.\n new_atom_num = 1\n # Loop over all atoms in the universe without H.\n for atom in universe_woH.atoms:\n resnum = atom.resnum\n resname = atom.resname\n name = atom.name\n # Append atom to the new list.\n # 0 1 2 3 4 5 6\n # atnum, atname, resname, resnum, x, y, z\n newrows.append([new_atom_num, name, resname, resnum]\n + list(atom.position))\n new_atom_num += 1\n # Build new H(s)?\n if (atom.name in dic_lipid and atom.residue.resname == dic_lipid[\"resname\"]):\n # Retrieve helpers coordinates\n # helperX_ix is the index of the helper inside one residue.\n if len(dic_lipid_indexes[atom.name]) == 6:\n typeofH2build, _, _, _, helper1_ix, helper2_ix = dic_lipid_indexes[atom.name]\n helper3_coor = None\n else:\n typeofH2build, _, _, _, _, helper1_ix, helper2_ix, helper3_ix = dic_lipid_indexes[atom.name]\n helper3_coor = atom.residue.atoms[helper3_ix].position\n\n helper1_coor = atom.residue.atoms[helper1_ix].position\n helper2_coor = atom.residue.atoms[helper2_ix].position\n\n # Build Hs and store them in a list of numpy 1D-arrays Hs_coor.\n # The \"s\" in Hs_coor means there can be more than 1 H:\n # For CH2, Hs_coor will contain: [H1_coor, H2_coor].\n # For CH3, Hs_coor will contain: [H1_coor, H2_coor, H3_coor].\n # For CH, Hs_coor will contain: [H1_coor].\n # For CHdoublebond, Hs_coor will contain: [H1_coor].\n Hs_coor = buildHs_on_1C(atom.position, typeofH2build,\n helper1_coor, helper2_coor, helper3_coor)\n\n # Loop over Hs_coor (H_coor is a 1D-array with the 3 coors of 1 H).\n for i, H_coor in enumerate(Hs_coor):\n # Retrieve name of newly built H.\n Hname = dic_Cname2Hnames[atom.name][i]\n # Add them to newrows.\n newrows.append([new_atom_num, Hname, resname, resnum]\n + list(H_coor))\n new_atom_num += 1\n\n # Create a dataframe to store the mlc with added hydrogens.\n new_df_atoms = pd.DataFrame(newrows, columns=[\"atnum\", \"atname\",\n \"resname\", \"resnum\",\n \"x\", \"y\", \"z\"])\n return new_df_atoms\n\n\n###\n### The next function build_all_Hs_calc_OP())\n### build new H, calculate the order parameter and write the new traj with Hs\n### to an output file (e.g. .xtc, etc).\n### Note: it is slow, it shouldn't be used if the user doesn't want to\n### write the trajectory. Instead, fast_build_all_Hs() should be used.\n###\ndef build_all_Hs_calc_OP(universe_woH, ts, dic_lipid, dic_Cname2Hnames, universe_wH, dic_OP,\n dic_corresp_numres_index_dic_OP, dic_lipid_indexes):\n \"\"\"Build all hydrogens and calculates order parameters for one frame.\n\n This function loop overs *all* atoms of the universe_woH in order to update\n the atom coordinates and the new H built into the universe_wH.\n\n The function also calculates the order parameter.\n The coordinates of the universe *with* H are updated in place.\n The order parameter is also added in place (within dic_OP dictionary).\n\n Notes\n -----\n This function is slow, thus it shall be used when one wants\n to create a trajectory with H (such as .xtc or whatever format).\n\n This function assumes all possible C-H pairs are present in the .def\n file (with -d option). 
They are needed since we want to build an xtc with\n the whole system. If one is interested in calculating only a subset of OPs,\n please use the function fast_build_all_Hs_calc_OP() instead.\n\n Parameters\n ----------\n universe_woH : MDAnalysis universe instance\n This is the universe *without* hydrogen.\n ts : Timestep instance\n the current timestep with the coordinates\n dic_lipid : dictionary\n Comes from dic_lipids.py. Contains carbon names and helper names needed\n for reconstructing hydrogens.\n dic_Cname2Hnames : dictionary\n This dict gives the correspondance Cname -> Hname. It is a dict of\n tuples. If there is more than 1 H for a given C, they need to be\n *ordered* like in the PDB. e.g. for CHARMM POPC :\n {'C13': ('H13A', 'H13B', 'H13C'), ..., 'C33': ('H3X', 'H3Y'),\n ..., 'C216': ('H16R', 'H16S'), ...}\n universe_wH : MDAnalysis universe instance (optional)\n This is the universe *with* hydrogens.\n dic_OP : ordered dictionary\n Each key of this dict is a couple carbon/H, and at the beginning it\n contains an empty list, e.g.\n OrderedDict([ ('C1', 'H11): [], ('C1', 'H12'): [], ... ])\n See function init_dic_OP() below to see how it is organized.\n dic_corresp_numres_index_dic_OP : dictionary\n This dict should contain the correspondance between the numres and\n the corresponding index in dic_OP. For example {..., 15: 14, ...} means\n the residue numbered 15 in the PDB has an index of 14 in dic_OP.\n \"\"\"\n # We will need the index in the numpy array for updating coordinates\n # in the universe with H.\n row_index_coor_array = 0\n resid = -9999\n\n # Loop over all atoms in the universe without H.\n for atom in universe_woH.atoms:\n # Update the position of the current atom in the universe with H.\n universe_wH.coord.positions[row_index_coor_array, :] = atom.position\n row_index_coor_array += 1\n\n # Build new H(s)?\n if (atom.name in dic_lipid and atom.residue.resname == dic_lipid[\"resname\"]):\n\n # Retrieve the index of the first atom in the current residue\n # Test to avoid refreshing it at every step of the loop\n if resid != atom.residue.resid:\n resid = atom.residue.resid\n ix_first_atom_res = atom.residue.atoms[0].ix\n\n # Retrieve helpers coordinates\n if len(dic_lipid_indexes[atom.name]) == 6:\n typeofH2build, _, _, _, helper1_ix, helper2_ix = dic_lipid_indexes[atom.name]\n helper3_coor = None\n else:\n typeofH2build, _, _, _, _, helper1_ix, helper2_ix, helper3_ix = dic_lipid_indexes[atom.name]\n helper3_coor = ts[helper3_ix + ix_first_atom_res]\n\n # Faster to retrieve the coordinates from ts than from universe_woH.atoms.positions\n helper1_coor = ts[helper1_ix + ix_first_atom_res]\n helper2_coor = ts[helper2_ix + ix_first_atom_res]\n\n # Build Hs and store them in a list of numpy 1D-arrays Hs_coor.\n # The \"s\" in Hs_coor means there can be more than 1 H:\n # For CH2, Hs_coor will contain: [H1_coor, H2_coor].\n # For CH3, Hs_coor will contain: [H1_coor, H2_coor, H3_coor].\n # For CH, Hs_coor will contain: [H1_coor].\n # For CHdoublebond, Hs_coor will contain: [H1_coor].\n Hs_coor = buildHs_on_1C(atom.position, typeofH2build,\n helper1_coor, helper2_coor, helper3_coor)\n\n # Loop over Hs_coor (H_coor is a 1D-array with the 3 coors of 1 H).\n for i, H_coor in enumerate(Hs_coor):\n # Retrieve name of newly built H.\n Hname = dic_Cname2Hnames[atom.name][i]\n ####\n #### We calculate here the order param on the fly :-D !\n ####\n if (atom.name, Hname) in dic_OP:\n op = geo.calc_OP(atom.position, H_coor)\n # We should get here the index of the residue in dic_OP.\n 
# For that we can use dic_corresp_numres_index_dic_OP\n # (key: resnum in pdb, value: index residue in dic_OP).\n lipid_ix = dic_corresp_numres_index_dic_OP[atom.resid]\n # OLD way: dic_OP[(atom.name, Hname)].append(op)\n if (atom.name, Hname) in dic_OP:\n dic_OP[(atom.name, Hname)][lipid_ix].append(op)\n if DEBUG:\n print(atom.name, H_coor, \"OP:\", op)\n\n # Update the position of the current H in the universe with H.\n universe_wH.coord.positions[row_index_coor_array, :] = H_coor\n row_index_coor_array += 1\n\n if dic_OP and DEBUG:\n print()\n print()\n if dic_OP and DEBUG:\n print(\"Final dic_OP:\", dic_OP)\n print()\n\n\n###\n### The next 3 functions (get_indexes(), make_dic_lipids_with_indexes()\n### and fast_build_all_Hs_calc_OP()) should be used when the\n### user doesn't want an output trajectory.\n### By using fast indexing to individual Catoms and helpers, they\n### are much faster.\n###\ndef get_indexes(atom, dic_lipid):\n \"\"\"Return the index of helpers for a given carbon.\n\n Parameters\n ----------\n atom : MDAnalysis Atom instance\n This is an Atom instance of a carbon on which we want to build Hs.\n dic_lipid : dictionary\n Comes from dic_lipids.py. Contains carbon names and helper names needed\n for reconstructing hydrogens.\n\n Returns\n -------\n tuple of 2 or 3 int\n The tuple contains the index of the 2 (or 3) helpers for the atom that\n was passed as argument. (e.g. for atom C37 with index 99, the function\n returns a tuple containing 98 (index of C36 = helper 1) and 100 (index\n of C38=helper2).\n \"\"\"\n # Get nb of H to build and helper names (we can have 2 or 3 helpers).\n if len(dic_lipid[atom.name]) == 3:\n typeofH2build, helper1_name, helper2_name = dic_lipid[atom.name]\n else:\n typeofH2build, helper1_name, helper2_name, helper3_name = dic_lipid[atom.name]\n # Get helper coordinates using atom, which an instance from Atom class.\n # atom.residue.atoms is a list of atoms we can select with\n # method .select_atoms().\n # To avoid too long line, we shorten its name to `sel`.\n sel = atom.residue.atoms.select_atoms\n helper1_ix = sel(\"name {}\".format(helper1_name))[0].ix\n helper2_ix = sel(\"name {}\".format(helper2_name))[0].ix\n if typeofH2build == \"CH\":\n # If we reconstruct a single H, we have a 3rd helper.\n helper3_ix = sel(\"name {0}\".format(helper3_name))[0].ix\n return (helper1_ix, helper2_ix, helper3_ix)\n else:\n return (helper1_ix, helper2_ix)\n\n\ndef make_dic_lipids_with_indexes(universe_woH, dic_lipid, dic_OP):\n \"\"\"Expand dic_lipid and adds the index of each atom and helper.\n\n IMPORTANT: the index of each atom/helper is given with respect to the\n first atom in that residue.\n For example, if we have a POPC where C1 is the first atom, and C50 the\n last one, we want in the end:\n {'C1': ('CH3', 'N4', 'C5', 0, 3, 4), ...,\n 'C50': ('CH3', 'C49', 'C48', 49, 48, 47)}\n Where the 3 last int are the index (ix) of the atom, helper1, helper2\n (possibly helper3) with respect to the first atom.\n Thus for C1 : 0 is index of C1, N4 is 3 atoms away from C1 and C5 is 4\n atoms away from C1.\n For C50: C50 is 49 atoms away from C1, C49 is 48 atoms away from C1,\n C48 is 47 atoms away from C1.\n\n Parameters\n ----------\n universe_woH : MDAnalysis Universe instance\n The universe without hydrogens.\n dic_lipid : dictionary\n Comes from dic_lipids.py. 
Contains carbon names and helper names needed\n for reconstructing hydrogens.\n dic_OP : ordered dictionary\n Each key of this dict is a couple carbon/H, and at the beginning it\n contains an empty list, e.g.\n OrderedDict([ ('C1', 'H11): [], ('C1', 'H12'): [], ... ])\n See function init_dic_OP() below to see how it is organized.\n\n Returns\n -------\n dictionary\n The returned dictionary as described above in this docstring.\n \"\"\"\n # Get lipid name.\n resname = dic_lipid[\"resname\"]\n # Get resnum of the 1st lipid encountered in the system whose name\n # is `resname`.\n selection = \"resname {}\".format(resname)\n first_lipid_residue = universe_woH.select_atoms(selection).residues[0]\n resnum_1st_lipid = first_lipid_residue.resnum\n # Get name of 1st atom of that lipid.\n first_atom_name = first_lipid_residue.atoms[0].name\n # Get index of this atom.\n first_atom_ix = first_lipid_residue.atoms[0].ix\n if DEBUG:\n print(\"resname: {}, first encountered residue: {},\\n\"\n \"resnum_1st_lipid: {}, first_atom_name: {}, first_atom_ix: {}\"\n .format(resname, first_lipid_residue, resnum_1st_lipid,\n first_atom_name, first_atom_ix))\n print()\n # Keep only carbons on which we want to build Hs.\n carbons2keep = []\n for Cname, _ in dic_OP:\n if Cname not in carbons2keep:\n carbons2keep.append(Cname)\n dic_lipids_with_indexes = {}\n for Cname in dic_lipid.keys():\n if Cname in carbons2keep:\n dic_lipids_with_indexes[Cname] = dic_lipid[Cname].copy()\n # Now add the helper indexes.\n # The reasonning is over one residue (e.g. POPC). We want to add (to the\n # dict) the index (ix) of each helper of a given carbon with respect to\n # the index of the first atom in that lipid residue.\n # Loop over each carbon on which we want to reconstruct Hs.\n for Cname in dic_lipids_with_indexes:\n # Loop over residues for a given Cname atom.\n selection = \"resid {} and name {}\".format(resnum_1st_lipid, Cname)\n for Catom in universe_woH.select_atoms(selection):\n # Get the (absolute) index of helpers.\n if dic_lipid[Cname][0] == \"CH\":\n helper1_ix, helper2_ix, helper3_ix = get_indexes(Catom, dic_lipid)\n else:\n helper1_ix, helper2_ix = get_indexes(Catom, dic_lipid)\n # If the first lipid doesn't start at residue 1 we must\n # substract the index of the first atom of that lipid.\n Catom_ix_inres = Catom.ix - first_atom_ix\n helper1_ix_inres = helper1_ix - first_atom_ix\n helper2_ix_inres = helper2_ix - first_atom_ix\n # Then add these indexes to dic_lipids_with_indexes.\n if dic_lipid[Cname][0] == \"CH\":\n helper3_ix_inres = helper3_ix - first_atom_ix\n tmp_tuple = (Catom_ix_inres, helper1_ix_inres,\n helper2_ix_inres, helper3_ix_inres)\n dic_lipids_with_indexes[Cname] += tmp_tuple\n else:\n tmp_tuple = (Catom_ix_inres, helper1_ix_inres,\n helper2_ix_inres)\n dic_lipids_with_indexes[Cname] += tmp_tuple\n if DEBUG:\n print(\"Everything is based on the following dic_lipids_with_indexes\\n{}\"\n .format(dic_lipids_with_indexes))\n print()\n return dic_lipids_with_indexes\n\n\ndef fast_build_all_Hs_calc_OP(universe_woH, begin, end,\n dic_OP, dic_lipid, dic_Cname2Hnames):\n \"\"\"Build Hs and calc OP using fast indexing.\n\n This function uses fast indexing to carbon atoms and helper atoms. 
It\n should be used when the user doesn't want any output traj with hydrogens.\n\n Parameters\n ----------\n universe_woH : MDAnalysis universe instance\n This is the universe *without* hydrogen.\n begin: int\n index of the first frame of trajectory\n end: int\n index of the last frame of trajectory\n dic_OP : ordered dictionary\n Each key of this dict is a couple carbon/H, and at the beginning it\n contains an empty list, e.g.\n OrderedDict([ ('C1', 'H11): [], ('C1', 'H12'): [], ... ])\n See function init_dic_OP() below to see how it is organized.\n dic_lipid : dictionary\n Comes from dic_lipids.py. Contains carbon names and helper names needed\n for reconstructing hydrogens.\n dic_Cname2Hnames : dictionary\n This dict gives the correspondance Cname -> Hname. It is a dict of\n tuples. If there is more than 1 H for a given C, they need to be\n *ordered* like in the PDB. e.g. for CHARMM POPC :\n {'C13': ('H13A', 'H13B', 'H13C'), ..., 'C33': ('H3X', 'H3Y'),\n ..., 'C216': ('H16R', 'H16S'), ...}\n\n Returns\n -------\n None\n This function returns nothing, dic_OP is changed *in place*.\n \"\"\"\n ###\n ### 1) Expand dic_lipids and store there helpers' index.\n ###\n ### We want {'C1': ('CH3', 'N4', 'C5', 0, 3, 4), ...,\n ### 'C50': ('CH3', 'C49', 'C48', 49, 48, 47)}\n ### Where the 3 last int are the index (ix) of the atom, helper1, helper2\n ### (possibly helper3) with respect to the first atom\n ### (e.g. 0 is index of C1, N4 is 3 atoms away from C1, etc).\n ###\n dic_lipids_with_indexes = make_dic_lipids_with_indexes(universe_woH,\n dic_lipid, dic_OP)\n # Get lipid name.\n resname = dic_lipid[\"resname\"]\n # Select first residue of that lipid.\n selection = \"resname {}\".format(resname)\n first_lipid_residue = universe_woH.select_atoms(selection).residues[0]\n # Get name of 1st atom of that lipid.\n first_atom_name = first_lipid_residue.atoms[0].name\n ###\n ### 2) Now loop over the traj, residues and Catoms.\n ### At each iteration build Hs and calc OP.\n ###\n # Loop over frames (ts is a Timestep instance).\n for ts in universe_woH.trajectory[begin:end]:\n print(\"Dealing with frame {} at {} ps.\"\n .format(ts.frame, universe_woH.trajectory.time))\n if DEBUG:\n print(\"Looping now over residues...\")\n print()\n # Loop over the 1st atom of each lipid, which is equiv to loop *over\n # residues* (first_lipid_atom is an Atom instance, lipid_ix is an int\n # that will be used for storing OPs in dic_OP).\n selection = \"resname {} and name {}\".format(resname, first_atom_name)\n for lipid_ix, first_lipid_atom in enumerate(universe_woH.select_atoms(selection)):\n if DEBUG:\n print(\"Dealing with Cname\", first_lipid_atom)\n print(\"which is part of residue\", first_lipid_atom.residue)\n print(\"Now looping over atoms of this residue\")\n print()\n # Get the index of this first atom.\n ix_first_atom_res = first_lipid_atom.ix\n # Now loop over each carbon on which we want to build Hs\n # (Cname is a string).\n for Cname in dic_lipids_with_indexes:\n # Get Cname and helpers coords.\n if len(dic_lipids_with_indexes[Cname]) == 6:\n typeofH2build, _, _, Cname_ix, helper1_ix, helper2_ix = dic_lipids_with_indexes[Cname]\n helper3_coor = None\n else:\n typeofH2build, _, _, _, Cname_ix, helper1_ix, helper2_ix, helper3_ix = dic_lipids_with_indexes[Cname]\n helper3_coor = ts[helper3_ix+ix_first_atom_res]\n helper1_coor = ts[helper1_ix+ix_first_atom_res]\n helper2_coor = ts[helper2_ix+ix_first_atom_res]\n Cname_position = ts[Cname_ix+ix_first_atom_res]\n if DEBUG:\n print(\"Dealing with Cname\", Cname)\n 
sel = first_lipid_atom.residue.atoms.select_atoms\n Cname_atom = sel(\"name {}\".format(Cname))[0]\n print(Cname_atom, Cname_atom.position)\n if len(dic_lipid[Cname]) == 3:\n _, helper1_name, helper2_name = dic_lipid[Cname]\n else:\n _, helper1_name, helper2_name, helper3_name = dic_lipid[Cname]\n helper1_atom = sel(\"name {}\".format(helper1_name))[0]\n print(\"helper1\", helper1_atom, helper1_atom.position)\n helper2_atom = sel(\"name {}\".format(helper2_name))[0]\n print(\"helper2\", helper2_atom, helper2_atom.position)\n if len(dic_lipid[Cname]) == 4:\n helper3_atom = sel(\"name {}\".format(helper3_name))[0]\n print(\"helper3\", helper3_atom, helper3_atom.position)\n # Get newly built H(s) on that atom.\n Hs_coor = buildHs_on_1C(Cname_position, typeofH2build,\n helper1_coor, helper2_coor, helper3_coor)\n if DEBUG:\n print(\"Cname_position with fast indexing:\", Cname_position)\n print(\"helper1_position with fast indexing:\",\n ts[helper1_ix+ix_first_atom_res])\n print(\"helper2_position with fast indexing:\",\n ts[helper2_ix+ix_first_atom_res])\n if len(dic_lipid[Cname]) == 4:\n print(\"helper3_position with fast indexing:\",\n ts[helper3_ix+ix_first_atom_res])\n # To retrieve Hname, we need a counter.\n counter4Hname = 0\n # Loop over all Hs.\n for H_coor in Hs_coor:\n # Retrieve name of newly built H.\n Hname = dic_Cname2Hnames[Cname][counter4Hname]\n # Calc and store OP for that couple C-H.\n Cname_position = ts[Cname_ix+ix_first_atom_res]\n op = geo.calc_OP(Cname_position, H_coor)\n # Old way: dic_OP[(Cname, Hname)].append(op)\n if (Cname, Hname) in dic_OP:\n dic_OP[(Cname, Hname)][lipid_ix].append(op)\n if DEBUG:\n print(Hname, H_coor, \"OP:\", op)\n # Increment counter4Hname for retrieving next H.\n counter4Hname += 1\n if DEBUG:\n print()\n print()\n if DEBUG:\n print(\"Final dic_OP:\", dic_OP)\n print()\n\n\ndef gen_coordinates_calcOP(basename, universe_woH, dic_OP, dic_lipid,\n dic_Cname2Hnames, dic_corresp_numres_index_dic_OP,\n begin, end, traj_file):\n \"\"\"Generate coordinates files (pdb and/or xtc) with computed hydrogens\n and compute the order parameter.\n\n If `traj_file` is set to False, only a pdb file will be written.\n This depends whether or not the user supplied a trajectory file\n in the first place.\n\n Parameters\n ----------\n basename : str\n basename for the output coordinate file(s).\n universe_woH : MDAnalysis universe instance\n This is the universe *without* hydrogen.\n dic_OP : ordered dictionary\n Each key of this dict is a couple carbon/H, and at the beginning it\n contains an empty list, e.g.\n OrderedDict([ ('C1', 'H11): [], ('C1', 'H12'): [], ... ])\n See function init_dic_OP() below to see how it is organized.\n dic_lipid : dictionary\n Comes from dic_lipids.py. Contains carbon names and helper names needed\n for reconstructing hydrogens.\n dic_Cname2Hnames : dictionary\n This dict gives the correspondance Cname -> Hname. It is a dict of\n tuples. If there is more than 1 H for a given C, they need to be\n *ordered* like in the PDB. e.g. 
for CHARMM POPC :\n {'C13': ('H13A', 'H13B', 'H13C'), ..., 'C33': ('H3X', 'H3Y'),\n ..., 'C216': ('H16R', 'H16S'), ...}\n dic_corresp_numres_index_dic_OP : dictionary\n This dict should contain the correspondance between the numres and\n the corresponding index in dic_OP.\n begin: int\n index of the first frame of trajectory\n end: int\n index of the last frame of trajectory\n traj_file : bool\n a trajectory output file has to be generated?\n \"\"\"\n dic_lipids_with_indexes = make_dic_lipids_with_indexes(universe_woH, dic_lipid,\n dic_OP)\n\n # Create filenames.\n pdbout_filename = basename + \".pdb\"\n # Build a new universe with H.\n # Build a pandas df with H.\n new_df_atoms = build_system_hydrogens(universe_woH, dic_lipid, dic_Cname2Hnames,\n dic_lipids_with_indexes)\n # Create a new universe with H using that df.\n print(\"Writing new pdb with hydrogens.\")\n # Write pdb with H to disk.\n with open(pdbout_filename, \"w\") as f:\n f.write(writers.pandasdf2pdb(new_df_atoms))\n # Then create the universe with H from that pdb.\n universe_wH = mda.Universe(pdbout_filename)\n\n #Do we need to generate a trajectory file ?\n if traj_file:\n xtcout_filename = basename + \".xtc\"\n # Create an xtc writer.\n print(\"Writing trajectory with hydrogens in xtc file.\")\n newxtc = XTC.XTCWriter(xtcout_filename, len(universe_wH.atoms))\n # Write 1st frame.\n newxtc.write(universe_wH)\n\n # 4) Loop over all frames of the traj *without* H, build Hs and\n # calc OP (ts is a Timestep instance).\n for ts in universe_woH.trajectory[begin:end]:\n print(\"Dealing with frame {} at {} ps.\"\n .format(ts.frame, universe_woH.trajectory.time))\n # Build H and update their positions in the universe *with* H (in place).\n # Calculate OPs on the fly while building Hs (dic_OP changed in place).\n build_all_Hs_calc_OP(universe_woH, ts, dic_lipid, dic_Cname2Hnames,\n universe_wH, dic_OP, dic_corresp_numres_index_dic_OP,\n dic_lipids_with_indexes)\n # Write new frame to xtc.\n newxtc.write(universe_wH)\n # Close xtc.\n newxtc.close()\n # if not, just compute OP in the fast way.\n else:\n fast_build_all_Hs_calc_OP(universe_woH, begin, end, dic_OP, dic_lipid, dic_Cname2Hnames)\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
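Illustrative note: the tracked API is `pandas.DataFrame`, used in `build_system_hydrogens()` to turn accumulated per-atom rows into a typed frame. A minimal sketch of that pattern with made-up atom records:

```python
import pandas as pd

# Rows collected as plain lists, converted to a frame in one call.
newrows = [
    [1, "C1", "POPC", 1, 0.0, 0.0, 0.0],    # hypothetical carbon
    [2, "H11", "POPC", 1, 0.5, 0.8, -0.3],  # hypothetical rebuilt hydrogen
]
new_df_atoms = pd.DataFrame(
    newrows,
    columns=["atnum", "atname", "resname", "resnum", "x", "y", "z"],
)
print(new_df_atoms)
```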
samuelstanton/lambo | [
"7b67684b884f75f7007501978c5299514d0efb75",
"7b67684b884f75f7007501978c5299514d0efb75"
] | [
"lambo/models/mlm.py",
"lambo/optimizers/mutation.py"
] | [
"import math\n\nimport numpy as np\nimport torch\nimport torchvision\nimport wandb\n\nfrom torch.nn import functional as F\nfrom torch import LongTensor\n\nfrom lambo import transforms as gfp_transforms, dataset as gfp_dataset\nfrom lambo.models.shared_elements import check_early_stopping\nfrom lambo.utils import str_to_tokens\n\n\ndef sample_tokens(base_tokens, logit_batch, enc_tokenizer, replacement=False, temp=1.):\n\tlogit_batch /= temp\n\t# don't sample special tokens\n\tnon_viable_idxs = np.array(enc_tokenizer.special_idxs)[None, None, :]\n\tnp.put_along_axis(logit_batch, non_viable_idxs, -1e10, axis=-1)\n\n\tif not replacement and base_tokens is not None:\n\t\t# don't sample the original tokens\n\t\tbase_tokens = base_tokens.numpy().astype(int)[..., None]\n\t\tnp.put_along_axis(logit_batch, base_tokens, -1e10, axis=-1)\n\n\t# sample tokens\n\ttoken_samples = torch.distributions.Categorical(logits=logit_batch).sample()\n\n\t# calculate entropy\n\tentropy = -(\n\t\t\tF.softmax(logit_batch, dim=-1) * F.log_softmax(logit_batch, dim=-1)\n\t).sum(-1)\n\n\treturn token_samples, entropy\n\n\ndef sample_mask(\n\t\ttoken_batch: LongTensor,\n\t\ttokenizer,\n\t\tmask_ratio: float = 0.125,\n\t\tmask_size=None\n):\n\t\"\"\"\n\tArgs:\n\t\ttoken_batch: (batch_size, num_tokens)\n\t\ttokenizer: only necessary to avoid masking special tokens\n\t\tmask_ratio: proportion of tokens to mask\n\t\tmask_size: (optional) override mask_ratio with a specific mask size\n\tReturns:\n\t\tmask_idxs: (batch_size, mask_size) np.ndarray of position indexes to mask\n\t\"\"\"\n\tif mask_size is None:\n\t\tmask_size = math.ceil(token_batch.shape[-1] * mask_ratio)\n\n\tspecial_idxs = torch.tensor(tokenizer.special_idxs).view(-1, 1, 1)\n\tis_non_special = token_batch.ne(special_idxs).prod(dim=0).float()\n\tmask_weights = is_non_special / is_non_special.sum(dim=-1, keepdims=True)\n\tmask_idxs = torch.multinomial(mask_weights, mask_size, replacement=False)\n\treturn mask_idxs.numpy()\n\n\ndef evaluate_windows(base_seqs, encoder, mask_size, replacement=True, encoder_obj='mlm'):\n\twindow_mask_idxs = {}\n\twindow_entropy = {}\n\twindow_features = {}\n\n\tfor idx, seq in enumerate(base_seqs):\n\t\twindow_mask_idxs[idx] = []\n\t\twindow_entropy[idx] = []\n\t\twindow_features[idx] = []\n\t\t# avoids evaluating windows corresponding to padding tokens\n\t\ttokens = str_to_tokens(np.array([seq]), encoder.tokenizer)\n\t\t# assert torch.all(tokens.ne(encoder.tokenizer.padding_idx)) # SELFIES no-op token may trigger\n\t\tmask_size = min(mask_size, tokens.shape[-1] - 2)\n\t\toffset = np.random.randint(1, mask_size + 1)\n\t\tfor mask_start in range(offset, tokens.shape[-1] - 1, mask_size):\n\t\t\tif mask_start + mask_size < tokens.shape[-1] - 1:\n\t\t\t\tmask_idxs = np.arange(mask_start, mask_start + mask_size).reshape(1, -1)\n\t\t\telse:\n\t\t\t\tmask_stop = tokens.shape[-1] - 1\n\t\t\t\tmask_idxs = np.arange(mask_stop - mask_size, mask_stop).reshape(1, -1)\n\n\t\t\twith torch.no_grad():\n\t\t\t\tmasked_inputs = tokens.clone().to(encoder.device)\n\t\t\t\tnp.put_along_axis(masked_inputs, mask_idxs, encoder.tokenizer.masking_idx, axis=1)\n\t\t\t\ttgt_tok_logits, tgt_mask = encoder.logits_from_tokens(masked_inputs)\n\t\t\t\tif encoder_obj == 'mlm':\n\t\t\t\t\t_, logit_entropy = sample_tokens(\n\t\t\t\t\t\ttokens, tgt_tok_logits, encoder.tokenizer, replacement\n\t\t\t\t\t)\n\t\t\t\t\tlogit_entropy = np.take_along_axis(logit_entropy, mask_idxs, axis=1)\n\t\t\t\telif encoder_obj == 'lanmt':\n\t\t\t\t\ttgt_tok_idxs, logit_entropy = 
encoder.sample_tgt_tok_idxs(\n\t\t\t\t\t\ttgt_tok_logits, tgt_mask, temp=1.\n\t\t\t\t\t)\n\t\t\t\telse:\n\t\t\t\t\traise ValueError\n\n\t\t\twindow_mask_idxs[idx].append(mask_idxs.copy())\n\t\t\twindow_entropy[idx].append(logit_entropy.mean().item())\n\n\treturn window_mask_idxs, window_entropy\n\n\ndef mlm_train_step(model, optimizer, token_batch, mask_ratio, loss_scale=1.):\n\toptimizer.zero_grad(set_to_none=True)\n\n\t# replace random tokens with mask token\n\tmask_idxs = sample_mask(token_batch, model.tokenizer, mask_ratio)\n\tmasked_token_batch = token_batch.clone().to(model.device)\n\tnp.put_along_axis(masked_token_batch, mask_idxs, model.tokenizer.masking_idx, axis=1)\n\n\t# get predicted logits for masked tokens\n\tlogits, _ = model.logits_from_tokens(masked_token_batch)\n\tvocab_size = logits.shape[-1]\n\tmasked_logits = np.take_along_axis(logits, mask_idxs[..., None], axis=1).view(-1, vocab_size)\n\n\t# use the ground-truth tokens as labels\n\tmasked_tokens = np.take_along_axis(token_batch, mask_idxs, axis=1)\n\tmasked_tokens = masked_tokens.view(-1).to(model.device)\n\n\tloss = loss_scale * F.cross_entropy(masked_logits, masked_tokens)\n\tloss.backward()\n\toptimizer.step()\n\n\treturn loss, masked_logits, masked_tokens\n\n\ndef mlm_train_epoch(model, optimizer, train_loader, mask_ratio):\n\tmetrics = dict(\n\t\ttrain_loss=0.,\n\t\ttrain_perplexity=0.,\n\t)\n\tmodel.train()\n\tfor minibatch in train_loader:\n\t\tif isinstance(minibatch, tuple):\n\t\t\ttoken_batch = minibatch[0]\n\t\telse:\n\t\t\tassert torch.is_tensor(minibatch)\n\t\t\ttoken_batch = minibatch\n\n\t\tloss, masked_logits, masked_tokens = mlm_train_step(model, optimizer, token_batch, mask_ratio)\n\n\t\t# logging\n\t\tlog_prob = F.log_softmax(masked_logits, dim=-1)\n\t\tlog_prob = np.take_along_axis(log_prob, masked_tokens.cpu().numpy()[..., None], axis=1)\n\t\tmetrics['train_perplexity'] += 2 ** (\n\t\t\t-(log_prob / math.log(2)).mean().detach()\n\t\t) / len(train_loader)\n\t\tmetrics['train_loss'] += loss.detach() / len(train_loader)\n\tmetrics = {key: val.item() for key, val in metrics.items()}\n\treturn metrics\n\n\ndef mlm_eval_epoch(model, eval_loader, mask_ratio, split):\n\tmetrics = dict(\n\t\tperplexity=0.,\n\t)\n\tmodel.eval()\n\tfor minibatch in eval_loader:\n\t\tif isinstance(minibatch, tuple):\n\t\t\ttoken_batch = minibatch[0]\n\t\telse:\n\t\t\tassert torch.is_tensor(minibatch)\n\t\t\ttoken_batch = minibatch\n\n\t\t# replace random tokens with mask token\n\t\tmask_idxs = sample_mask(token_batch, model.tokenizer, mask_ratio)\n\t\tmasked_token_batch = token_batch.clone().to(model.device)\n\t\tnp.put_along_axis(masked_token_batch, mask_idxs, model.tokenizer.masking_idx, axis=1)\n\n\t\t# get predicted logits for masked tokens\n\t\tlogits, _ = model.logits_from_tokens(masked_token_batch)\n\t\tvocab_size = logits.shape[-1]\n\t\tmasked_logits = np.take_along_axis(logits, mask_idxs[..., None], axis=1).view(-1, vocab_size)\n\n\t\t# use the ground-truth tokens as labels\n\t\tmasked_tokens = np.take_along_axis(token_batch, mask_idxs, axis=1)\n\t\tmasked_tokens = masked_tokens.view(-1).to(model.device)\n\n\t\t# logging\n\t\tlog_prob = F.log_softmax(masked_logits, dim=-1)\n\t\tlog_prob = np.take_along_axis(log_prob, masked_tokens.cpu().numpy()[..., None], axis=1)\n\t\tmetrics['perplexity'] += 2 ** (\n\t\t\t-(log_prob / math.log(2)).mean().detach()\n\t\t) / len(eval_loader)\n\n\tmetrics = {key: val.item() for key, val in metrics.items()}\n\tmetrics = {f'{split}_{key}': val for key, val in metrics.items()}\n\n\treturn 
metrics\n\n\ndef fit_masked_language_model(model, train_seqs, num_epochs, batch_size, lr, patience, mask_ratio, max_shift,\n\t\t\t\t\t\t\t weights=None, log_prefix=''):\n\n\t# random translation data augmentation, apply tokenizer\n\ttrain_transform = []\n\tif max_shift > 0:\n\t\ttrain_transform.append(gfp_transforms.SequenceTranslation(max_shift))\n\ttrain_transform.append(gfp_transforms.StringToLongTensor(model.tokenizer))\n\ttrain_transform = torchvision.transforms.Compose(train_transform)\n\n\t# make dataset, dataloader\n\ttrain_dataset = gfp_dataset.TransformTensorDataset([train_seqs], train_transform)\n\n\tif weights is None:\n\t\tloader_kwargs = dict(batch_size=batch_size, shuffle=True)\n\telse:\n\t\tsampler = torch.utils.data.WeightedRandomSampler(weights, batch_size, replacement=True)\n\t\tbatch_sampler = torch.utils.data.BatchSampler(sampler, batch_size=batch_size, drop_last=False)\n\t\tloader_kwargs = dict(batch_sampler=batch_sampler)\n\n\ttrain_loader = torch.utils.data.DataLoader(\n\t\ttrain_dataset, collate_fn=gfp_transforms.padding_collate_fn, **loader_kwargs\n\t)\n\n\toptimizer = torch.optim.Adam(model.param_groups(lr))\n\tlr_sched = torch.optim.lr_scheduler.ReduceLROnPlateau(\n\t\toptimizer, patience=math.ceil(patience / 2)\n\t)\n\n\trecords = []\n\tbest_score, best_epoch, best_weights = None, 0, None\n\tmodel.requires_grad_(True)\n\tfor epoch in range(num_epochs):\n\t\tmetrics = {}\n\t\tmetrics.update(\n\t\t\tmlm_train_epoch(model, optimizer, train_loader, mask_ratio)\n\t\t)\n\t\t# use avg. train loss as convergence crit.\n\t\tlr_sched.step(metrics['train_loss'])\n\t\tbest_score, best_epoch, best_weights, stop = check_early_stopping(\n\t\t\tmodel,\n\t\t\tbest_score,\n\t\t\tbest_epoch,\n\t\t\tbest_weights,\n\t\t\tmetrics['train_loss'],\n\t\t\tepoch + 1,\n\t\t\tpatience,\n\t\t\tsave_weights=True,\n\t\t\t)\n\n\t\t# logging\n\t\tmetrics.update(dict(best_score=best_score, best_epoch=best_epoch))\n\t\tif len(log_prefix) > 0:\n\t\t\tmetrics = {'/'.join((log_prefix, key)): val for key, val in metrics.items()}\n\t\ttry:\n\t\t\twandb.log(metrics)\n\t\texcept:\n\t\t\tpass\n\t\trecords.append(metrics)\n\n\t\tif stop:\n\t\t\tbreak\n\n\tmodel.load_state_dict(best_weights)\n\tmodel.requires_grad_(False)\n\n\treturn records\n",
"import numpy as np\n\nimport torch\n\nfrom pymoo.factory import get_mutation\nfrom pymoo.core.mutation import Mutation\n\nfrom lambo import utils\nfrom lambo.tasks.chem.logp import prop_func\nfrom lambo.models.mlm import sample_tokens\n\n\ndef get_mlm_mutation(mlm_obj, problem, cand_idx, res_idx):\n seqs = [problem.candidate_pool[i].mutant_residue_seq for i in cand_idx]\n base_tok_idxs = utils.str_to_tokens(seqs, mlm_obj.tokenizer)\n\n mask_idxs = res_idx.reshape(-1, 1)\n src_tok_idxs = base_tok_idxs.clone().to(mlm_obj.device)\n np.put_along_axis(src_tok_idxs, mask_idxs, mlm_obj.tokenizer.padding_idx, axis=1)\n\n with torch.no_grad():\n tgt_tok_logits, _ = mlm_obj.logits_from_tokens(src_tok_idxs)\n new_tok_idxs, _ = sample_tokens(\n base_tok_idxs, tgt_tok_logits, mlm_obj.tokenizer, replacement=False\n )\n new_tok_idxs = np.take_along_axis(new_tok_idxs, mask_idxs, axis=1).reshape(-1)\n new_toks = [mlm_obj.tokenizer.convert_id_to_token(t_idx) for t_idx in new_tok_idxs]\n sampling_vocab_idxs = np.array([\n mlm_obj.tokenizer.sampling_vocab.index(tok) for tok in new_toks\n ])\n return sampling_vocab_idxs\n\n\n#following https://peerj.com/articles/pchem-11.pdf\ndef safe_vocab_mutation(tokenizer, problem, cand_idx, res_idx):\n muts = []\n seqs = [problem.candidate_pool[i].mutant_residue_seq for i in cand_idx]\n for seq, idx in zip(seqs, res_idx):\n tokens = tokenizer.decode(tokenizer.encode(seq)).split(\" \")[1:-1]\n safe_mut = None\n for i in range(50):\n mut_idx = np.random.randint(0, len(tokenizer.sampling_vocab))\n mut_res = tokenizer.sampling_vocab[mut_idx]\n mut_seq = \"\".join(tokens[:idx] + [mut_res] + tokens[(idx + 1):])\n if prop_func(mut_seq) > -100:\n safe_mut = mut_idx\n break\n\n if safe_mut is None:\n muts.append(np.random.randint(0, len(tokenizer.sampling_vocab)))\n else:\n muts.append(safe_mut)\n\n return np.array(muts)\n\n\nclass UniformMutation(Mutation):\n def __init__(self, tokenizer=None, mlm_obj=None, safe_mut=False):\n self.tokenizer = tokenizer\n self.mlm_obj = mlm_obj\n self.safe_mut = safe_mut\n\n def _do(self, problem, x, **kwargs):\n query_batches = problem.x_to_query_batches(x)\n batch_shape, num_vars = query_batches.shape[:-1], query_batches.shape[-1]\n flat_queries = query_batches.reshape(-1, num_vars)\n num_samples = flat_queries.shape[0]\n\n x0 = flat_queries[..., 0]\n seqs = [problem.candidate_pool[i].mutant_residue_seq for i in x0]\n\n #NEXT LINE WON'T WORK UNLESS WE CHANGE CANDIDATE POOL TO NON-EMPTY IN TASK INIT\n x1 = np.random.randint(problem.xl[1], problem.xu[1], num_samples)\n x1 = np.array([idx % len(seq) for idx, seq in zip(x1, seqs)])\n\n if self.mlm_obj is None and not self.safe_mut:\n x2 = np.random.randint(0, len(self.tokenizer.sampling_vocab), num_samples)\n elif self.safe_mut:\n x2 = safe_vocab_mutation(self.tokenizer, problem, x0, x1)\n else:\n x2 = get_mlm_mutation(self.mlm_obj, problem, x0, x1)\n\n x3 = np.random.randint(0, len(problem.op_types), num_samples)\n\n new_queries = np.stack([x0, x1, x2, x3], axis=-1).reshape(*batch_shape, -1)\n new_x = problem.query_batches_to_x(new_queries)\n\n return new_x\n\n\nclass LocalMutation(Mutation):\n def __init__(self, eta, prob, tokenizer=None, mlm_obj=None, safe_mut=False):\n super().__init__()\n self.poly_mutation = get_mutation('int_pm', eta=eta, prob=prob)\n self.tokenizer = tokenizer\n self.mlm_obj = mlm_obj\n self.safe_mut = safe_mut\n\n def _do(self, problem, x, **kwargs):\n query_batches = problem.x_to_query_batches(x)\n batch_shape, num_vars = query_batches.shape[:-1], 
query_batches.shape[-1]\n flat_queries = query_batches.reshape(-1, num_vars)\n num_samples = flat_queries.shape[0]\n\n x0 = flat_queries[..., 0]\n seqs = [problem.candidate_pool[i].mutant_residue_seq for i in x0]\n\n mut_x = self.poly_mutation._do(problem, x)\n mut_x = problem.x_to_query_batches(mut_x).reshape(-1, num_vars)\n x1 = mut_x[..., 1]\n\n # x1 = np.array([idx % len(seq) for idx, seq in zip(x1, seqs)])\n\n for i, idx in enumerate(x0):\n num_tokens = len(self.tokenizer.encode(problem.candidate_pool[idx].mutant_residue_seq)) - 2\n x1[i] = min(num_tokens - 1, x1[i])\n # TODO always work with token indices?\n # num_tokens = len(self.tokenizer.encode(cand_seq))\n # x1[i] = min(num_tokens - 2, x1[i]) # skip end token\n # x1[i] = max(1, x1[i]) # skip start token\n\n if self.mlm_obj is None and not self.safe_mut:\n x2 = np.random.randint(0, len(self.tokenizer.sampling_vocab), num_samples)\n elif self.safe_mut:\n x2 = safe_vocab_mutation(self.tokenizer, problem, x0, x1)\n else:\n x2 = get_mlm_mutation(self.mlm_obj, problem, x0, x1)\n\n x3 = np.random.randint(0, len(problem.op_types), num_samples)\n\n new_queries = np.stack([x0, x1, x2, x3], axis=-1).reshape(*batch_shape, -1)\n new_x = problem.query_batches_to_x(new_queries)\n\n return new_x\n"
] | [
[
"numpy.take_along_axis",
"torch.nn.functional.softmax",
"torch.nn.functional.log_softmax",
"numpy.arange",
"numpy.put_along_axis",
"torch.nn.functional.cross_entropy",
"torch.utils.data.DataLoader",
"torch.utils.data.WeightedRandomSampler",
"torch.multinomial",
"torch.tensor",
"torch.is_tensor",
"torch.distributions.Categorical",
"torch.no_grad",
"numpy.array",
"torch.utils.data.BatchSampler",
"numpy.random.randint"
],
[
"numpy.take_along_axis",
"numpy.put_along_axis",
"numpy.stack",
"torch.no_grad",
"numpy.array",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
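The `get_mlm_mutation` code in the entry above leans on the axis-1 scatter/gather pair `numpy.put_along_axis` / `numpy.take_along_axis` to mask one residue per candidate and read back the sampled replacement. Below is a minimal, self-contained sketch of that pattern only, with toy token ids and a hypothetical padding id standing in for the lambo tokenizer and MLM head:

```python
import numpy as np

rng = np.random.default_rng(0)
tok_idxs = rng.integers(4, 20, size=(3, 7))        # stand-in token ids, shape (batch, seq_len)
mask_idxs = rng.integers(0, 7, size=(3, 1))        # one masked position per row
PAD = 0                                            # hypothetical padding/sentinel id

masked = tok_idxs.copy()
np.put_along_axis(masked, mask_idxs, PAD, axis=1)  # scatter the sentinel at the masked slots

sampled = rng.integers(4, 20, size=(3, 7))         # stand-in for tokens sampled from an MLM
new_at_mask = np.take_along_axis(sampled, mask_idxs, axis=1).reshape(-1)
print(masked)
print(new_at_mask)                                 # one replacement id per candidate
```

Because `mask_idxs` has shape `(batch, 1)`, both calls line up row-wise, which is what lets the mutation operator treat each candidate independently.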
mmr12/DeepLearning18 | [
"3e683c570ea8f5e224767a41a0e152267cfd08e7"
] | [
"data_loader/baseline_generator.py"
] | [
"import numpy as np\nimport os\nimport sys\n# To import from sibling directory ../utils\nsys.path.append(os.path.dirname(os.path.abspath(__file__)) + \"/..\")\nfrom data_loader.load_utils import load_obj\nfrom data_loader.load_utils import try_to_load_as_pickled_object\nfrom sklearn.model_selection import train_test_split\nfrom data_loader.process_files import process_all_files\n\nclass DataGenerator:\n def __init__(self, config):\n self.config = config\n\n # load data here\n #input = try_to_load_as_pickled_object('./data/patches.pkl')\n #y = try_to_load_as_pickled_object('./data/labels_patches.pkl')\n print(\"\\nloading the data\")\n input, y = process_all_files([0,1000,2000,3000,4000,5000,6000,7000,8000,9000])\n print(\"\\ndata loaded\")\n\n self.input, self.input_dev, self.y, self.y_dev = train_test_split(input,\n y,\n test_size=self.config.val_split)\n\n def next_batch(self, batch_size):\n idx = np.random.choice(len(self.input), batch_size)\n yield self.input[idx], self.y[idx]\n\n def next_batch_dev(self, batch_size):\n idx = np.random.choice(len(self.input_dev), batch_size)\n yield self.input_dev[idx], self.y_dev[idx]\n"
] | [
[
"sklearn.model_selection.train_test_split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
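The `baseline_generator.py` entry above combines `train_test_split` with index sampling via `np.random.choice`. A small sketch of the same pattern on synthetic arrays (the pickled patches and labels loaded by `process_all_files` in the original are replaced here with random data):

```python
import numpy as np
from sklearn.model_selection import train_test_split

X = np.random.rand(1000, 16, 16, 3)            # stand-in image patches
y = np.random.randint(0, 2, size=1000)         # stand-in labels

X_train, X_dev, y_train, y_dev = train_test_split(X, y, test_size=0.2)

def next_batch(features, labels, batch_size):
    # Sample indices with replacement, as np.random.choice does by default.
    idx = np.random.choice(len(features), batch_size)
    yield features[idx], labels[idx]

xb, yb = next(next_batch(X_train, y_train, batch_size=32))
print(xb.shape, yb.shape)                      # (32, 16, 16, 3) (32,)
```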
Data-drone/scaling_deep_learning | [
"476346179c4575ad6aeecc8c6a1b427d00abde5a"
] | [
"tensorflow/SAM Model - Petastorm.py"
] | [
"# Databricks notebook source\n# MAGIC %md\n# MAGIC \n# MAGIC ## Training and packaging a Tensorflow 2.x model with Model Hub\n# MAGIC \n# MAGIC - based on: https://www.tensorflow.org/text/tutorials/classify_text_with_bert\n# MAGIC - Sentiment Analysis Model\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC #### Extra libraries not in DB ML Image\n# MAGIC - tensorflow-text\n# MAGIC - tf-models-official\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC ### Load Libs\n\n# COMMAND ----------\n\nimport os\n\nimport tensorflow as tf\nimport tensorflow_hub as hub\nimport tensorflow_text as text\nfrom official.nlp import optimization # to create AdamW optimizer\n\nimport matplotlib.pyplot as plt\n\ntf.get_logger().setLevel('DEBUG')\n\n# COMMAND ----------\n\n# Extra Dirs setup for DB\nlog_dir = '/dbfs/Users/[email protected]/tf_log_dirs'\ndataset_dir = '/dbfs/user/brian.law/data/'\n\n## Extra Cache Path for Petastorm\ncache_pathing = 'user/brian.law/pt_cache_1/'\nlocal_cache_path = os.path.join('/dbfs', cache_pathing)\ncache_dir = 'file://' + local_cache_path\n\n# COMMAND ----------\n\n## Clean up and create cache\ndbutils.fs.rm(local_cache_path, True)\ndbutils.fs.mkdirs(local_cache_path)\n\n# COMMAND ----------\n\n# MAGIC %run ./utils/aclimdb_utils\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC ### Tensorboard Setup\n\n# COMMAND ----------\n\n# MAGIC %load_ext tensorboard\n# MAGIC experiment_log_dir = log_dir\n\n# COMMAND ----------\n\n# MAGIC %tensorboard --logdir $experiment_log_dir\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC ## Sentiment Analysis\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC ### Dataset Loading\n\n# COMMAND ----------\n\n# MAGIC %run ./dataloaders/aclimdb_dataloaders\n\n# COMMAND ----------\n\ntrain_ds, val_ds, test_ds, size_train, size_val, size_test = get_petastorm_dataset(cache_dir=cache_dir, partitions=4)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC ### Loading Pretrained Model\n# MAGIC \n# MAGIC - Can choose any of the models in map_name_to_handle as long as it first in mem (this is single GPU example)\n# MAGIC - Each model has an associated preprocess\n\n# COMMAND ----------\n\n# MAGIC %run ./models/aclimdb_models\n\n# COMMAND ----------\n\nclassifier_model = build_tf_raw_model()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC ### Validating Model Works\n\n# COMMAND ----------\n\ntext_test = ['this is such an amazing movie!']\n\nbert_raw_result = classifier_model(tf.constant(text_test))\nprint(tf.sigmoid(bert_raw_result))\n\n# COMMAND ----------\n\n# requires pydot and graphviz - installed on cluster level\ntf.keras.utils.plot_model(classifier_model)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC ### Model Training\n\n# COMMAND ----------\n\n## Adding extra MLflow steps\n# due to extra tf requirements we will manually call autolog and turn off the artifact logging\nimport mlflow\n\n# COMMAND ----------\n\n# Defining Optimizer\n\nepochs = 3\nbatch_size = 128\n\nsteps_per_epoch = size_train // batch_size \nnum_train_steps = steps_per_epoch * epochs\nnum_warmup_steps = int(0.1*num_train_steps)\n\ninit_lr = 3e-5\n\n# COMMAND ----------\n\n# Compile model\nloss = tf.keras.losses.BinaryCrossentropy(from_logits=True)\nmetrics = tf.metrics.BinaryAccuracy()\n\noptimizer = optimization.create_optimizer(init_lr=init_lr,\n num_train_steps=num_train_steps,\n num_warmup_steps=num_warmup_steps,\n optimizer_type='adamw')\n\nclassifier_model.compile(optimizer=optimizer,\n loss=loss,\n 
metrics=metrics)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC ### Advanced MLFlow Logging\n# MAGIC \n# MAGIC We need to set these up to include with the main training loop\n\n# COMMAND ----------\n\n# unless we specify extra then mlflow will just log these\nfrom mlflow.models.signature import ModelSignature\nfrom mlflow.types.schema import Schema, ColSpec\n\nmlflow.tensorflow.get_default_pip_requirements()\n\n# COMMAND ----------\n\n# extra pip requirements for our specific example\nextra_reqs = [\n f\"tensorflow-text==2.8.*\",\n f\"tf-models-official==2.7.0\"\n] \n\n# for the input signature field\ninput_examples = [\n 'this is such an amazing movie!', # this is the same sentence tried earlier\n 'The movie was great!',\n 'The movie was meh.',\n 'The movie was okish.',\n 'The movie was terrible...'\n]\n\n# lets manually spec input and output schema\n\ninput_schema = Schema([\n ColSpec(\"string\", \"An Input Sentence to Evaluate\")\n])\n\noutput_schema = Schema([ColSpec(\"float\", \"The Sentiment Score\")])\n\nsignature = ModelSignature(inputs=input_schema, outputs=output_schema)\n\n# COMMAND ----------\n\nimport datetime\n# Main training Loop\nfrom petastorm.spark import make_spark_converter\n\nwith mlflow.start_run(experiment_id='224704298431727') as run:\n \n mlflow.tensorflow.autolog(log_models=False)\n \n ## Adding Extra logging\n run_log_dir = os.path.join(log_dir, datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\n #debug_dir = os.path.join(run_log_dir, 'debug')\n\n tf.debugging.experimental.enable_dump_debug_info(\n run_log_dir,\n tensor_debug_mode=\"FULL_HEALTH\",\n circular_buffer_size=-1)\n \n # didn't seem to work?\n tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=run_log_dir, histogram_freq=1, update_freq=1)\n \n ### Extra code for Petastorm\n converter_train = make_spark_converter(train_ds)\n converter_val = make_spark_converter(val_ds)\n \n with converter_train.make_tf_dataset(batch_size=batch_size, workers_count=10) as train_pt_ds, \\\n converter_val.make_tf_dataset(batch_size=batch_size, workers_count=10) as val_pt_ds:\n \n # The dataset that comes out might not be:\n # -- the right format\n # -- the right ordering\n train_iter = train_pt_ds.map(lambda x: (x[0], x[1]))\n val_iter = val_pt_ds.map(lambda x: (x[0], x[1]))\n \n ### End Extra Code for Petastorm\n \n history = classifier_model.fit(x=train_iter,\n validation_data=val_iter,\n validation_steps=size_val // batch_size,\n epochs=epochs,\n steps_per_epoch=steps_per_epoch,\n batch_size=batch_size,\n callbacks=[tensorboard_callback])\n \n # we need to first save out the model to a temp folder then we can log it\n dataset_name = 'imdb'\n saved_model_path = './{}_bert'.format(dataset_name.replace('/', '_'))\n\n #classifier_model.save(saved_model_path, include_optimizer=False)\n \n # try manually speccing some things in log model\n # tf_signature_def_key was from trial and error seems like\n # meta_graph_tags is none by design \n # classifier_model automatically sets this\n \n #mlflow.tensorflow.log_model(tf_saved_model_dir=saved_model_path,\n # tf_meta_graph_tags=None,\n # tf_signature_def_key='serving_default', # default from tf official model\n # artifact_path='model', # model is default for mlflow in order to link to UI\n # signature=signature,\n # input_example=input_examples,\n # extra_pip_requirements=extra_reqs)\n\n fig = accuracy_and_loss_plots(history)\n mlflow.log_figure(fig, 'training_perf.png')\n \n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC ### Model Evaluation\n# 
MAGIC \n# MAGIC -- option in training steps\n\n# COMMAND ----------\n\nconverter_test = make_spark_converter(test_ds)\n\nwith converter_test.make_tf_dataset(batch_size=batch_size, workers_count=10) as test_pt_ds:\n \n test_iter = test_pt_ds.map(lambda x: (x[0], x[1]))\n \n # Note if the steps isn't defined then it will loop infinitely\n loss, accuracy = classifier_model.evaluate(test_iter,\n steps=size_test // batch_size)\n\n print(f'Loss: {loss}')\n print(f'Accuracy: {accuracy}')\n\n# COMMAND ----------\n\nmlflow.end_run()\n\n# COMMAND ----------\n\n### clean up\nclean_up = False\n\nif clean_up:\n from numba import cuda\n \n cuda.select_device(0)\n cuda.close()\n \n\n# COMMAND ----------\n\n\n"
] | [
[
"tensorflow.constant",
"tensorflow.metrics.BinaryAccuracy",
"tensorflow.keras.utils.plot_model",
"tensorflow.sigmoid",
"tensorflow.keras.losses.BinaryCrossentropy",
"tensorflow.get_logger",
"tensorflow.debugging.experimental.enable_dump_debug_info",
"tensorflow.keras.callbacks.TensorBoard"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
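The Databricks notebook above compiles its classifier with `BinaryCrossentropy(from_logits=True)`, `BinaryAccuracy`, and a TensorBoard callback. A stripped-down sketch of that compile/fit wiring on a toy Keras model; the BERT encoder, the Petastorm converters, and the AdamW optimizer from `tf-models-official` belong to the original cluster environment and are deliberately left out:

```python
import tensorflow as tf

# Toy binary classifier: final layer emits raw logits, matching from_logits=True.
model = tf.keras.Sequential([
    tf.keras.Input(shape=(20,)),
    tf.keras.layers.Dense(16, activation="relu"),
    tf.keras.layers.Dense(1),
])

model.compile(
    optimizer=tf.keras.optimizers.Adam(3e-5),
    loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
    metrics=[tf.keras.metrics.BinaryAccuracy()],
)

# Random stand-in data and a local log dir instead of the DBFS path.
x = tf.random.normal((256, 20))
y = tf.cast(tf.random.uniform((256, 1)) > 0.5, tf.float32)
tb = tf.keras.callbacks.TensorBoard(log_dir="/tmp/tf_log_dirs", histogram_freq=1)
model.fit(x, y, epochs=1, batch_size=32, callbacks=[tb])
```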
herrlich10/mripy | [
"df9a8e57a21163579af49c59a9dcd2da279cb9fa",
"df9a8e57a21163579af49c59a9dcd2da279cb9fa"
] | [
"mripy/afni.py",
"mripy/decoding.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function, division, absolute_import, unicode_literals\nimport sys, os, re, shlex, shutil, glob, subprocess, collections\nfrom os import path\nfrom datetime import datetime\nimport numpy as np\nfrom scipy import interpolate\nimport matplotlib as mpl\nfrom . import six\n\n\n# Test afni installation\n# has_afni = bool(re.search('version', subprocess.check_output(['afni', '-ver']).decode('utf-8'), re.IGNORECASE))\nhas_afni = True\n\n# # Find afni path\n# config_dir = path.expanduser('~/.mripy')\n# if not path.exists(config_dir):\n# os.makedirs(config_dir)\n# if has_afni:\n# config_file = path.join(config_dir, 'afni_path')\n# if path.exists(config_file):\n# with open(config_file, 'r') as f:\n# afni_path = f.readline()\n# else:\n# afni_path = subprocess.check_output('find ~ -iregex \".*/abin\"', shell=True).decode('utf-8').split('\\n')[0]\n# with open(config_file, 'w') as f:\n# f.write(afni_path)\n# else:\n# afni_path = ''\n\n\ndef filter_output(lines, tags=None, pattern=None, ex_tags=None, ex_pattern=None):\n '''\n Filter output lines according to their initial tags (++, *+, **, etc.) and/or\n a regex search pattern.\n\n Parameters\n ----------\n tags : list of tags\n Default is [], which means all lines will pass the filter.\n pattern : str\n ex_tags : list of tags to exclude\n ex_pattern : str\n '''\n if tags is None:\n tags = []\n if ex_tags is None:\n ex_tags = []\n if len(tags) > 0:\n lines = [line for line in lines if line[:2] in tags]\n if len(ex_tags) > 0:\n lines = [line for line in lines if line[:2] not in ex_tags]\n if pattern is not None:\n lines = [line for line in lines if re.search(pattern, line)]\n if ex_pattern is not None:\n lines = [line for line in lines if not re.search(ex_pattern, line)]\n return lines\n\n\ndef check_output(cmd, tags=None, pattern=None, verbose=0, **kwargs):\n '''\n The syntax of subprocess.check_output(shell=False) is tedious for long cmd.\n But for security reason, we don't want to use shell=True for external cmd.\n This helper function allows you to execute a single cmd without shell=True.\n\n Parameters\n ----------\n cmd : str\n A single command string packed with all options (but no wildcard)\n **kwargs :\n Go to `subprocess.check_output(**kwargs)`\n\n Returns\n -------\n lines : list of lines\n Much easier to deal with compared with subprocess.check_output()\n '''\n if isinstance(cmd, six.string_types):\n cmd = shlex.split(cmd) # Split by space, preserving quoted substrings\n lines = subprocess.check_output(cmd, stderr=subprocess.STDOUT, **kwargs).decode('utf-8').split('\\n')\n lines = filter_output(lines, tags, pattern)\n if verbose:\n for line in lines:\n print(line, file=sys.stderr if line.startswith('*') else sys.stdout)\n return lines\n\n\ndef call(cmd):\n if isinstance(cmd, six.string_types):\n cmd = shlex.split(cmd) # Split by space, preserving quoted substrings\n cmd_str = ' '.join(cmd)\n print('>>', cmd_str)\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)\n for line in iter(p.stdout.readline, b''): # The 2nd argument is sentinel character\n print(line.decode('utf-8'), end='')\n p.stdout.close() # Notify the child process that the PIPE has been broken\n if p.wait():\n raise RuntimeError(f'Error occurs when executing the following command (returncode={p.returncode}):\\n{cmd_str}') \n\n\ndef split_out_file(out_file, split_path=False, trailing_slash=False):\n '''\n Ensure that path.join(out_dir, prefix, ext) can be checked by 
path.exists().\n\n >>> split_out_file('dset.nii')\n ('dset', '.nii')\n >>> split_out_file('dset.1D')\n ('dset', '.1D')\n >>> split_out_file('folder/dset')\n ('folder/dset', '+orig.HEAD')\n >>> split_out_file('folder/dset+orig', split_path=True)\n ('folder', 'dset', '+orig.HEAD')\n >>> split_out_file('dset+orig.', split_path=True)\n ('', 'dset', '+orig.HEAD')\n >>> split_out_file('folder/dset+orig.HEAD', split_path=True, trailing_slash=True)\n ('folder/', 'dset', '+orig.HEAD')\n >>> split_out_file('dset+tlrc.BRIK', split_path=True, trailing_slash=True)\n ('', 'dset', '+tlrc.HEAD')\n '''\n out_dir, out_name = path.split(out_file)\n if trailing_slash and out_dir:\n out_dir += '/'\n match = re.match(r'(.+)(.nii|.nii.gz|.1D|.1D.dset|.1D.roi|.niml.dset|.niml.roi|.gii.dset|.csv)$', out_name)\n if match:\n prefix, ext = match.groups()\n else:\n match = re.match(r'(.+)(\\+(?:orig|tlrc))(?:.|.HEAD|.BRIK)?$', out_name)\n if match:\n prefix, ext = match.groups()\n ext += '.HEAD'\n else:\n prefix = out_name\n ext = '+orig.HEAD'\n if split_path:\n return out_dir, prefix, ext\n else:\n return path.join(out_dir, prefix), ext\n\n\ndef insert_suffix(fname, suffix):\n prefix, ext = split_out_file(fname)\n return f\"{prefix}{suffix}{ext}\"\n\n\ndef get_prefix(fname, with_path=False):\n '''\n Return \"dset\" given \"path/to/dset+orig.HEAD\", \"dset+orig.\", \"dset+tlrc\", \"dsets\"\n '''\n if path.splitext(fname)[1] in ['.niml', '.1D', '.dset']: # For surface dset\n match = re.match(r'(.+)\\.(?:niml|1D)(?:\\.dset)?', fname)\n prefix = match.group(1)\n else: # For 3d dset\n # fstem = path.splitext(path.basename(fname))[0]\n if fname[-5:].upper() in ['.HEAD', '.BRIK']:\n fstem = fname[:-5]\n elif fname.endswith('.'):\n fstem = fname[:-1]\n else:\n fstem = fname\n prefix = fstem[:-5] if len(fstem) > 5 and fstem[-5:] in ['+orig', '+tlrc'] else fstem\n if not with_path:\n prefix = path.basename(prefix)\n return prefix\n\n\ndef get_surf_vol(suma_dir):\n '''\n Infer SUMA SurfVol filename with full path (agnostic about file type: .nii vs +orig.HEAD/BRIK).\n '''\n # TODO: SurfVol.depth.nii\n candidates = glob.glob(path.join(suma_dir, '*_SurfVol*'))\n candidates = [f for f in candidates if re.search(r'_SurfVol(?:\\.nii|\\+orig\\.HEAD)', f)]\n if len(candidates) == 0:\n raise ValueError(f'>> Cannot identify SurfVol in \"{suma_dir}\"')\n else:\n return candidates[0]\n\n\ndef get_suma_subj(suma_dir):\n '''Infer SUMA subject given path to SUMA folder.'''\n match = re.match('(.+)_SurfVol.+', path.basename(get_surf_vol(suma_dir)))\n if match:\n return match.group(1)\n else:\n raise RuntimeError(f'>> Cannot infer SUMA subject from \"{suma_dir}\"')\n\n\ndef get_surf_type(suma_dir):\n '''Infer SUMA surface mesh file type (.gii vs .asc).'''\n surf_files = [f for f in os.listdir(suma_dir) if re.match('(?:lh|rh).(?:pial|smoothwm|inflated).*', f)]\n return path.splitext(surf_files[0])[1]\n\n\nSPEC_HEMIS = ['lh', 'rh', 'both', 'mh', 'bh']\nHEMI_PATTERN = r'(?:(?<=[^a-zA-Z0-9])|^)(?:lh|rh|both|mh|bh)(?=[^a-zA-Z0-9])'\n\ndef substitute_hemi(fname, hemi='{0}'):\n return re.sub(HEMI_PATTERN, hemi, fname)\n\n\ndef get_suma_spec(suma_spec):\n '''\n Infer other spec files from one spec file (either lh.spec, rh.spec, or both.spec).\n \n Parameters\n ----------\n suma_spec : str\n Either a .spec file or the suma_dir.\n '''\n if path.isdir(suma_spec): # It is actually the `suma_dir`\n subj = get_suma_subj(suma_spec)\n return {hemi: path.join(suma_spec, f\"{subj}_{hemi}.spec\") for hemi in SPEC_HEMIS}\n else: # It is a .spec file\n spec_fmt = 
re.sub(f\"({'|'.join(SPEC_HEMIS)}).spec\", '{0}.spec', suma_spec)\n return {hemi: spec_fmt.format(hemi) for hemi in SPEC_HEMIS}\n\n\ndef get_suma_info(suma_dir, suma_spec=None):\n info = {}\n info['subject'] = get_suma_subj(suma_dir)\n if suma_spec is None: # Infer spec files from suma_dir\n info['spec'] = get_suma_spec(suma_dir)\n else: # Infer other spec files from one spec file\n info['spec'] = get_suma_spec(suma_spec)\n return info\n\n\ndef get_hemi(fname):\n basename = path.basename(fname)\n match = re.search(HEMI_PATTERN, basename)\n if match:\n hemi = match.group(0)\n else:\n raise ValueError(f'** ERROR: Cannot infer \"hemi\" from \"{basename}\"')\n return hemi\n\n\ndef infer_surf_dset_variants(fname, hemis=SPEC_HEMIS):\n '''\n >>> infer_surf_dset_variants('data.niml.dset')\n {'lh': 'lh.data.niml.dset', 'rh': 'rh.data.niml.dset', 'both': 'both.data.niml.dset', mh': 'mh.data.niml.dset'}\n >>> infer_surf_dset_variants('lh.data.niml.dset')\n {'lh': 'lh.data.niml.dset'}\n\n Parameters\n ----------\n fname : str, list, or dict\n '''\n if isinstance(fname, six.string_types):\n match = re.search(HEMI_PATTERN, path.basename(fname))\n if match:\n fname = {match.group(0): fname}\n else:\n out_dir, prefix, ext = split_out_file(fname, split_path=True, trailing_slash=True)\n fname = {hemi: f\"{out_dir}{hemi}.{prefix}{ext}\" for hemi in hemis}\n if not isinstance(fname, dict):\n fdict = {}\n for f in fname:\n match = re.search(HEMI_PATTERN, path.basename(f))\n if match:\n fdict[match.group(0)] = f\n else:\n raise ValueError(f'** ERROR: Cannot infer \"hemi\" from \"{path.basename(f)}\"')\n fname = fdict\n return fname\n\n\ndef get_ORIENT(fname, format='str'):\n '''\n Parameters\n ----------\n format : str, {'code', 'str', 'mat', 'sorter'}\n\n References\n ----------\n [1] https://afni.nimh.nih.gov/pub/dist/doc/program_help/README.attributes.html\n #define ORI_R2L_TYPE 0 // Right to Left\n #define ORI_L2R_TYPE 1 // Left to Right\n #define ORI_P2A_TYPE 2 // Posterior to Anterior\n #define ORI_A2P_TYPE 3 // Anterior to Posterior\n #define ORI_I2S_TYPE 4 // Inferior to Superior\n #define ORI_S2I_TYPE 5 // Superior to Inferior\n\n Thus \"0 3 4\" is standard DICOM Reference Coordinates System, i.e., RAI.\n The AFNI convention is also that R-L, A-P, and I-S are negative-to-positive, i.e., RAI.\n\n [2] https://nipy.org/nibabel/nifti_images.html\n On the other hand, NIFTI images have an affine relating the voxel coordinates \n to world coordinates in RAS+ space, or LPI in AFNI's term.\n '''\n res = check_output(['3dAttribute', 'ORIENT_SPECIFIC', fname])[-2]\n ORIENT = np.fromiter(map(int, res.split()), int)\n code2str = np.array(['R', 'L', 'P', 'A', 'I', 'S'])\n code2mat = np.array([[ 1, 0, 0],\n [-1, 0, 0],\n [ 0,-1, 0],\n [ 0, 1, 0],\n [ 0, 0, 1],\n [ 0, 0,-1]])\n code2axis = np.array([0, 0, 1, 1, 2, 2])\n if format == 'code':\n return ORIENT\n elif format == 'str':\n return ''.join(code2str[ORIENT])\n elif format == 'mat':\n return code2mat[ORIENT]\n elif format == 'sorter':\n return np.argsort(code2axis[ORIENT])\n\n\ndef get_DIMENSION(fname):\n '''\n [x, y, z, t, 0]\n '''\n res = check_output(['3dAttribute', 'DATASET_DIMENSIONS', fname])[-2]\n DIMENSION = np.fromiter(map(int, res.split()), int)\n return DIMENSION\n\n\ndef get_ORIGIN(fname):\n res = check_output(['3dAttribute', 'ORIGIN', fname])[-2]\n ORIGIN = np.fromiter(map(float, res.split()), float)\n return ORIGIN\n\n\ndef get_DELTA(fname):\n res = check_output(['3dAttribute', 'DELTA', fname])[-2]\n DELTA = np.fromiter(map(float, res.split()), 
float)\n return DELTA\n\n\ndef get_affine(fname):\n ORIENT = get_ORIENT(fname, format='sorter')\n ORIGIN = get_ORIGIN(fname)\n DELTA = get_DELTA(fname)\n MAT = np.c_[np.diag(DELTA), ORIGIN][ORIENT,:]\n return MAT\n\n\ndef get_affine_nifti(fname):\n MAT = np.diag([-1,-1, 1]) @ get_affine(fname)\n return MAT\n\n\ndef get_dims(fname):\n '''\n Dimensions (number of voxels) of the data matrix.\n See also: get_head_dims\n '''\n # res = check_output(['@GetAfniDims', fname])[-2] # There can be leading warnings for oblique datasets\n res = check_output(['3dinfo', '-n4', fname])[-2] # `@GetAfniDims` may not work for things like `dset.nii'[0..10]'`\n return np.int_(res.split()) # np.fromiter(map(int, res.split()), int)\n\n\ndef get_head_dims(fname):\n '''\n Dimensions (number of voxels) along R-L, A-P, I-S axes.\n See also: get_dims\n '''\n res = check_output(['3dinfo', '-orient', '-n4', fname])[-2]\n res = res.split()\n orient = res[0]\n dims = np.int_(res[1:])\n ori2ax = {'R': 0, 'L': 0, 'A': 1, 'P': 1, 'I': 2, 'S': 2}\n axes = [ori2ax[ori] for ori in orient]\n return np.r_[dims[np.argsort(axes)], dims[3]]\n\n\ndef get_head_delta(fname):\n '''\n Resolution (voxel size) along R-L, A-P, I-S axes.\n '''\n res = check_output(['3dinfo', '-orient', '-d3', fname])[-2]\n res = res.split()\n orient = res[0]\n delta = np.abs(np.float_(res[1:]))\n ori2ax = {'R': 0, 'L': 0, 'A': 1, 'P': 1, 'I': 2, 'S': 2}\n axes = [ori2ax[ori] for ori in orient]\n return delta[np.argsort(axes)]\n\n\ndef get_head_extents(fname):\n '''\n Spatial extent along R, L, A, P, I and S.\n '''\n res = check_output(['3dinfo', '-extent', fname])[-2]\n return np.float_(res.split())\n\n\ndef get_brick_labels(fname, label2index=False):\n res = check_output(['3dAttribute', 'BRICK_LABS', fname])[-2]\n labels = res.split('~')[:-1] # Each label ends with \"~\"\n if label2index:\n return {label: k for k, label in enumerate(labels)}\n else:\n return np.array(labels)\n\n\ndef set_brick_labels(fname, labels):\n check_output(['3drefit', '-relabel_all_str', ' '.join(labels), fname])\n\n\ndef get_TR(fname):\n return float(check_output(['3dinfo', '-TR', fname])[-2])\n\n\ndef get_slice_timing(fname):\n res = check_output(['3dinfo', '-slice_timing', fname])[-2]\n times = np.float_(res.split('|'))\n return times\n\n\ndef set_slice_timing(fname, times, TR):\n '''\n We have to provide a TR because we don't know whether the default value TR=1.0 is valid.\n '''\n n_slices = get_dims(fname)[2]\n assert(len(times)==n_slices)\n times_cmd = [str(t) for t in times] # This has to be provided as separate arguments\n check_output(['3drefit', '-Tslices'] + times_cmd + ['-TR', str(TR), fname])\n\n\ndef get_attribute(fname, name, type=None):\n res = check_output(['3dAttribute', name, fname])[-2]\n if type == 'int':\n return np.int_(res[:-1].split())\n elif type == 'float':\n return np.float_(res[:-1].split())\n else:\n return res[:-1]\n\n\ndef set_attribute(fname, name, value, type=None):\n values = np.atleast_1d(value)\n if type == 'str' or isinstance(value, str):\n check_output(['3drefit', '-atrstring', name, f\"{value}\", fname])\n elif type == 'int' or np.issubdtype(values.dtype, np.integer):\n check_output(['3drefit', '-atrint', name, f\"{' '.join([str(v) for v in values])}\", fname])\n elif type == 'float' or np.issubdtype(values.dtype, np.floating):\n check_output(['3drefit', '-atrfloat', name, f\"{' '.join([str(v) for v in values])}\", fname])\n\n\ndef get_nifti_field(fname, name, type=None):\n res = check_output(['nifti_tool', '-disp_hdr', '-field', name, 
'-infiles', fname])[-2]\n if type == 'int':\n return np.int_(res.split()[3:])\n elif type == 'float':\n return np.float_(res.split()[3:])\n else:\n return res[37:]\n\n\ndef set_nifti_field(fname, name, value, out_file=None):\n values = np.atleast_1d(value)\n check_output(['nifti_tool', '-mod_hdr', '-mod_field', name, f\"{' '.join([str(v) for v in values])}\", '-infiles', fname] \n + (['-overwrite'] if out_file is None else ['-prefix', out_file]))\n\n\ndef get_S2E_mat(fname, mat='S2E'):\n mat = {'S2E': 'S2B', 'S2B': 'S2B', 'E2S': 'B2S', 'B2S': 'B2S'}[mat]\n res = check_output(\"cat_matvec -ONELINE '{0}::ALLINEATE_MATVEC_{1}_000000'\".format(fname, mat))[-2]\n return np.float_(res.split()).reshape(3,4)\n\n\ndef generate_spec(fname, surfs, ext=None, **kwargs):\n if ext is None:\n ext = '.gii'\n defaults = dict(dict(type={'.asc': 'FS', '.gii': 'GII'}[ext], state=None, anat=None, parent=None), **kwargs)\n surfs = [dict(defaults, **({'name': surf} if isinstance(surf, six.string_types) else surf)) for surf in surfs]\n has_smoothwm = np.any([('smoothwm' in surf['name']) for surf in surfs])\n is_both = np.any([('lh' in surf['name']) for surf in surfs]) and np.any([('rh' in surf['name']) for surf in surfs])\n for surf in surfs:\n match = re.search(rf'([l|r]h)\\.(.+)\\.{ext[1:]}', surf['name'])\n surf['hemi'] = match.group(1)\n surf['surf'] = match.group(2)\n is_anat = surf['surf'] in ['pial', 'smoothwm', 'white']\n if surf['state'] is None:\n if not is_anat and is_both:\n surf['state'] = '_'.join([surf['surf'], surf['hemi']])\n else:\n surf['state'] = surf['surf']\n if surf['anat'] is None:\n surf['anat'] = 'Y' if is_anat else 'N'\n if surf['parent'] is None:\n if surf['name'] == 'smoothwm' or not has_smoothwm:\n surf['parent'] = 'SAME'\n else:\n surf['parent'] = '.'.join([surf['hemi'], 'smoothwm', ext[1:]])\n cmds = []\n for surf in surfs:\n cmds.extend(['-tsnad', surf['type'], surf['state'], surf['name'], surf['anat'], surf['parent']])\n subprocess.check_call(['quickspec', '-spec', fname, '-overwrite'] + cmds)\n\n\ndef update_afnirc(**kwargs):\n rc_file = path.expanduser('~/.afnirc')\n bak_file = path.expanduser('~/.afnirc.{0}.bak'.format(datetime.now().strftime('%Y%m%d')))\n if not path.exists(bak_file):\n shutil.copy(rc_file, bak_file)\n with open(rc_file, 'r') as fin:\n lines = fin.read().splitlines()\n updated = []\n is_managed = False\n managed_begin = '// Managed by mripy: begin'\n managed_end = '// Managed by mripy: end'\n managed = collections.OrderedDict()\n for line in lines:\n if not is_managed:\n if line == managed_begin:\n is_managed = True\n else:\n updated.append(line)\n else:\n if line == managed_end:\n is_managed = False\n else:\n match = re.search('(\\S+)\\s+=\\s+((?:.(?!//))+)(?:\\s+//\\s+(.+))?', line)\n managed[match.group(1)] = (match.group(2).strip(), match.group(3)) # key, value, comment (can be None)\n for k, v in kwargs.items():\n if not isinstance(v, tuple):\n kwargs[k] = (v, None)\n managed.update(kwargs)\n n_managed = len([v for v in managed.values() if v[0] is not None])\n if n_managed > 0:\n if updated[-1] != '':\n updated.append('')\n updated.append(managed_begin)\n for key, (value, comment) in managed.items():\n if value is not None:\n updated.append(' {0: <24} = {1}'.format(key, value) +\n ('\\t// {0}'.format(comment) if comment is not None else ''))\n if n_managed > 0:\n updated.append(managed_end)\n with open(rc_file, 'w') as fout:\n fout.write('\\n'.join(updated))\n\n\ndef add_colormap(cmap, name=None, cyclic=False, index=None, categorical=False):\n '''\n cmap 
: list of RGB colors | matplotlib.colors.LinearSegmentedColormap\n '''\n if name is None:\n if isinstance(cmap, mpl.colors.LinearSegmentedColormap):\n name = cmap.name\n else:\n name = 'User{0:02d}'.format(index)\n if isinstance(cmap, mpl.colors.LinearSegmentedColormap):\n cmap = plots.get_color_list(cmap)\n if index is None:\n index = 1\n # Make colormap dir\n cmap_dir = path.expanduser('~/abin/colormaps')\n if not path.exists(cmap_dir):\n os.makedirs(cmap_dir)\n # Generate palette file\n temp_file = 'colors.tmp'\n with open(temp_file, 'w') as fout:\n fout.writelines(['\\t'.join(map(str, color))+'\\n' for color in cmap])\n cmap_file = path.join(cmap_dir, '{0}.pal'.format(name))\n with open(cmap_file, 'w') as fout:\n if categorical:\n subprocess.check_call(['MakeColorMap', '-f', temp_file, '-ah', name, '-nc', str(len(cmap))], stdout=fout)\n else:\n subprocess.check_call(['MakeColorMap', '-f', temp_file, '-ah', name] +\n (['-nc', str(128), '-sl'] if cyclic else ['-nc', str(129)]), stdout=fout)\n os.remove(temp_file)\n # Update .afnirc\n update_afnirc(**{'AFNI_COLORSCALE_{0:02d}'.format(index): path.relpath(cmap_file, path.expanduser('~'))})\n\n\ndef write_colorscale_file(fname, pal_name, colors, locations=None, interp=None):\n '''\n Parameters\n ----------\n fname : `*.pal` file name\n pal_name : palette name (or title)\n colors : a list of RGB colors within [0,1]\n first color (bottom) -> last color (top)\n locations : locations of the breakpoints where colors are defined\n 0 (bottom) -> 1 (top)\n interp : 'linear'|'nearest'\n\n AFNI document says \"There are exactly 128 color locations on an AFNI colorscale.\"\n For details, see https://afni.nimh.nih.gov/pub/dist/doc/OLD/afni_colorscale.html\n But in fact, if you fill the colorscale file with a lot of colors, only the first 256 colors will be used.\n '''\n if locations is None:\n locations = np.linspace(0, 1, len(colors))\n if interp is None:\n interp = 'linear'\n cmap = interpolate.interp1d(locations, colors, kind=interp, axis=0, bounds_error=False, fill_value='extrapolate')\n clist = [mpl.colors.to_hex(color) for color in cmap(np.linspace(0, 1, 256))]\n with open(fname, 'w') as fout:\n fout.write(f\"{pal_name}\\n\")\n fout.writelines([f\"{color}\\n\" for color in reversed(clist)])\n\n\ndef parse_patch(patch):\n '''\n Notes\n -----\n 1. Each replacement is started with one or more comment lines. The last\n comment line is treated as replacement target, which may contain an\n optional replacement directive at the end:\n # This is an example <replace command=\"1\"/>\n \n Possible directives for replacing the original scripts includes:\n\n 1) command=\"n\": replace n commands\n 2) line=\"n\": replace n lines\n 3) until=\"regexp\": replace until a specific line (the regexp is the\n last line to be replaced)\n\n 2. 
Each replacement must end with two consecutive newlines.\n '''\n with open(patch, 'r') as fin:\n lines = fin.read().splitlines()\n replacements = []\n is_content = False\n n_newlines = 0\n for k, line in enumerate(lines):\n if is_content:\n contents.append(line)\n if line.strip() == '':\n n_newlines += 1\n if n_newlines >= 2:\n is_content = False\n else:\n n_newlines = 0\n if not is_content or k+1 == len(lines):\n for kk in range(min(2, len(contents))):\n if contents[-1] == '':\n contents.pop(-1)\n else:\n break\n contents.append('# </patch>')\n replacements.append({'target': target, 'directives': directives, 'contents': contents})\n elif line[0] == '#':\n if k+1 < len(lines) and lines[k+1][0] != '#':\n match = re.match('((?:(?!<replace).)*)(?:<replace(.*)/>)?', line)\n target = match.group(1).rstrip()\n if match.group(2) is not None:\n attributes = shlex.split(match.group(2).strip())\n directives = dict([attr.split('=') for attr in attributes])\n else:\n directives = {'command': 1}\n is_content = True\n contents = ['# <patch>']\n return replacements\n\n\ndef patch_afni_proc(original, patch, inplace=True):\n replacements = parse_patch(patch)\n n = 0\n with open(original, 'r') as fin:\n lines = fin.read().splitlines()\n patched = []\n n_to_replace = 0\n for k, line in enumerate(lines):\n if n == len(replacements):\n patched.append(line)\n else:\n replacement = replacements[n]\n if not n_to_replace:\n patched.append(line)\n match = re.search(replacement['target'], line)\n if match:\n replacement['indent'] = match.start()\n replacement['n_lines'] = six.MAXSIZE\n directives = replacement['directives']\n if 'command' in directives:\n nc = 0\n n_lines = 0\n while nc < int(directives['command']):\n n_lines += 1\n x = lines[k+n_lines].strip()\n if x != '' and x[0] != '#' and x[-1] != '\\\\':\n nc += 1\n replacement['n_lines'] = min(replacement['n_lines'], n_lines)\n if 'until' in directives:\n n_lines = 0\n while not re.match(directives['until'], lines[k+n_lines]):\n n_lines += 1\n replacement['n_lines'] = min(replacement['n_lines'], n_lines)\n if 'line' in directives:\n replacement['n_lines'] = min(replacement['n_lines'], int(directives['line']))\n n_to_replace = replacement['n_lines']\n else:\n patched.append('# ' + line)\n n_to_replace -= 1\n if n_to_replace == 0:\n for content in replacement['contents']:\n patched.append(' '*replacement['indent'] + content)\n if not inplace:\n p, f = path.split(original)\n fname = path.join(p, 'patched.'+f)\n else:\n shutil.copy(original, original+'.bak')\n fname = original\n with open(fname, 'w') as fout:\n fout.write('\\n'.join(patched))\n\n\nif __name__ == '__main__':\n pass",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function, division, absolute_import, unicode_literals\nfrom os import path\nfrom collections import OrderedDict\nimport numpy as np\nfrom scipy import stats\nimport pandas as pd\nfrom sklearn import model_selection, metrics\nfrom . import six, utils\n\n\ndef standardize_within_group(X, groups, with_mean=True, with_std=True):\n '''\n This is an extension of the \"mean centering\" method proposed in [1].\n Can be used as a replacement for the training-set-wise standardization.\n Both with_mean and with_std may provide some extra performance.\n\n References\n ----------\n [1] Lee, S., & Kable, J. W. (2018). Simple but robust improvement in \n multivoxel pattern classification. PloS One, 13(11), e0207083.\n '''\n X = X.copy()\n for g in np.unique(groups):\n indexer = (groups==g)\n u = X[indexer].mean(axis=0, keepdims=True)\n s = X[indexer].std(axis=0, keepdims=True) if with_std else 1\n if with_mean:\n X[indexer] = (X[indexer] - u) / s\n else:\n X[indexer] = (X[indexer] - u) / s + u\n return X\n\n\ndef permute_within_group(y, groups):\n y = y.copy()\n for g in np.unique(groups):\n indexer = (groups==g)\n y[indexer] = y[indexer][np.random.permutation(np.sum(indexer))]\n return y\n\n\ndef cross_validate_ext(model, X, y, groups=None, cv=None, pred_kws=None, method=None):\n if cv is None:\n cv = model_selection.LeaveOneGroupOut() # One group of each run\n if method is None:\n method = 'predict'\n pred_kws = dict(dict(), **({} if pred_kws is None else pred_kws))\n res = []\n idx = []\n for train_index, test_index in cv.split(X, y, groups):\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n model.fit(X_train, y_train)\n res.append(getattr(model, method)(X_test, **pred_kws))\n idx.extend(test_index)\n sorter = np.argsort(idx)\n if isinstance(res[0], tuple) and len(res[0]) > 1: # predict() has more the one output\n res = tuple([np.concatenate([r[k] for r in res], axis=0)[sorter] for k in range(len(res[0]))])\n else: # predict() has only one output\n res = np.concatenate(res, axis=0)[sorter]\n return res\n\n\ndef cross_validate_with_permutation(model, X, y, groups, rois=None, n_permutations=1000, scoring=None, cv=None):\n if rois is None:\n X, y, groups, rois = [X], [y], [groups], ['NA']\n if cv is None:\n cv = model_selection.LeaveOneGroupOut() # One group of each run\n if scoring is None:\n scoring = {'performance': metrics.make_scorer(metrics.accuracy_score)}\n def cross_validate(X, y, groups, roi, permute):\n if permute:\n y = permute_within_group(y, groups)\n scores = model_selection.cross_validate(model, X, y, groups, \\\n scoring=scoring, cv=cv, return_train_score=True, n_jobs=1)\n res = OrderedDict(roi=roi, permute=permute, train=np.mean(scores['train_performance']), \n test=np.mean(scores['test_performance']))\n return res\n res = []\n for XX, yy, gg, roi in zip(X, y, groups, rois):\n for permute in range(n_permutations+1):\n res.append(cross_validate(XX, yy, gg, roi, permute))\n res = pd.DataFrame(res)\n return res\n\n\ndef compute_critical_value(x, y, permute='permute', data=None, alpha=0.05, tail=2):\n '''\n Get critical values based on permutation distribution, and\n account for multiple comparisons using extreme statistics.\n\n Parameters\n ----------\n x : str, list of str\n Columns along which multiple comparisons occur (e.g., roi, time).\n y : str\n Column for performance measurement (e.g., test_accuracy, PC, RT).\n data : pd.DataFrame(x, y, permute)\n 
permute == 0 is originally observed data, >= 1 is permutation data.\n '''\n # Mean performance for each condition and each permutation\n by = [x, permute] if isinstance(x, six.string_types) else list(x) + [permute]\n df = data[data[permute]>0].groupby(by=by)[y].mean() # This is a Series with MultiIndex\n # Globally corrected critical value\n max_dist = df.groupby(permute).max().values # Max distribution\n min_dist = df.groupby(permute).min().values # Min distribution\n if tail == 2:\n gmax = np.percentile(max_dist, (1-alpha/2)*100)\n gmin = np.percentile(min_dist, alpha/2*100)\n else:\n gmax = np.percentile(max_dist, (1-alpha)*100)\n gmin = np.percentile(min_dist, alpha*100)\n # Per-comparison (uncorrected) critical value\n if tail == 2:\n pmax = df.groupby(x).quantile(1-alpha/2)\n pmin = df.groupby(x).quantile(alpha/2)\n else:\n pmax = df.groupby(x).quantile(1-alpha)\n pmin = df.groupby(x).quantile(alpha)\n bounds = pd.concat([pmin, pmax], axis=1)\n bounds.columns = ['lower', 'upper']\n bounds = pd.concat([pd.DataFrame([{'lower': gmin, 'upper': gmax}], index=['overall']), bounds], axis=0)\n # Determine significance\n obs = data[data[permute]==0]\n if obs.size > 0: # Contain originally observed data\n bounds['obs_mean'] = pd.concat([pd.Series([np.nan], ['overall']), obs.groupby(by=x)[y].mean()], axis=0) # Mean response\n bounds['obs_std'] = pd.concat([pd.Series([np.nan], ['overall']), obs.groupby(by=x)[y].std()], axis=0)\n bounds['obs_n'] = pd.concat([pd.Series([-1], ['overall']), obs.groupby(by=x)[y].count()], axis=0)\n n_comparisons = len(obs[x].unique())\n if tail == 2: # The two-tailed p value is twice the one-tailed p value (assuming you correctly predicted the direction of the difference)\n bounds['corrected'] = (bounds.obs_mean < bounds.lower['overall']) | (bounds.upper['overall'] < bounds.obs_mean) # Significance (corrected)\n bounds['uncorrected'] = (bounds.obs_mean < bounds.lower) | (bounds.upper < bounds.obs_mean) # Significance (uncorrected)\n elif tail == 1:\n bounds['corrected'] = (bounds.upper['overall'] < bounds.obs_mean)\n bounds['p_corr'] = [np.nan] + [1-stats.percentileofscore(max_dist, v)/100 for v in bounds.obs_mean[1:]]\n bounds['uncorrected'] = (bounds.upper < bounds.obs_mean)\n bounds['p_uncorr'] = [np.nan] + [1-stats.percentileofscore(df[k], v)/100 for k, v in bounds.obs_mean[1:].iteritems()]\n bounds['bonferroni'] = bounds['p_uncorr'] * n_comparisons\n elif tail == -1:\n bounds['corrected'] = (bounds.obs_mean < bounds.lower['overall'])\n bounds['uncorrected'] = (bounds.obs_mean < bounds.lower)\n return bounds\n\n\n# def plot_permutation(x, y, subject='subject', permute='permute', data=None, plot=None, \n# color=None, x_order=None, xtick_format=None, chance=0, alpha=0.05, tail=2, mcc='extreme', \n# figsize=None, star_shift=None, star_alpha=None,\n# dist_kws=None, scatter_kws=None, line_kws=None, bar_kws=None, point_kws=None, chance_kws=None, ref_kws=None, \n# show_mcc=True, show_n=True, show_p=False, show_ref=False, show_num=False):\n# '''\n# Parameters\n# ----------\n# x : str\n# y : str\n# data : pd.DataFrame(x, y, permute)\n# permute == 0 is originally observed data, >= 1 is permutation data.\n# '''\n# if figsize is not None:\n# fig = plt.figure(figsize=figsize)\n# else:\n# fig = plt.gcf()\n# pmt = data[data[permute]>0]\n# has_pmt = (pmt.size > 0)\n# obs = data[data[permute]==0]\n# if x_order is None:\n# x_order = data[x].unique()\n# else:\n# x_order = [x_label for x_label in x_order if x_label in data[x].values]\n# if has_pmt:\n# bounds = 
compute_critical_value(x=x, y=y, data=data, alpha=alpha, tail=tail)\n# df_pmt = pmt.groupby(by=[x, permute])[y].mean().reset_index()\n# df_obs = obs.groupby(by=[x, subject])[y].mean().reset_index()\n# obs_mean = df_obs.groupby(x)[y].mean()\n# obs_n = df_obs.groupby(x)[y].count()\n# x_loc = np.arange(len(df_obs[x].unique()))\n# if plot is None:\n# plot = 'violinplot' if has_pmt else 'barplot'\n# if plot == 'violinplot':\n# # Plot permutation distribution\n# dist_kws = dict(dict(color='gray', inner=None, linewidth=0), **(dist_kws if dist_kws is not None else {}))\n# sns.violinplot(x=x, y=y, data=df_pmt, order=x_order, **dist_kws)\n# # Plot originally observed data\n# scatter_kws = dict(dict(color=color, s=100, linewidths=1, edgecolors='k'), **(scatter_kws if scatter_kws is not None else {}))\n# plt.scatter(np.arange(len(x_order)), bounds.loc[x_order,'obs_mean'], **scatter_kws)\n# elif plot == 'lineplot':\n# line_kws = dict(dict(), **(line_kws if line_kws is not None else {}))\n# sns.lineplot(x=x, y=y, data=df_obs, ci=(1-alpha)*100, palette=color, **line_kws)\n# x_loc = df_obs[x].unique()\n# elif plot == 'barplot':\n# color = 'gray' if color is None else color\n# bar_kws = dict(dict(), **(bar_kws if bar_kws is not None else {}))\n# sns.barplot(x=x, y=y, data=df_obs, order=x_order, ci=(1-alpha)*100, color=color, **bar_kws)\n# elif plot == 'finalplot':\n# # Plot permutation distribution\n# dist_kws = dict(dict(color='gray', alpha=0.5, inner=None, linewidth=0), **(dist_kws if dist_kws is not None else {}))\n# sns.violinplot(x=x, y=y, data=df_pmt, order=x_order, **dist_kws)\n# # Plot bootstrap errorbars\n# color = 'k' if color is None else color\n# point_kws = dict(dict(linestyles='', scale=0.5, errwidth=2, capsize=0.1, facecolors='r'), **(point_kws if point_kws is not None else {}))\n# sns.pointplot(x=x, y=y, data=df_obs, order=x_order, ci=(1-alpha)*100, color=color, **point_kws)\n# # Plot originally observed data\n# scatter_kws = dict(dict(s=50, marker='o', linewidths=1, edgecolors=color, zorder=10), **(scatter_kws if scatter_kws is not None else {}))\n# plt.scatter(np.arange(len(x_order)), bounds.loc[x_order,'obs_mean'], **scatter_kws)\n# # Shift long ticklabels\n# if xtick_format is None:\n# xtick_format = ('normal' if plot == 'lineplot' else 'rotated')\n# if xtick_format == 'rotated':\n# plt.setp(plt.gca().get_xticklabels(), rotation=-30)\n# dx = 20/72; dy = 0/72 \n# offset = matplotlib.transforms.ScaledTranslation(dx, dy, fig.dpi_scale_trans)\n# for label in plt.gca().xaxis.get_majorticklabels():\n# label.set_transform(label.get_transform() + offset)\n# elif xtick_format == 'short':\n# plt.setp(plt.gca(), xticklabels=[label.get_text().split('_')[0] for label in plt.gca().get_xticklabels()])\n# elif xtick_format == 'normal':\n# pass\n# elif xtick_format == 'final':\n# plt.setp(plt.gca(), xticklabels=[label.get_text().split('_')[0] for label in plt.gca().get_xticklabels()])\n# plt.setp(plt.gca().get_xticklabels(), rotation=45, ha='right')\n# dx = 15/72; dy = 5/72 \n# offset = matplotlib.transforms.ScaledTranslation(dx, dy, fig.dpi_scale_trans)\n# for label in plt.gca().xaxis.get_majorticklabels():\n# label.set_transform(label.get_transform() + offset)\n# # Plot chance level\n# chance_kws = dict(dict(color='C3', ls='--', zorder=1), **(chance_kws if chance_kws is not None else {}))\n# plt.axhline(chance, **chance_kws)\n# # Plot reference line\n# if show_ref:\n# ref_kws = dict(dict(ref=0.55, color='gray', lw=0.5, ls='--'), **(ref_kws if ref_kws is not None else {}))\n# ref = 
ref_kws.pop('ref')\n# plt.axhline(ref, **ref_kws)\n# if star_alpha is None:\n# star_alpha = [0.3, 1]\n# if has_pmt:\n# # Plot multiple comparison correction band\n# if show_mcc:\n# plt.axhspan(bounds.loc['overall','lower'] if tail==2 else chance, bounds.loc['overall','upper'], color='r', alpha=0.1)\n# # Plot significant stars\n# if star_shift is None:\n# star_shift = bounds.ix[1:,'obs_std']/np.sqrt(bounds.ix[1:,'obs_n']) * 2.2 # Ignore first row\n# else:\n# star_shift = pd.Series(star_shift, index=bounds.index)\n# for k, x_label in enumerate(x_order):\n# if bounds.loc[x_label,'uncorrected']:\n# plt.text(x_loc[k], bounds.loc[x_label,'obs_mean']+star_shift[x_label], '*', ha='center', alpha=star_alpha[0])\n# if bounds.loc[x_label,'corrected']:\n# plt.text(x_loc[k], bounds.loc[x_label,'obs_mean']+star_shift[x_label], '*', ha='center', alpha=star_alpha[1])\n# if show_p:\n# if mcc == 'none':\n# plt.text(x_loc[k], 0.15, f\"{bounds.loc[x_label,'p_uncorr']:.3f}\", transform=myplot.transHDVA(), ha='center', alpha=star_alpha[0], fontsize='xx-small')\n# if mcc == 'extreme':\n# plt.text(x_loc[k], 0.15, f\"{bounds.loc[x_label,'p_corr']:.3f}\", transform=myplot.transHDVA(), ha='center', alpha=star_alpha[0], fontsize='xx-small')\n# elif mcc == 'bonferroni':\n# plt.text(x_loc[k], 0.15, f\"{bounds.loc[x_label,'bonferroni']:.3f}\", transform=myplot.transHDVA(), ha='center', alpha=star_alpha[1], fontsize='xx-small')\n# # Plot performance\n# if show_num:\n# for k, x_label in enumerate(x_order):\n# plt.text(x_loc[k], bounds.loc['overall','upper']*1.1-0.05 if has_pmt else 0.9, f\"{obs_mean[x_label]:.3f}\", \n# transform=plt.gca().transData if has_pmt else myplot.transHDVA(), ha='center', alpha=star_alpha[0], fontsize='xx-small')\n# # Plot obs_n\n# if show_n:\n# if len(set(obs_n[x_order].values)) == 1: # All equal\n# plt.text(0.95, 0.05, f\"$n={obs_n[x_order[0]]}$\", transform=plt.gca().transAxes, ha='right', fontsize='x-small')\n# else:\n# for k, x_label in enumerate(x_order):\n# plt.text(x_loc[k], 0.05, f\"$n={obs_n[x_label]}$\" if k == 0 else f\"${obs_n[x_label]}$\", \n# transform=myplot.transHDVA(), ha='center', fontsize='x-small')\n\n\nif __name__ == '__main__':\n pass\n"
] | [
[
"numpy.diag",
"numpy.linspace",
"numpy.issubdtype",
"matplotlib.colors.to_hex",
"numpy.int_",
"numpy.atleast_1d",
"scipy.interpolate.interp1d",
"numpy.any",
"numpy.float_",
"numpy.argsort",
"numpy.array"
],
[
"pandas.concat",
"sklearn.model_selection.cross_validate",
"pandas.Series",
"numpy.unique",
"pandas.DataFrame",
"numpy.percentile",
"numpy.concatenate",
"scipy.stats.percentileofscore",
"numpy.mean",
"sklearn.metrics.make_scorer",
"sklearn.model_selection.LeaveOneGroupOut",
"numpy.argsort",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
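The `decoding.py` file in the entry above pairs `LeaveOneGroupOut` cross-validation with within-run label permutations, scoring the observed accuracy against the permutation null via `percentileofscore`. A toy end-to-end sketch of that scheme, with random data and `LogisticRegression` standing in for whatever estimator the caller would supply:

```python
import numpy as np
from scipy import stats
from sklearn import model_selection, metrics
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
X = rng.normal(size=(120, 10))
y = rng.integers(0, 2, size=120)
groups = np.repeat(np.arange(6), 20)            # six "runs" of 20 trials each

cv = model_selection.LeaveOneGroupOut()
scorer = metrics.make_scorer(metrics.accuracy_score)
obs = model_selection.cross_validate(
    LogisticRegression(max_iter=1000), X, y, groups=groups,
    scoring=scorer, cv=cv)["test_score"].mean()

# Null distribution: shuffle labels within each run, re-score, repeat.
null = []
for _ in range(100):
    y_perm = y.copy()
    for g in np.unique(groups):
        m = groups == g
        y_perm[m] = rng.permutation(y_perm[m])
    null.append(model_selection.cross_validate(
        LogisticRegression(max_iter=1000), X, y_perm, groups=groups,
        scoring=scorer, cv=cv)["test_score"].mean())

p = 1 - stats.percentileofscore(null, obs) / 100   # one-tailed p value
print(f"observed={obs:.3f}, p={p:.3f}")
```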
animesh-007/digit-identify | [
"236befc520af08ff838dfdf2ae0392d8afb7598a"
] | [
"app.py"
] | [
"import os\r\n#Define backend as tensorflow\r\nos.environ['KERAS_BACKEND']='tensorflow'\r\n#It is important to import keras after changing backend\r\nimport keras\r\nfrom flask import Flask, render_template,request\r\nfrom scipy.misc import imsave, imread, imresize\r\nimport numpy as np\r\n#import keras.models\r\nimport re\r\n\r\nimport sys \r\nimport os\r\nsys.path.append(os.path.abspath(\"./model\"))\r\nfrom load import * \r\n\r\napp = Flask(__name__)\r\n\r\nglobal model, graph\r\n\r\nmodel, graph = init()\r\n\r\ndef convertImage(imgData1):\r\n\timgstr = re.search(r'base64,(.*)',imgData1).group(1)\r\n\t#print(imgstr)\r\n\twith open('output.png','wb') as output:\r\n\t\toutput.write(imgstr.decode('base64'))\r\n\t\r\n\r\[email protected]('/')\r\ndef index():\r\n\treturn render_template(\"index.html\")\r\n\r\[email protected]('/predict/',methods=['GET','POST'])\r\ndef predict():\r\n\timgData = request.get_data()\r\n\r\n\tconvertImage(imgData)\r\n\tprint(\"debug\")\r\n\r\n\tx = imread('output.png',mode='L')\r\n\tx = np.invert(x)\r\n\tx = imresize(x,(28,28))\r\n\tx = x.reshape(1,28,28,1)\r\n\r\n\tprint(\"debug2\")\r\n\twith graph.as_default():\r\n\t\tout = model.predict(x)\r\n\t\tprint(out)\r\n\t\tprint(np.argmax(out,axis=1))\r\n\t\tprint(\"debug3\")\r\n\t\tresponse = np.array_str(np.argmax(out,axis=1))\r\n\t\treturn response\t\r\n\t\r\n\r\nif __name__ == \"__main__\":\r\n\t#decide what port to run the app in\r\n\t#port = int(os.environ.get('PORT', 5000))\r\n\r\n\t#run the app locally on the givn port\r\n\tapp.run(host='127.0.0.1', port=1245)\r\n\r\n"
] | [
[
"scipy.misc.imresize",
"numpy.argmax",
"scipy.misc.imread",
"numpy.invert"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.10",
"0.16",
"0.19",
"0.18",
"0.12",
"1.0",
"0.17",
"1.2"
],
"tensorflow": []
}
] |
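The `app.py` entry above depends on `scipy.misc.imread` / `imresize`, which only exist in the older SciPy releases listed for this row (both were removed from `scipy.misc` in later versions). One possible replacement for the same preprocessing using Pillow instead, assuming the `output.png` written by the Flask handler is present on disk:

```python
import numpy as np
from PIL import Image

# Grayscale load, invert, resize to 28x28, reshape to (1, 28, 28, 1):
# mirrors the imread(mode='L') / np.invert / imresize pipeline in app.py.
img = Image.open("output.png").convert("L")
img = img.resize((28, 28))
x = np.invert(np.asarray(img))                 # white-on-black -> black-on-white
x = x.reshape(1, 28, 28, 1).astype("float32")
print(x.shape)
```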
rozlana-g/FEDOT | [
"a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c",
"a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c",
"a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c",
"a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c"
] | [
"test/unit/data/test_data.py",
"fedot/core/optimisers/gp_comp/param_free_gp_optimiser.py",
"examples/time_series/ts_forecasting_composing.py",
"fedot/core/operations/evaluation/operation_implementations/data_operations/decompose.py"
] | [
"import os\nfrom copy import deepcopy\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom sklearn.datasets import load_iris\n\nfrom fedot.core.data.data import InputData, OutputData\nfrom fedot.core.data.multi_modal import MultiModalData\nfrom fedot.core.repository.dataset_types import DataTypesEnum\nfrom fedot.core.repository.tasks import Task, TaskTypesEnum\nfrom fedot.core.utils import fedot_project_root\nfrom test.unit.tasks.test_classification import get_image_classification_data\n\n\[email protected]()\ndef data_setup() -> InputData:\n predictors, response = load_iris(return_X_y=True)\n np.random.seed(1)\n np.random.shuffle(predictors)\n np.random.shuffle(response)\n predictors = predictors[:100]\n response = response[:100]\n data = InputData(features=predictors, target=response, idx=np.arange(0, 100),\n task=Task(TaskTypesEnum.classification),\n data_type=DataTypesEnum.table)\n return data\n\n\[email protected]()\ndef output_dataset():\n task = Task(TaskTypesEnum.classification)\n\n samples = 1000\n x = 10.0 * np.random.rand(samples, ) - 5.0\n x = np.expand_dims(x, axis=1)\n threshold = 0.5\n y = 1.0 / (1.0 + np.exp(np.power(x, -1.0)))\n classes = np.array([0.0 if val <= threshold else 1.0 for val in y])\n classes = np.expand_dims(classes, axis=1)\n data = OutputData(idx=np.arange(0, 100), features=x, predict=classes,\n task=task, data_type=DataTypesEnum.table)\n\n return data\n\n\ndef test_data_subset_correct(data_setup):\n subset_size = 50\n subset = data_setup.subset(0, subset_size - 1)\n\n assert len(subset.idx) == subset_size\n assert len(subset.features) == subset_size\n assert len(subset.target) == subset_size\n\n\ndef test_data_subset_incorrect(data_setup):\n subset_size = 105\n with pytest.raises(ValueError):\n assert data_setup.subset(0, subset_size)\n\n with pytest.raises(ValueError):\n assert data_setup.subset(-1, subset_size)\n with pytest.raises(ValueError):\n assert data_setup.subset(-1, -1)\n\n\ndef test_data_from_csv():\n test_file_path = str(os.path.dirname(__file__))\n file = '../../data/simple_classification.csv'\n task = Task(TaskTypesEnum.classification)\n df = pd.read_csv(os.path.join(test_file_path, file))\n data_array = np.array(df).T\n features = data_array[1:-1].T\n target = data_array[-1]\n idx = data_array[0]\n expected_features = InputData(features=features, target=target,\n idx=idx,\n task=task,\n data_type=DataTypesEnum.table).features\n actual_features = InputData.from_csv(\n os.path.join(test_file_path, file)).features\n assert np.array_equal(expected_features, actual_features)\n\n\ndef test_with_custom_target():\n test_file_path = str(os.path.dirname(__file__))\n file = '../../data/simple_classification.csv'\n file_custom = '../../data/simple_classification_with_custom_target.csv'\n\n file_data = InputData.from_csv(\n os.path.join(test_file_path, file))\n\n expected_features = file_data.features\n expected_target = file_data.target\n\n custom_file_data = InputData.from_csv(\n os.path.join(test_file_path, file_custom), delimiter=';')\n actual_features = custom_file_data.features\n actual_target = custom_file_data.target\n\n assert not np.array_equal(expected_features, actual_features)\n assert not np.array_equal(expected_target, actual_target)\n\n custom_file_data = InputData.from_csv(\n os.path.join(test_file_path, file_custom), delimiter=';',\n columns_to_drop=['redundant'], target_columns='custom_target')\n\n actual_features = custom_file_data.features\n actual_target = custom_file_data.target\n\n assert 
np.array_equal(expected_features, actual_features)\n assert np.array_equal(expected_target, actual_target)\n\n\ndef test_data_from_predictions(output_dataset):\n data_1 = output_dataset\n data_2 = output_dataset\n data_3 = output_dataset\n new_input_data = InputData.from_predictions(outputs=[data_1, data_2, data_3])\n assert new_input_data.features.all() == np.array(\n [data_1.predict, data_2.predict, data_3.predict]).all()\n\n\ndef test_data_from_image():\n _, _, dataset_to_validate = get_image_classification_data()\n\n assert dataset_to_validate.data_type == DataTypesEnum.image\n assert type(dataset_to_validate.features) == np.ndarray\n assert type(dataset_to_validate.target) == np.ndarray\n\n\ndef test_data_from_json():\n # several features\n files_path = os.path.join('test', 'data', 'multi_modal')\n path = os.path.join(str(fedot_project_root()), files_path)\n data = InputData.from_json_files(path, fields_to_use=['votes', 'year'],\n label='rating', task=Task(TaskTypesEnum.regression))\n assert data.features.shape[1] == 2 # check there is two features\n assert len(data.target) == data.features.shape[0] == len(data.idx)\n\n # single feature\n data = InputData.from_json_files(path, fields_to_use=['votes'],\n label='rating', task=Task(TaskTypesEnum.regression))\n assert len(data.features.shape) == 1 # check there is one feature\n assert len(data.target) == len(data.features) == len(data.idx)\n\n\ndef test_multi_modal_data():\n num_samples = 5\n target = np.asarray([0, 0, 1, 0, 1])\n img_data = InputData(idx=range(num_samples),\n features=None, # in test the real data is not passed\n target=target,\n data_type=DataTypesEnum.text,\n task=Task(TaskTypesEnum.classification))\n tbl_data = InputData(idx=range(num_samples),\n features=None, # in test the real data is not passed\n target=target,\n data_type=DataTypesEnum.table,\n task=Task(TaskTypesEnum.classification))\n\n multi_modal = MultiModalData({\n 'data_source_img': img_data,\n 'data_source_table': tbl_data,\n })\n\n assert multi_modal.task.task_type == TaskTypesEnum.classification\n assert len(multi_modal.idx) == 5\n assert multi_modal.num_classes == 2\n assert np.array_equal(multi_modal.target, target)\n\n\ndef test_target_data_from_csv_correct():\n \"\"\" Function tests two ways of processing target columns in \"from_csv\"\n method\n \"\"\"\n test_file_path = str(os.path.dirname(__file__))\n file = '../../data/multi_target_sample.csv'\n path = os.path.join(test_file_path, file)\n task = Task(TaskTypesEnum.regression)\n\n # Process one column\n target_column = '1_day'\n one_column_data = InputData.from_csv(path, target_columns=target_column,\n columns_to_drop=['date'], task=task)\n\n # Process multiple target columns\n target_columns = ['1_day', '2_day', '3_day', '4_day', '5_day', '6_day', '7_day']\n seven_columns_data = InputData.from_csv(path, target_columns=target_columns,\n columns_to_drop=['date'], task=task)\n\n assert one_column_data.target.shape == (499, 1)\n assert seven_columns_data.target.shape == (499, 7)\n\n\ndef test_table_data_shuffle():\n test_file_path = str(os.path.dirname(__file__))\n file = '../../data/simple_classification.csv'\n\n data = InputData.from_csv(os.path.join(test_file_path, file))\n shuffled_data = deepcopy(data)\n shuffled_data.shuffle()\n\n assert not np.array_equal(data.idx, shuffled_data.idx)\n assert not np.array_equal(data.features, shuffled_data.features)\n assert not np.array_equal(data.target, shuffled_data.target)\n\n assert np.array_equal(data.idx, sorted(shuffled_data.idx))\n",
"from copy import deepcopy\nfrom typing import (Any, List, Optional, Tuple)\n\nimport numpy as np\nfrom deap import tools\n\nfrom fedot.core.log import Log\nfrom fedot.core.optimisers.gp_comp.gp_operators import clean_operators_history, duplicates_filtration, \\\n num_of_parents_in_crossover\nfrom fedot.core.optimisers.gp_comp.gp_optimiser import GPGraphOptimiser, GPGraphOptimiserParameters\nfrom fedot.core.optimisers.gp_comp.iterator import SequenceIterator, fibonacci_sequence\nfrom fedot.core.optimisers.gp_comp.operators.inheritance import GeneticSchemeTypesEnum, inheritance\nfrom fedot.core.optimisers.gp_comp.operators.regularization import regularized_population\nfrom fedot.core.optimisers.gp_comp.operators.selection import selection\nfrom fedot.core.optimisers.timer import OptimisationTimer\nfrom fedot.core.optimisers.utils.population_utils import is_equal_archive\nfrom fedot.core.repository.quality_metrics_repository import ComplexityMetricsEnum, MetricsEnum, MetricsRepository\n\nDEFAULT_MAX_POP_SIZE = 55\n\n\nclass GPGraphParameterFreeOptimiser(GPGraphOptimiser):\n \"\"\"\n Implementation of the parameter-free adaptive evolutionary optimiser\n (population size and genetic operators rates is changing over time).\n For details, see original paper: https://arxiv.org/abs/2001.10178\n :param initial_graph: graph which was initialized outside the optimiser\n :param requirements: composer requirements\n :param graph_generation_params: parameters for new graph generation\n :param metrics: quality metrics\n :param parameters: parameters of graph optimiser\n :param max_population_size: maximum population size\n :param log: optional parameter for log object\n :param archive_type: type of archive with best individuals\n :param complexity_metric: Supplementary metric which uses in single-objective type of algorithm (in multi-objective\n option this parameter is ignored)\n\n \"\"\"\n\n def __init__(self, initial_graph, requirements, graph_generation_params, metrics: List[MetricsEnum],\n parameters: Optional[GPGraphOptimiserParameters] = None,\n max_population_size: int = DEFAULT_MAX_POP_SIZE,\n sequence_function=fibonacci_sequence, log: Log = None, archive_type=None,\n suppl_metric=MetricsRepository().metric_by_id(ComplexityMetricsEnum.node_num)):\n super().__init__(initial_graph, requirements, graph_generation_params, metrics, parameters, log, archive_type)\n\n if archive_type is not None:\n self.archive = archive_type\n\n if self.parameters.genetic_scheme_type != GeneticSchemeTypesEnum.parameter_free:\n self.log.warn(f'Invalid genetic scheme type was changed to parameter-free. 
Continue.')\n self.parameters.genetic_scheme_type = GeneticSchemeTypesEnum.parameter_free\n\n self.sequence_function = sequence_function\n self.max_pop_size = max_population_size\n self.iterator = SequenceIterator(sequence_func=self.sequence_function, min_sequence_value=1,\n max_sequence_value=self.max_pop_size,\n start_value=self.requirements.pop_size)\n\n self.requirements.pop_size = self.iterator.next()\n self.metrics = metrics\n\n self.qual_position = 0\n self.compl_position = 1\n\n self.suppl_metric = suppl_metric\n\n def optimise(self, objective_function, offspring_rate: float = 0.5, on_next_iteration_callback=None):\n if on_next_iteration_callback is None:\n on_next_iteration_callback = self.default_on_next_iteration_callback\n\n self._init_population()\n\n num_of_new_individuals = self.offspring_size(offspring_rate)\n self.log.info(f'pop size: {self.requirements.pop_size}, num of new inds: {num_of_new_individuals}')\n\n with OptimisationTimer(timeout=self.requirements.timeout, log=self.log) as t:\n self.population = self._evaluate_individuals(self.population, objective_function, timer=t)\n\n if self.archive is not None:\n self.archive.update(self.population)\n\n on_next_iteration_callback(self.population, self.archive)\n\n self.log_info_about_best()\n\n while t.is_time_limit_reached(self.generation_num) is False \\\n and self.generation_num != self.requirements.num_of_generations - 1:\n\n self.log.info(f'Generation num: {self.generation_num}')\n\n self.num_of_gens_without_improvements = self.update_stagnation_counter()\n self.log.info(f'max_depth: {self.max_depth}, no improvements: {self.num_of_gens_without_improvements}')\n\n if self.parameters.with_auto_depth_configuration and self.generation_num != 0:\n self.max_depth_recount()\n\n self.max_std = self.update_max_std()\n\n individuals_to_select = \\\n regularized_population(reg_type=self.parameters.regularization_type,\n population=self.population,\n objective_function=objective_function,\n graph_generation_params=self.graph_generation_params, timer=t)\n\n if self.parameters.multi_objective:\n filtered_archive_items = duplicates_filtration(archive=self.archive,\n population=individuals_to_select)\n individuals_to_select = deepcopy(individuals_to_select) + filtered_archive_items\n\n if num_of_new_individuals == 1 and len(self.population) == 1:\n new_population = list(self.reproduce(self.population[0]))\n new_population = self._evaluate_individuals(new_population, objective_function, timer=t)\n else:\n num_of_parents = num_of_parents_in_crossover(num_of_new_individuals)\n\n selected_individuals = selection(types=self.parameters.selection_types,\n population=individuals_to_select,\n pop_size=num_of_parents,\n params=self.graph_generation_params)\n\n new_population = []\n\n for parent_num in range(0, len(selected_individuals), 2):\n new_population += self.reproduce(selected_individuals[parent_num],\n selected_individuals[parent_num + 1])\n\n new_population = self._evaluate_individuals(new_population, objective_function, timer=t)\n\n self.requirements.pop_size = self.next_population_size(new_population)\n num_of_new_individuals = self.offspring_size(offspring_rate)\n self.log.info(f'pop size: {self.requirements.pop_size}, num of new inds: {num_of_new_individuals}')\n\n self.prev_best = deepcopy(self.best_individual)\n\n self.population = inheritance(self.parameters.genetic_scheme_type, self.parameters.selection_types,\n self.population,\n new_population, self.num_of_inds_in_next_pop,\n graph_params=self.graph_generation_params)\n\n if 
not self.parameters.multi_objective and self.with_elitism:\n self.population.append(self.prev_best)\n\n if self.archive is not None:\n self.archive.update(self.population)\n\n on_next_iteration_callback(self.population, self.archive)\n self.log.info(f'spent time: {round(t.minutes_from_start, 1)} min')\n self.log_info_about_best()\n\n self.generation_num += 1\n clean_operators_history(self.population)\n\n best = self.result_individual()\n self.log.info('Result:')\n self.log_info_about_best()\n\n output = [self.graph_generation_params.adapter.restore(ind.graph) for ind in best] if isinstance(best, list) \\\n else self.graph_generation_params.adapter.restore(best.graph)\n return output\n\n @property\n def with_elitism(self) -> bool:\n if self.parameters.multi_objective:\n return False\n else:\n return self.requirements.pop_size >= 7\n\n @property\n def current_std(self):\n if self.parameters.multi_objective:\n std = np.std([self.get_main_metric(ind) for ind in self.population])\n else:\n std = np.std([ind.fitness for ind in self.population])\n return std\n\n def update_max_std(self):\n if self.generation_num == 0:\n std_max = self.current_std\n if len(self.population) == 1:\n self.requirements.mutation_prob = 1\n self.requirements.crossover_prob = 0\n else:\n self.requirements.mutation_prob = 0.5\n self.requirements.crossover_prob = 0.5\n else:\n if self.max_std < self.current_std:\n std_max = self.current_std\n else:\n std_max = self.max_std\n return std_max\n\n def _check_mo_improvements(self, offspring: List[Any]) -> Tuple[bool, bool]:\n complexity_decreased = False\n fitness_improved = False\n offspring_archive = tools.ParetoFront()\n offspring_archive.update(offspring)\n is_archive_improved = not is_equal_archive(self.archive, offspring_archive)\n if is_archive_improved:\n best_ind_in_prev = min(self.archive.items, key=self.get_main_metric)\n best_ind_in_current = min(offspring_archive.items, key=self.get_main_metric)\n fitness_improved = self.get_main_metric(best_ind_in_current) < self.get_main_metric(best_ind_in_prev)\n for offspring_ind in offspring_archive.items:\n if self.get_main_metric(offspring_ind) <= self.get_main_metric(best_ind_in_prev) \\\n and self.get_suppl_metric(offspring_ind) < self.get_suppl_metric(best_ind_in_prev):\n complexity_decreased = True\n break\n return fitness_improved, complexity_decreased\n\n def _check_so_improvements(self, offspring: List[Any]) -> Tuple[bool, bool]:\n best_in_offspring = self.get_best_individual(offspring, equivalents_from_current_pop=False)\n fitness_improved = best_in_offspring.fitness < self.best_individual.fitness\n complexity_decreased = self.suppl_metric(best_in_offspring.graph) < self.suppl_metric(\n self.best_individual.graph) and best_in_offspring.fitness <= self.best_individual.fitness\n return fitness_improved, complexity_decreased\n\n def next_population_size(self, offspring: List[Any]) -> int:\n improvements_checker = self._check_so_improvements\n if self.parameters.multi_objective:\n improvements_checker = self._check_mo_improvements\n fitness_improved, complexity_decreased = improvements_checker(offspring)\n is_max_pop_size_reached = not self.iterator.has_next()\n progress_in_both_goals = fitness_improved and complexity_decreased and not is_max_pop_size_reached\n no_progress = not fitness_improved and not complexity_decreased and not is_max_pop_size_reached\n if (progress_in_both_goals and len(self.population) > 2) or no_progress:\n if progress_in_both_goals:\n if self.iterator.has_prev():\n next_population_size = 
self.iterator.prev()\n else:\n next_population_size = len(self.population)\n else:\n next_population_size = self.iterator.next()\n\n self.requirements.mutation_prob, self.requirements.crossover_prob = self.operators_prob_update(\n std=float(self.current_std), max_std=float(self.max_std))\n\n else:\n next_population_size = len(self.population)\n return next_population_size\n\n def operators_prob_update(self, std: float, max_std: float) -> Tuple[float, float]:\n mutation_prob = 1 - (std / max_std) if max_std > 0 and std != max_std else 0.5\n crossover_prob = 1 - mutation_prob\n return mutation_prob, crossover_prob\n\n def offspring_size(self, offspring_rate: float = None) -> int:\n if self.iterator.has_prev():\n num_of_new_individuals = self.iterator.prev()\n self.iterator.next()\n else:\n num_of_new_individuals = 1\n return num_of_new_individuals\n\n def get_main_metric(self, ind: Any) -> float:\n return ind.fitness.values[self.qual_position]\n\n def get_suppl_metric(self, ind: Any) -> float:\n return ind.fitness.values[self.compl_position]\n",
"import datetime\nimport warnings\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\n\nfrom fedot.core.composer.gp_composer.gp_composer import \\\n GPComposerBuilder, GPComposerRequirements\nfrom fedot.core.composer.gp_composer.specific_operators import parameter_change_mutation\nfrom fedot.core.data.data import InputData\nfrom fedot.core.optimisers.gp_comp.gp_optimiser import GPGraphOptimiserParameters\nfrom fedot.core.optimisers.gp_comp.operators.mutation import MutationTypesEnum\nfrom fedot.core.pipelines.node import PrimaryNode, SecondaryNode\nfrom fedot.core.pipelines.pipeline import Pipeline\nfrom fedot.core.repository.dataset_types import DataTypesEnum\nfrom fedot.core.repository.quality_metrics_repository import \\\n MetricsRepository, RegressionMetricsEnum\nfrom fedot.core.repository.tasks import Task, TaskTypesEnum, TsForecastingParams\n\nwarnings.filterwarnings('ignore')\n\n\ndef get_source_pipeline():\n \"\"\"\n Return pipeline with the following structure:\n lagged - ridge \\\n -> ridge\n lagged - ridge /\n \"\"\"\n\n # First level\n node_lagged_1 = PrimaryNode('lagged')\n node_lagged_2 = PrimaryNode('lagged')\n\n # Second level\n node_ridge_1 = SecondaryNode('ridge', nodes_from=[node_lagged_1])\n node_ridge_2 = SecondaryNode('ridge', nodes_from=[node_lagged_2])\n\n # Third level - root node\n node_final = SecondaryNode('ridge', nodes_from=[node_ridge_1, node_ridge_2])\n pipeline = Pipeline(node_final)\n\n return pipeline\n\n\ndef get_available_operations():\n \"\"\" Function returns available operations for primary and secondary nodes \"\"\"\n primary_operations = ['lagged', 'smoothing', 'gaussian_filter', 'ar']\n secondary_operations = ['lagged', 'ridge', 'lasso', 'knnreg', 'linear',\n 'scaling', 'ransac_lin_reg', 'rfe_lin_reg']\n return primary_operations, secondary_operations\n\n\ndef display_validation_metric(predicted, real, actual_values,\n is_visualise: bool) -> None:\n \"\"\" Function calculate metrics based on predicted and tests data\n\n :param predicted: predicted values\n :param real: real values\n :param actual_values: source time series\n :param is_visualise: is it needed to show the plots\n \"\"\"\n\n rmse_value = mean_squared_error(real, predicted, squared=False)\n mae_value = mean_absolute_error(real, predicted)\n print(f'RMSE - {rmse_value:.2f}')\n print(f'MAE - {mae_value:.2f}\\n')\n\n if is_visualise:\n plot_results(actual_time_series=actual_values,\n predicted_values=predicted,\n len_train_data=len(actual_values) - len(predicted))\n\n\ndef plot_results(actual_time_series, predicted_values, len_train_data,\n y_name='Sea surface height, m'):\n \"\"\"\n Function for drawing plot with predictions\n\n :param actual_time_series: the entire array with one-dimensional data\n :param predicted_values: array with predicted values\n :param len_train_data: number of elements in the training sample\n :param y_name: name of the y axis\n \"\"\"\n\n plt.plot(np.arange(0, len(actual_time_series)),\n actual_time_series, label='Actual values', c='green')\n plt.plot(np.arange(len_train_data, len_train_data + len(predicted_values)),\n predicted_values, label='Predicted', c='blue')\n\n # Plot black line which divide our array into train and test\n plt.plot([len_train_data, len_train_data],\n [min(actual_time_series), max(actual_time_series)], c='black',\n linewidth=1)\n plt.ylabel(y_name, fontsize=15)\n plt.xlabel('Time index', fontsize=15)\n plt.legend(fontsize=15)\n plt.grid()\n 
plt.show()\n\n\ndef prepare_train_test_input(train_part, len_forecast):\n \"\"\" Function return prepared data for fit and predict\n\n :param len_forecast: forecast length\n :param train_part: time series which can be used as predictors for train\n\n :return train_input: Input Data for fit\n :return predict_input: Input Data for predict\n :return task: Time series forecasting task with parameters\n \"\"\"\n\n # Specify the task to solve\n task = Task(TaskTypesEnum.ts_forecasting,\n TsForecastingParams(forecast_length=len_forecast))\n\n train_input = InputData(idx=np.arange(0, len(train_part)),\n features=train_part,\n target=train_part,\n task=task,\n data_type=DataTypesEnum.ts)\n\n start_forecast = len(train_part)\n end_forecast = start_forecast + len_forecast\n predict_input = InputData(idx=np.arange(start_forecast, end_forecast),\n features=train_part,\n target=None,\n task=task,\n data_type=DataTypesEnum.ts)\n\n return train_input, predict_input, task\n\n\ndef fit_predict_for_pipeline(pipeline, train_input, predict_input):\n \"\"\" Function apply fit and predict operations\n\n :param pipeline: pipeline to process\n :param train_input: InputData for fit\n :param predict_input: InputData for predict\n\n :return preds: prediction of the pipeline\n \"\"\"\n # Fit it\n pipeline.fit_from_scratch(train_input)\n\n # Predict\n predicted_values = pipeline.predict(predict_input)\n # Convert to one dimensional array\n preds = np.ravel(np.array(predicted_values.predict))\n\n return preds\n\n\ndef run_ts_forecasting_problem(forecast_length=50,\n with_visualisation=True,\n cv_folds=None) -> None:\n \"\"\" Function launch time series task with composing\n\n :param forecast_length: length of the forecast\n :param with_visualisation: is it needed to show the plots\n :param cv_folds: is it needed apply cross validation and what number\n of folds to use\n \"\"\"\n file_path = '../../cases/data/metocean/metocean_data_test.csv'\n\n df = pd.read_csv(file_path)\n time_series = np.array(df['sea_height'])\n\n # Train/test split\n train_part = time_series[:-forecast_length]\n test_part = time_series[-forecast_length:]\n\n # Prepare data for train and test\n train_input, predict_input, task = prepare_train_test_input(train_part,\n forecast_length)\n\n # Get pipeline with pre-defined structure\n init_pipeline = get_source_pipeline()\n\n # Init check\n preds = fit_predict_for_pipeline(pipeline=init_pipeline,\n train_input=train_input,\n predict_input=predict_input)\n display_validation_metric(predicted=preds,\n real=test_part,\n actual_values=time_series,\n is_visualise=with_visualisation)\n\n # Get available_operations type\n primary_operations, secondary_operations = get_available_operations()\n\n # Composer parameters\n composer_requirements = GPComposerRequirements(\n primary=primary_operations,\n secondary=secondary_operations, max_arity=3,\n max_depth=8, pop_size=10, num_of_generations=10,\n crossover_prob=0.8, mutation_prob=0.8,\n timeout=datetime.timedelta(minutes=10),\n cv_folds=cv_folds, validation_blocks=3)\n\n mutation_types = [parameter_change_mutation, MutationTypesEnum.growth, MutationTypesEnum.reduce,\n MutationTypesEnum.simple]\n optimiser_parameters = GPGraphOptimiserParameters(mutation_types=mutation_types)\n\n metric_function = MetricsRepository().metric_by_id(RegressionMetricsEnum.RMSE)\n builder = GPComposerBuilder(task=task). \\\n with_optimiser_parameters(optimiser_parameters). \\\n with_requirements(composer_requirements). 
\\\n with_metrics(metric_function).with_initial_pipeline(init_pipeline)\n composer = builder.build()\n\n obtained_pipeline = composer.compose_pipeline(data=train_input, is_visualise=False)\n\n ###################################\n # Obtained pipeline visualisation #\n ###################################\n if with_visualisation:\n obtained_pipeline.show()\n\n preds = fit_predict_for_pipeline(pipeline=obtained_pipeline,\n train_input=train_input,\n predict_input=predict_input)\n\n display_validation_metric(predicted=preds,\n real=test_part,\n actual_values=time_series,\n is_visualise=with_visualisation)\n\n obtained_pipeline.print_structure()\n\n\nif __name__ == '__main__':\n run_ts_forecasting_problem(forecast_length=100,\n with_visualisation=True,\n cv_folds=2)\n",
"from typing import Optional\n\nimport numpy as np\nfrom sklearn.preprocessing import OneHotEncoder\n\nfrom fedot.core.operations.evaluation.operation_implementations. \\\n implementation_interfaces import DataOperationImplementation\nfrom fedot.core.repository.tasks import Task, TaskTypesEnum\n\n\nclass DecomposerImplementation(DataOperationImplementation):\n \"\"\" Base class for decomposing target. The idea is to find the difference\n between the actual and predicted values - the residuals. Then the residuals\n replace the original target.\n \"\"\"\n\n def __init__(self, **params: Optional[dict]):\n super().__init__()\n self.params = None\n\n def fit(self, input_data):\n \"\"\"\n The decompose operation doesn't support fit method\n \"\"\"\n pass\n\n def transform(self, input_data, is_fit_pipeline_stage: Optional[bool]):\n \"\"\"\n Method for modifying input_data\n :param input_data: data with features, target and ids\n :param is_fit_pipeline_stage: is this fit or predict stage for pipeline\n :return input_data: data with transformed features attribute\n \"\"\"\n raise NotImplementedError()\n\n @staticmethod\n def divide_inputs(input_data):\n \"\"\" Method for dividing InputData into parts:\n first came from Model parent and second came from Data parent\n\n :param input_data: InputData object\n :return prev_prediction: data obtained from \"Model parent\" at the previous node\n :return prev_features: data obtained from \"Data parent\" at the previous node\n \"\"\"\n\n features = np.array(input_data.features)\n # Array with masks\n features_mask = np.array(input_data.supplementary_data.get_compound_mask())\n unique_features_masks = np.unique(features_mask)\n\n if len(unique_features_masks) < 2:\n prefix = 'Decompose operation must have at least two parents nodes'\n raise ValueError(f'{prefix}, but got {len(unique_features_masks)}')\n\n # Get amount of nodes data already visited\n flow_lengths = input_data.supplementary_data.get_flow_mask()\n\n # Find minimum and maximum of visited nodes and first indices of them\n min_flow_length_i = np.argmin(flow_lengths)\n max_flow_length_i = np.argmax(flow_lengths)\n\n # For case when data from \"Model parent\" and \"Data parent\" go through equal number of nodes\n if min_flow_length_i == max_flow_length_i:\n # Find data models\n model_parent, data_parent = input_data.supplementary_data.define_parents(unique_features_masks,\n task=input_data.task)\n else:\n model_parent = features_mask[max_flow_length_i]\n data_parent = features_mask[min_flow_length_i]\n\n # Get prediction from \"Model parent\"\n prev_prediction_id = np.ravel(np.argwhere(features_mask == model_parent))\n prev_prediction = features[:, prev_prediction_id]\n\n # Get prediction from \"Data parent\" - it must be the last parent in parent list\n prev_features_id = np.ravel(np.argwhere(features_mask == data_parent))\n prev_features = features[:, prev_features_id]\n\n return prev_prediction, prev_features\n\n def get_params(self):\n return {}\n\n\nclass DecomposerRegImplementation(DecomposerImplementation):\n \"\"\" Class for decomposing target for regression task \"\"\"\n\n def __init__(self, **params: Optional[dict]):\n super().__init__()\n self.params = None\n\n def transform(self, input_data, is_fit_pipeline_stage: Optional[bool]):\n \"\"\"\n Method for modifying input_data\n :param input_data: data with features, target and ids\n :param is_fit_pipeline_stage: is this fit or predict stage for pipeline\n :return input_data: data with transformed features attribute\n \"\"\"\n\n # Get inputs 
from Data and Model parent\n prev_prediction, prev_features = self.divide_inputs(input_data)\n\n if is_fit_pipeline_stage:\n # Target must be a column or table, not one-dimensional array\n target = np.array(input_data.target)\n if len(target.shape) < 2:\n target = target.reshape((-1, 1))\n\n # Calculate difference between prediction of model and current target\n diff = target - prev_prediction\n\n # Update target\n input_data.target = diff\n # Create OutputData\n output_data = self._convert_to_output(input_data, prev_features)\n # We decompose the target, so in the future we need to ignore\n output_data.supplementary_data.is_main_target = False\n else:\n # For predict stage there is no need to worry about target\n output_data = self._convert_to_output(input_data, prev_features)\n output_data.supplementary_data.is_main_target = False\n\n return output_data\n\n\nclass DecomposerClassImplementation(DecomposerImplementation):\n \"\"\" Class for decomposing target for both binary and multiclass\n classification task\n \"\"\"\n\n def __init__(self, **params: Optional[dict]):\n super().__init__()\n self.params = None\n\n def transform(self, input_data, is_fit_pipeline_stage: Optional[bool]):\n \"\"\"\n Method for modifying input_data\n :param input_data: data with features, target and ids\n :param is_fit_pipeline_stage: is this fit or predict stage for pipeline\n :return input_data: data with transformed features attribute\n \"\"\"\n\n # Task since that model - regression\n regression_task = Task(TaskTypesEnum.regression)\n\n # Get inputs from Data and Model parent\n prev_prediction, prev_features = self.divide_inputs(input_data)\n\n if is_fit_pipeline_stage:\n # Target must be a column or table, not one-dimensional array\n target = np.array(input_data.target)\n if len(target.shape) < 2:\n target = target.reshape((-1, 1))\n\n classes = np.unique(target)\n if len(classes) > 2:\n diff = self._multi_difference(target, prev_prediction)\n else:\n # Binary classification task\n diff = self._binary_difference(classes, target, prev_prediction)\n\n # Update target\n input_data.target = diff\n # Create OutputData\n output_data = self._convert_to_output(input_data, prev_features)\n # We decompose the target, so in the future we need to ignore\n output_data.supplementary_data.is_main_target = False\n output_data.task = regression_task\n else:\n # For predict stage there is no need to worry about target\n output_data = self._convert_to_output(input_data, prev_features)\n output_data.supplementary_data.is_main_target = False\n output_data.task = regression_task\n\n return output_data\n\n @staticmethod\n def _binary_difference(classes, target, prev_prediction):\n \"\"\" Calculates difference between predictions (probabilities) and target\n for binary classification task\n :param classes: which classes are in the target\n :param target: class labels\n :param prev_prediction: predictions from previous classification model\n :return diff: difference between probabilities of classes\n \"\"\"\n minus_class = np.min(classes)\n plus_class = np.max(classes)\n\n minus_ids = np.argwhere(target == minus_class)\n plus_ids = np.argwhere(target == plus_class)\n\n # Replace class labels with probabilities (0.0 or 1.0)\n bin_target = np.copy(target)\n bin_target[minus_ids] = 0.0\n bin_target[plus_ids] = 1.0\n\n diff = bin_target - prev_prediction\n\n return diff\n\n @staticmethod\n def _multi_difference(target, prev_prediction):\n \"\"\" Calculates difference between predictions (probabilities) and target\n for multiclass 
classification task\n\n :param target: class labels\n :param prev_prediction: predictions from previous classification model\n :return diff: difference between probabilities of classes\n \"\"\"\n\n # Make one-hot encoding for target\n binary_enc = OneHotEncoder().fit_transform(target)\n probabilities_target = binary_enc.toarray()\n diff = probabilities_target - prev_prediction\n\n return diff\n"
] | [
[
"numpy.expand_dims",
"numpy.random.seed",
"numpy.array_equal",
"numpy.asarray",
"numpy.arange",
"numpy.power",
"sklearn.datasets.load_iris",
"numpy.random.shuffle",
"numpy.random.rand",
"numpy.array"
],
[
"numpy.std"
],
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"numpy.arange",
"sklearn.metrics.mean_absolute_error",
"sklearn.metrics.mean_squared_error",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"numpy.min",
"numpy.unique",
"sklearn.preprocessing.OneHotEncoder",
"numpy.argwhere",
"numpy.max",
"numpy.copy",
"numpy.argmax",
"numpy.argmin",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
evelkey/vahun | [
"a7967ffd9d8e27911888057b4906fc4221c2a6fe"
] | [
"vahun_cmd.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport tensorflow as tf\nimport sys\nimport numpy as np\nfrom vahun.corpus import Corpus\nfrom vahun.genetic import evolution\nfrom vahun.genetic import experiment\nfrom vahun.tools import Timer\nfrom vahun.tools import explog\nfrom vahun.autoencoder import Autoencoder_ffnn\nfrom vahun.variational_autoencoder import Variational_autoencoder\nfrom vahun.tools import show_performance\nfrom vahun.tools import show_reconstruction\nimport argparse\n\n\n\ndef main(args=None):\n encode=args.encoding_dim\n dictsize=args.corp_len\n popsize=args.pop_size\n corp_path=args.corp_path\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n corp=Corpus(corpus_path=args.corp_path,language=\"Hun\",size=dictsize,encoding_len=args.feature_len,corpus_stream=None,printer=False)\n all_features=corp.featurize_data_charlevel_onehot(corp.hun_lower_unique)\n train=all_features[0:int(len(all_features)*0.8)]\n test=all_features[int(len(all_features)*0.8):len(all_features)]\n x_train = train.reshape((len(train), np.prod(train.shape[1:])))\n x_test = test.reshape((len(test), np.prod(test.shape[1:])))\n \n \n testcorp=Corpus(corpus_path=args.corp_path,language=\"Hun\",size=1000000,encoding_len=args.feature_len,corpus_stream=args.infile,printer=False)\n testdata=corp.featurize_data_charlevel_onehot(corp.hun_lower_unique)\n testdata= testdata.reshape((len(testdata), np.prod(testdata.shape[1:])))\n \n logger=explog(encoder_type=\"Demo_\"+str(encode),\n encoding_dim=encode,feature_len=10,\n lang=\"Hun\",unique_words=0,\n name=\"auto_demo_uni\"+str(encode),population_size=popsize,\n words=len(corp.hun_lower_unique))\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n\n exp=experiment(encoded_width=10,layermax=10,layermin=2,maxw=10,minw=3,out_dim=380)\n #exp.weights=[348, 254, 10, 254, 348, 360]\n exp.weights=[args.encoding_dim, 380]\n exp.len=len(exp.weights)\n \n if args.encoder_type==0:\n encoder=Autoencoder_ffnn(experiment=exp,\n logger=logger,tf_session=sess,\n inputdim=380,\n layerlist=exp.weights,\n encode_index=int(exp.len/2),\n optimizer =tf.train.AdamOptimizer(learning_rate = 0.001),\n nonlinear=tf.sigmoid)\n else:\n encoder=Variational_autoencoder(logger=logger,tf_session=sess,\n inputdim=380,\n encoding_size=args.encoding_dim,\n optimizer =tf.train.AdamOptimizer(learning_rate = 0.001),\n nonlinear=tf.sigmoid)\n encoder.train(x_train,x_test,512,80)\n show_reconstruction(encoder,testdata,corp,length=0)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Autoencoder command line runner')\n parser.add_argument(\"--encoder_type\", dest=\"encoder_type\", default=0, type=int, help=\"0=fully connected ffnn autoencoder, 1=variational ffnn autoencoder\")\n parser.add_argument('--corp_path', dest='corp_path', type=str,default='/mnt/permanent/Language/Hungarian/Corp/Webkorpusz/webkorpusz.wpl',help='Path to the Corpus.')\n parser.add_argument(\"--encoding_dim\", dest=\"encoding_dim\", default=160, type=int, help='Encoding dimension')\n parser.add_argument(\"--corp_len\", dest=\"corp_len\", default=2000000, type=int, help=\"Words to read from corpus\")\n parser.add_argument(\"--feature_len\", dest=\"feature_len\", default=10, type=int, help=\"Feature size\")\n parser.add_argument('--infile', type=argparse.FileType('r'),default='-',help=\"Input stream\")\n \n args = parser.parse_args()\n\n main(args)"
] | [
[
"tensorflow.ConfigProto",
"tensorflow.train.AdamOptimizer",
"numpy.prod",
"tensorflow.Session"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
arc144/siim-pneumothorax | [
"98fdb1fe08e9c001e0191d5024ba6c56ec82a9c8",
"98fdb1fe08e9c001e0191d5024ba6c56ec82a9c8"
] | [
"See/Model_001_f00/submit.py",
"See/Model_000_f00/dump_train_preds.py"
] | [
"import pandas as pd\nfrom zipfile import ZipFile\nimport torch as th\nimport cv2\nimport numpy as np\nimport os\nfrom glob import glob\nimport pydicom\nfrom matplotlib import pyplot as plt\nfrom segmentation_model import FPNSegmentation\n\n\ndef main():\n train_image_fns = sorted(glob(os.path.join(\n 'dicom-images-train', '*/*/*.dcm')))\n m = {os.path.basename(fn): fn for fn in train_image_fns}\n ref_file = 'Model_000_f00/f00-PREDS_VAL.zip'\n slug = 'r50d'\n weight = 'Model_000_f00/[email protected]'\n model = FPNSegmentation(slug)\n model.load_state_dict(th.load(weight))\n model = model.cuda()\n model.eval()\n\n with ZipFile(ref_file) as f:\n for fn in f.namelist()[::10]:\n path = m[fn.replace('.png', '.dcm')]\n img = pydicom.read_file(path).pixel_array\n # pimg = cv2.resize(img, (640, 640), interpolation=cv2.INTER_CUBIC)\n pimg = img.copy()\n X = th.from_numpy(pimg).unsqueeze(0).unsqueeze(0)\n with th.no_grad():\n X = X.cuda().float()\n y_pred = model(X).cpu().numpy().squeeze()\n y_pred_flip = th.flip(model(th.flip(X, (-1, ))),\n (-1, )).cpu().numpy().squeeze()\n y_pred = 0.5 * (y_pred_flip + y_pred)\n y_pred = (y_pred * 255).astype(np.uint8)\n with f.open(fn) as h:\n pred = cv2.imdecode(np.frombuffer(h.read(), 'uint8'), 0)\n\n diff = y_pred != pred\n print(\"DIFF: \", diff.sum())\n plt.subplot(2, 2, 1)\n plt.imshow(img)\n plt.subplot(2, 2, 2)\n plt.imshow(y_pred)\n plt.subplot(2, 2, 3)\n plt.imshow(pred)\n plt.subplot(2, 2, 4)\n plt.imshow(diff)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"import pandas as pd\nimport os\nimport numpy as np\nfrom tqdm import tqdm\nimport cv2\nfrom zipfile import ZipFile\n\nfns = [\n 'Both_SEG_logdir_089_f00/f00-PREDS_VAL.zip',\n 'Both_SEG_logdir_089_f01/f01-PREDS_VAL.zip',\n 'Both_SEG_logdir_089_f02/f02-PREDS_VAL.zip',\n 'Both_SEG_logdir_089_f03/f03-PREDS_VAL.zip',\n 'Both_SEG_logdir_089_f04/f04-PREDS_VAL.zip',\n]\n\n\nout_fn = 'TRAIN_PREDS.csv'\nimage_ids, max_probs = [], []\nhandels = [ZipFile(fn) for fn in fns]\nnum_clf_empty = 0\nfor handle in handels:\n for png in tqdm(handle.namelist()):\n image_id = os.path.splitext(png)[0]\n with handle.open(png) as f:\n img = cv2.imdecode(np.frombuffer(f.read(), 'uint8'), 0)\n p = np.float32(img) / 255\n\n image_ids.append(image_id)\n max_probs.append(p.max())\n\n\npreds = pd.DataFrame({'ImageId': image_ids, 'MaxProb': max_probs})\npreds.to_csv(out_fn, index=False, columns=['ImageId', 'MaxProb'])\nprint(\"Wrote to: %s\" % out_fn)\n"
] | [
[
"matplotlib.pyplot.imshow",
"torch.load",
"torch.from_numpy",
"matplotlib.pyplot.subplot",
"torch.no_grad",
"torch.flip",
"matplotlib.pyplot.show"
],
[
"numpy.float32",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Gavin-Hoang/oneflow | [
"320038ff5efd948516f7259442190f9b31f75027",
"320038ff5efd948516f7259442190f9b31f75027",
"320038ff5efd948516f7259442190f9b31f75027",
"320038ff5efd948516f7259442190f9b31f75027",
"320038ff5efd948516f7259442190f9b31f75027"
] | [
"oneflow/python/test/ops/test_image_decode.py",
"oneflow/python/test/ops/test_TestMultiOutputOrder.py",
"oneflow/python/test/ops/test_TestReshape4KeepHeaderOnly.py",
"oneflow/python/test/ops/test_ccrelu.py",
"oneflow/python/test/ops/test_coco_reader.py"
] | [
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport numpy as np\nimport oneflow as flow\nfrom PIL import Image\nimport oneflow.typing as oft\n\n\ndef _of_image_decode(images):\n image_files = [open(im, \"rb\") for im in images]\n images_bytes = [imf.read() for imf in image_files]\n static_shape = (len(images_bytes), max([len(bys) for bys in images_bytes]))\n for imf in image_files:\n imf.close()\n\n flow.clear_default_session()\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.mirrored_view())\n\n @flow.global_function(func_config)\n def image_decode_job(\n images_def: oft.ListListNumpy.Placeholder(shape=static_shape, dtype=flow.int8)\n ):\n images_buffer = flow.tensor_list_to_tensor_buffer(images_def)\n decoded_images_buffer = flow.image_decode(images_buffer)\n return flow.tensor_buffer_to_tensor_list(\n decoded_images_buffer, shape=(640, 640, 3), dtype=flow.uint8\n )\n\n images_np_arr = [\n np.frombuffer(bys, dtype=np.byte).reshape(1, -1) for bys in images_bytes\n ]\n decoded_images = image_decode_job([images_np_arr]).get().numpy_lists()\n return decoded_images[0]\n\n\ndef _compare_jpg_decode_with_pil(test_case, images, print_debug_info=False):\n r\"\"\"\n The jpg image's decoded results with opencv and pil image are slightly different,\n their green channels have difference of 1.\n \"\"\"\n of_decoded_images = _of_image_decode(images)\n pil_images = [Image.open(image) for image in images]\n # convert image to BGR\n pil_decoded_images = [np.array(image)[:, :, ::-1] for image in pil_images]\n\n for of_decoded_image, pil_decoded_image in zip(\n of_decoded_images, pil_decoded_images\n ):\n of_decoded_image = of_decoded_image.squeeze()\n test_case.assertTrue(len(of_decoded_image.shape) == 3)\n test_case.assertTrue(len(pil_decoded_image.shape) == 3)\n\n diff = of_decoded_image - pil_decoded_image\n diff_index = np.where(diff != 0)\n diff_abs_values = diff[diff_index]\n\n if print_debug_info:\n print(\"of_decoded_image:\\n\", of_decoded_image, of_decoded_image.shape)\n print(\"pil_decoded_image:\\n\", pil_decoded_image, pil_decoded_image.shape)\n print(\"diff_index:\\n\", diff_index)\n print(\"diff_abs_values:\\n\", diff_abs_values)\n print(\n \"of_decoded_image diff:\\n\",\n of_decoded_image[diff_index[0], diff_index[1]],\n )\n print(\n \"pil_decoded_image diff:\\n\",\n pil_decoded_image[diff_index[0], diff_index[1]],\n )\n\n # only green channel has difference of 1\n test_case.assertTrue(np.all(diff_index[-1] == 1))\n test_case.assertTrue(np.all(diff_abs_values == 1))\n\n\ndef test_image_decode(test_case):\n _compare_jpg_decode_with_pil(\n test_case,\n [\n \"/dataset/mscoco_2017/val2017/000000000139.jpg\",\n \"/dataset/mscoco_2017/val2017/000000000632.jpg\",\n ],\n # True,\n )\n",
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport numpy as np\nimport oneflow as flow\nimport oneflow.typing as oft\n\n\ndef TestMultiOutputOrder(x, name):\n return (\n flow.user_op_builder(name)\n .Op(\"TestMultiOutputOrder\")\n .Input(\"in\", [x])\n .Output(\"out1\")\n .Output(\"out2\")\n .Build()\n .InferAndTryRun()\n .RemoteBlobList()\n )\n\n\ndef GenerateTest(test_case, shape):\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(func_config)\n def TestMultiOutputOrderJob(x: oft.Numpy.Placeholder(shape)):\n return TestMultiOutputOrder(x, \"my_2_output_op\")\n\n x = np.random.rand(*shape).astype(np.float32)\n # print(\"x\", x)\n out1, out2 = TestMultiOutputOrderJob(x).get()\n out1_ndarray = out1.numpy()\n out2_ndarray = out2.numpy()\n # print(\"out1\", out1_ndarray)\n # print(\"out2\", out2_ndarray)\n out2_shape = list(shape)\n out2_shape[-1] = out2_shape[-1] * 2\n out2_shape = tuple(out2_shape)\n test_case.assertTrue(shape == out1_ndarray.shape)\n test_case.assertTrue(out2_shape == out2_ndarray.shape)\n test_case.assertTrue(np.allclose(x, out1_ndarray))\n test_case.assertTrue(\n np.allclose(np.zeros(out2_shape, dtype=np.float32), out2_ndarray)\n )\n\n\ndef test_TestMultiOutputOrder_example_1(test_case):\n GenerateTest(test_case, (7,))\n\n\ndef test_TestMultiOutputOrder_example_2(test_case):\n GenerateTest(test_case, (2, 5,))\n\n\ndef test_TestMultiOutputOrder_example_3(test_case):\n GenerateTest(test_case, (3, 3, 2,))\n",
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport os\nfrom collections import OrderedDict\n\nimport numpy as np\nimport oneflow as flow\nimport tensorflow as tf\nimport test_global_storage\nfrom test_util import GenArgList\n\ngpus = tf.config.experimental.list_physical_devices(\"GPU\")\nfor gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\n\ndef TestReshape(x, shape, name):\n return (\n flow.user_op_builder(name)\n .Op(\"TestReshape4KeepHeaderOnly\")\n .Input(\"in\", [x])\n .Output(\"out\")\n .Attr(\"shape\", shape)\n .Build()\n .InferAndTryRun()\n .RemoteBlobList()[0]\n )\n\n\ndef compare_with_tensorflow(device_type, input_shape, output_shape):\n assert device_type in [\"gpu\", \"cpu\"]\n flow.clear_default_session()\n\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.train.primary_lr(1e-4)\n func_config.train.model_update_conf(dict(naive_conf={}))\n\n @flow.global_function(func_config)\n def ReshapeJob():\n with flow.scope.placement(device_type, \"0:0\"):\n x = flow.get_variable(\n \"x\",\n shape=input_shape,\n dtype=flow.float,\n initializer=flow.random_uniform_initializer(minval=-10, maxval=10),\n trainable=True,\n )\n loss = TestReshape(x, output_shape, \"my_test_reshape\")\n flow.losses.add_loss(loss)\n\n flow.watch(x, test_global_storage.Setter(\"x\"))\n flow.watch_diff(x, test_global_storage.Setter(\"x_diff\"))\n flow.watch(loss, test_global_storage.Setter(\"loss\"))\n flow.watch_diff(loss, test_global_storage.Setter(\"loss_diff\"))\n\n return loss\n\n # OneFlow\n check_point = flow.train.CheckPoint()\n check_point.init()\n of_out = ReshapeJob().get()\n # TensorFlow\n with tf.GradientTape(persistent=True) as tape:\n x = tf.Variable(test_global_storage.Get(\"x\"))\n tf_out = tf.reshape(x, output_shape)\n loss_diff = test_global_storage.Get(\"loss_diff\")\n tf_x_diff = tape.gradient(tf_out, x, loss_diff)\n\n assert np.allclose(of_out.numpy(), tf_out.numpy(), rtol=1e-5, atol=1e-5)\n assert np.allclose(\n test_global_storage.Get(\"x_diff\"), tf_x_diff.numpy(), rtol=1e-5, atol=1e-5\n )\n\n\ndef test_TestReshape_train_keep_header_only_grad(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"device_type\"] = [\"gpu\"]\n arg_dict[\"input_shape\"] = [(10, 10, 10)]\n arg_dict[\"output_shape\"] = [(100, 10), (10, 100), (5, 20, 10)]\n for arg in GenArgList(arg_dict):\n compare_with_tensorflow(*arg)\n",
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport numpy as np\nimport oneflow as flow\nimport oneflow.typing as oft\n\n\ndef ccrelu(x, name):\n return (\n flow.user_op_builder(name)\n .Op(\"ccrelu\")\n .Input(\"in\", [x])\n .Output(\"out\")\n .Build()\n .InferAndTryRun()\n .RemoteBlobList()[0]\n )\n\n\ndef fixed_tensor_def_test(test_case, func_config):\n func_config.default_data_type(flow.float)\n\n @flow.global_function(func_config)\n def ReluJob(a: oft.Numpy.Placeholder((5, 2))):\n return ccrelu(a, \"my_cc_relu_op\")\n\n x = np.random.rand(5, 2).astype(np.float32)\n y = ReluJob(x).get().numpy()\n test_case.assertTrue(np.array_equal(y, np.maximum(x, 0)))\n\n\ndef mirrored_tensor_def_test(test_case, func_config):\n func_config.default_data_type(flow.float)\n\n @flow.global_function(func_config)\n def ReluJob(a: oft.ListNumpy.Placeholder((5, 2))):\n return ccrelu(a, \"my_cc_relu_op\")\n\n x = np.random.rand(3, 1).astype(np.float32)\n y = ReluJob([x]).get().numpy_list()[0]\n test_case.assertTrue(np.array_equal(y, np.maximum(x, 0)))\n\n\ndef test_ccrelu(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_logical_view(flow.scope.consistent_view())\n fixed_tensor_def_test(test_case, func_config)\n\n\ndef test_mirror_ccrelu(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_logical_view(flow.scope.mirrored_view())\n mirrored_tensor_def_test(test_case, func_config)\n\n\ndef test_1n2c_mirror_dynamic_ccrelu(test_case):\n flow.config.gpu_device_num(2)\n func_config = flow.FunctionConfig()\n func_config.default_logical_view(flow.scope.mirrored_view())\n func_config.default_data_type(flow.float)\n\n @flow.global_function(func_config)\n def ReluJob(a: oft.ListNumpy.Placeholder((5, 2))):\n return ccrelu(a, \"my_cc_relu_op\")\n\n x1 = np.random.rand(3, 1).astype(np.float32)\n x2 = np.random.rand(4, 2).astype(np.float32)\n y1, y2 = ReluJob([x1, x2]).get().numpy_list()\n test_case.assertTrue(np.array_equal(y1, np.maximum(x1, 0)))\n test_case.assertTrue(np.array_equal(y2, np.maximum(x2, 0)))\n\n\[email protected]_nodes_required(2)\ndef test_ccrelu_2n1c(test_case):\n func_config = flow.FunctionConfig()\n func_config.default_logical_view(flow.scope.consistent_view())\n fixed_tensor_def_test(test_case, func_config)\n",
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport math\nimport os\n\nimport cv2\nimport numpy as np\nimport oneflow as flow\n\nVERBOSE = False\ncoco_dict = dict()\n\n\ndef _coco(anno_file):\n global coco_dict\n\n if anno_file not in coco_dict:\n from pycocotools.coco import COCO\n\n coco_dict[anno_file] = COCO(anno_file)\n\n return coco_dict[anno_file]\n\n\ndef _make_coco_data_load_fn(\n anno_file,\n image_dir,\n nthread,\n batch_size,\n stride_partition,\n shuffle_after_epoch,\n ret_image_id_only=False,\n):\n flow.clear_default_session()\n flow.config.cpu_device_num(4)\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n func_config.default_logical_view(flow.scope.consistent_view())\n\n @flow.global_function(func_config)\n def coco_load_fn():\n with flow.scope.placement(\"cpu\", \"0:0-{}\".format(nthread - 1)):\n (\n image,\n image_id,\n image_size,\n gt_bbox,\n gt_label,\n gt_segm,\n gt_segm_index,\n ) = flow.data.coco_reader(\n annotation_file=anno_file,\n image_dir=image_dir,\n batch_size=batch_size,\n shuffle=shuffle_after_epoch,\n stride_partition=stride_partition,\n name=\"COCOReader\",\n )\n\n if ret_image_id_only:\n return image_id\n\n decoded_image = flow.image_decode(image, dtype=flow.float)\n image_list = flow.tensor_buffer_to_tensor_list(\n decoded_image, shape=(800, 1333, 3), dtype=flow.float\n )\n bbox_list = flow.tensor_buffer_to_tensor_list(\n gt_bbox, shape=(128, 4), dtype=flow.float\n )\n label_list = flow.tensor_buffer_to_tensor_list(\n gt_label, shape=(128,), dtype=flow.int32\n )\n segm_list = flow.tensor_buffer_to_tensor_list(\n gt_segm, shape=(1024, 2), dtype=flow.float\n )\n segm_index_list = flow.tensor_buffer_to_tensor_list(\n gt_segm_index, shape=(1024, 3), dtype=flow.int32\n )\n\n return (\n image_id,\n image_size,\n image_list,\n bbox_list,\n label_list,\n segm_list,\n segm_index_list,\n )\n\n return coco_load_fn\n\n\ndef _get_coco_image_samples(anno_file, image_dir, image_ids):\n coco = _coco(anno_file)\n category_id_to_contiguous_id_map = _get_category_id_to_contiguous_id_map(coco)\n image, image_size = _read_images_with_cv(coco, image_dir, image_ids)\n bbox = _read_bbox(coco, image_ids)\n label = _read_label(coco, image_ids, category_id_to_contiguous_id_map)\n img_segm_poly_list = _read_segm_poly(coco, image_ids)\n poly, poly_index = _segm_poly_list_to_tensor(img_segm_poly_list)\n samples = []\n for im, ims, b, l, p, pi in zip(image, image_size, bbox, label, poly, poly_index):\n samples.append(\n dict(image=im, image_size=ims, bbox=b, label=l, poly=p, poly_index=pi)\n )\n return samples\n\n\ndef _get_category_id_to_contiguous_id_map(coco):\n return {v: i + 1 for i, v in enumerate(coco.getCatIds())}\n\n\ndef _read_images_with_cv(coco, image_dir, image_ids):\n image_files = [\n os.path.join(image_dir, coco.imgs[img_id][\"file_name\"]) for img_id in image_ids\n ]\n image_size = [\n (coco.imgs[img_id][\"height\"], coco.imgs[img_id][\"width\"])\n for img_id in image_ids\n 
]\n return (\n [cv2.imread(image_file).astype(np.single) for image_file in image_files],\n image_size,\n )\n\n\ndef _bbox_convert_from_xywh_to_xyxy(bbox, image_h, image_w):\n x, y, w, h = bbox\n x1, y1 = x, y\n x2 = x1 + max(w - 1, 0)\n y2 = y1 + max(h - 1, 0)\n\n # clip to image\n x1 = min(max(x1, 0), image_w - 1)\n y1 = min(max(y1, 0), image_h - 1)\n x2 = min(max(x2, 0), image_w - 1)\n y2 = min(max(y2, 0), image_h - 1)\n\n if x1 >= x2 or y1 >= y2:\n return None\n\n return [x1, y1, x2, y2]\n\n\ndef _read_bbox(coco, image_ids):\n img_bbox_list = []\n for img_id in image_ids:\n anno_ids = coco.getAnnIds(imgIds=[img_id])\n assert len(anno_ids) > 0, \"image with id {} has no anno\".format(img_id)\n image_h = coco.imgs[img_id][\"height\"]\n image_w = coco.imgs[img_id][\"width\"]\n\n bbox_list = []\n for anno_id in anno_ids:\n anno = coco.anns[anno_id]\n if anno[\"iscrowd\"] != 0:\n continue\n\n bbox = anno[\"bbox\"]\n assert isinstance(bbox, list)\n bbox_ = _bbox_convert_from_xywh_to_xyxy(bbox, image_h, image_w)\n if bbox_ is not None:\n bbox_list.append(bbox_)\n\n bbox_array = np.array(bbox_list, dtype=np.single)\n img_bbox_list.append(bbox_array)\n\n return img_bbox_list\n\n\ndef _read_label(coco, image_ids, category_id_to_contiguous_id_map):\n img_label_list = []\n for img_id in image_ids:\n anno_ids = coco.getAnnIds(imgIds=[img_id])\n assert len(anno_ids) > 0, \"image with id {} has no anno\".format(img_id)\n\n label_list = []\n for anno_id in anno_ids:\n anno = coco.anns[anno_id]\n if anno[\"iscrowd\"] != 0:\n continue\n cate_id = anno[\"category_id\"]\n isinstance(cate_id, int)\n label_list.append(category_id_to_contiguous_id_map[cate_id])\n label_array = np.array(label_list, dtype=np.int32)\n img_label_list.append(label_array)\n return img_label_list\n\n\ndef _read_segm_poly(coco, image_ids):\n img_segm_poly_list = []\n for img_id in image_ids:\n anno_ids = coco.getAnnIds(imgIds=[img_id])\n assert len(anno_ids) > 0, \"img {} has no anno\".format(img_id)\n\n segm_poly_list = []\n for anno_id in anno_ids:\n anno = coco.anns[anno_id]\n if anno[\"iscrowd\"] != 0:\n continue\n segm = anno[\"segmentation\"]\n assert isinstance(segm, list)\n assert len(segm) > 0, str(len(segm))\n assert all([len(poly) > 0 for poly in segm]), str(\n [len(poly) for poly in segm]\n )\n segm_poly_list.append(segm)\n\n img_segm_poly_list.append(segm_poly_list)\n\n return img_segm_poly_list\n\n\ndef _segm_poly_list_to_tensor(img_segm_poly_list):\n poly_array_list = []\n poly_index_array_list = []\n for img_idx, segm_poly_list in enumerate(img_segm_poly_list):\n img_poly_elem_list = []\n img_poly_index_list = []\n\n for obj_idx, poly_list in enumerate(segm_poly_list):\n for poly_idx, poly in enumerate(poly_list):\n img_poly_elem_list.extend(poly)\n for pt_idx, pt in enumerate(poly):\n if pt_idx % 2 == 0:\n img_poly_index_list.append([pt_idx / 2, poly_idx, obj_idx])\n\n img_poly_array = np.array(img_poly_elem_list, dtype=np.single).reshape(-1, 2)\n assert img_poly_array.size > 0, segm_poly_list\n poly_array_list.append(img_poly_array)\n\n img_poly_index_array = np.array(img_poly_index_list, dtype=np.int32)\n assert img_poly_index_array.size > 0, segm_poly_list\n poly_index_array_list.append(img_poly_index_array)\n\n return poly_array_list, poly_index_array_list\n\n\ndef _get_coco_sorted_imgs(anno_file):\n coco = _coco(anno_file)\n img_ids = coco.getImgIds()\n img_ids.sort()\n img_info_list = []\n for i, img_id in enumerate(img_ids):\n img_h = coco.imgs[img_id][\"height\"]\n img_w = coco.imgs[img_id][\"width\"]\n 
group_id = int(img_h / img_w)\n anno_ids = coco.getAnnIds(imgIds=img_id, iscrowd=None)\n anno = coco.loadAnns(anno_ids)\n if not _has_valid_annotation(anno):\n continue\n\n img_info_list.append(\n dict(index=i, image_id=img_id, group_id=group_id, anno_len=len(anno_ids))\n )\n\n return img_info_list\n\n\ndef _count_visible_keypoints(anno):\n return sum(sum(1 for v in ann[\"keypoints\"][2::3] if v > 0) for ann in anno)\n\n\ndef _has_only_empty_bbox(anno):\n return all(any(o <= 1 for o in obj[\"bbox\"][2:]) for obj in anno)\n\n\ndef _has_valid_annotation(anno):\n # if it's empty, there is no annotation\n if len(anno) == 0:\n return False\n # if all boxes have close to zero area, there is no annotation\n if _has_only_empty_bbox(anno):\n return False\n # keypoints task have a slight different critera for considering\n # if an annotation is valid\n if \"keypoints\" not in anno[0]:\n return True\n # for keypoint detection tasks, only consider valid images those\n # containing at least min_keypoints_per_image\n if _count_visible_keypoints(anno) >= 10:\n return True\n\n return False\n\n\nclass GroupedDistributedSampler(object):\n def __init__(self, shards, batch_size, images, stride_sample, max_iter=3):\n assert batch_size % shards == 0\n self._images = images\n self._shards = shards\n self._shard_size = math.ceil(len(images) / shards)\n self._batch_size = batch_size\n self._batch_size_per_shard = batch_size // shards\n self._stride_sample = stride_sample\n self._max_iter = max_iter\n self._init_sample_idx()\n self._init_group_buckets()\n\n def _init_sample_idx(self):\n if self._stride_sample:\n self._sample_idx = list(range(self._shards))\n else:\n self._sample_idx = [rank * self._shard_size for rank in range(self._shards)]\n self._sample_idx_in_shard = [0 for _ in range(self._shards)]\n\n def _init_group_buckets(self):\n self._group_buckets = [[[] for _ in range(2)] for _ in range(self._shards)]\n\n def __iter__(self):\n for i in range(self._max_iter):\n sample_ids = []\n for rank in range(self._shards):\n sample_cnt_cur_rank = 0\n sample_ids_cur_rank = []\n group_buckets_cur_rank = self._group_buckets[rank]\n\n if (\n len(group_buckets_cur_rank[0]) > 0\n and len(group_buckets_cur_rank[1]) > 0\n ):\n if (\n group_buckets_cur_rank[0][0][\"index\"]\n < group_buckets_cur_rank[1][0][\"index\"]\n ):\n sample = group_buckets_cur_rank[0].pop(0)\n else:\n sample = group_buckets_cur_rank[1].pop(0)\n elif len(group_buckets_cur_rank[0]) > 0:\n sample = group_buckets_cur_rank[0].pop(0)\n elif len(group_buckets_cur_rank[1]) > 0:\n sample = group_buckets_cur_rank[1].pop(0)\n else:\n sample = self._next_sample(rank)\n\n group_id = sample[\"group_id\"]\n sample_ids_cur_rank.append(sample[\"image_id\"])\n sample_cnt_cur_rank += 1\n\n while sample_cnt_cur_rank < self._batch_size_per_shard:\n if len(group_buckets_cur_rank[group_id]) > 0:\n sample = group_buckets_cur_rank[group_id].pop(0)\n sample_ids_cur_rank.append(sample[\"image_id\"])\n sample_cnt_cur_rank += 1\n continue\n\n sample = self._next_sample(rank)\n\n if sample[\"group_id\"] == group_id:\n sample_ids_cur_rank.append(sample[\"image_id\"])\n sample_cnt_cur_rank += 1\n else:\n group_buckets_cur_rank[sample[\"group_id\"]].append(sample)\n\n sample_ids.extend(sample_ids_cur_rank)\n\n yield sample_ids\n\n def _next_sample(self, rank):\n sample = self._images[self._sample_idx[rank]]\n if self._stride_sample:\n self._sample_idx[rank] += self._shards\n else:\n self._sample_idx_in_shard[rank] += 1\n self._sample_idx[rank] += 1\n if 
self._sample_idx_in_shard[rank] == self._shard_size:\n self._sample_idx[rank] += (self._shards - 1) * self._shard_size\n self._sample_idx_in_shard[rank] = 0\n\n if self._sample_idx[rank] >= len(self._images):\n self._sample_idx[rank] %= len(self._images)\n\n return sample\n\n\ndef test_coco_reader(test_case, verbose=VERBOSE):\n anno_file = \"/dataset/mscoco_2017/annotations/instances_val2017.json\"\n image_dir = \"/dataset/mscoco_2017/val2017\"\n\n of_coco_load_fn = _make_coco_data_load_fn(anno_file, image_dir, 1, 2, True, True)\n image_id, image_size, image, bbox, label, poly, poly_index = of_coco_load_fn().get()\n image_id = image_id.numpy()\n image_size = image_size.numpy()\n image = image.numpy_lists()\n bbox = bbox.numpy_lists()\n label = label.numpy_lists()\n poly = poly.numpy_lists()\n poly_index = poly_index.numpy_lists()\n\n samples = _get_coco_image_samples(anno_file, image_dir, image_id)\n for i, sample in enumerate(samples):\n if verbose:\n print(\n \"#{} of label:\\n\".format(i),\n label[0][i].squeeze(),\n type(label[0][i].squeeze()),\n label[0][i].squeeze().shape,\n )\n print(\n \"#{} coco label:\\n\".format(i),\n sample[\"label\"],\n type(sample[\"label\"]),\n sample[\"label\"].shape,\n )\n test_case.assertTrue(np.array_equal(image[0][i].squeeze(), sample[\"image\"]))\n test_case.assertTrue(np.array_equal(image_size[i], sample[\"image_size\"]))\n test_case.assertTrue(np.allclose(bbox[0][i].squeeze(), sample[\"bbox\"]))\n cur_label = label[0][i].squeeze()\n if len(cur_label.shape) == 0:\n # when cur_label is scalar\n cur_label = np.array([cur_label])\n test_case.assertTrue(np.array_equal(cur_label, sample[\"label\"]))\n test_case.assertTrue(np.allclose(poly[0][i].squeeze(), sample[\"poly\"]))\n test_case.assertTrue(\n np.array_equal(poly_index[0][i].squeeze(), sample[\"poly_index\"])\n )\n\n\ndef test_coco_reader_distributed_stride(test_case, verbose=VERBOSE):\n anno_file = \"/dataset/mscoco_2017/annotations/instances_val2017.json\"\n image_dir = \"/dataset/mscoco_2017/val2017\"\n\n image_info_list = _get_coco_sorted_imgs(anno_file)\n if verbose:\n print(\"Info of the first 20 images:\")\n for i, image_info in enumerate(image_info_list[:20]):\n print(\n \"index: {}, image_id: {}, group_id: {}, anno len: {}\".format(\n i,\n image_info[\"image_id\"],\n image_info[\"group_id\"],\n image_info[\"anno_len\"],\n )\n )\n\n sampler = GroupedDistributedSampler(4, 8, image_info_list, True)\n of_coco_load_fn = _make_coco_data_load_fn(\n anno_file, image_dir, 4, 8, True, False, True\n )\n for i, sample_ids in enumerate(sampler):\n image_id = of_coco_load_fn().get().numpy()\n if verbose:\n print(\"#{} image_id:\".format(i), image_id)\n print(\"#{} sample_ids:\".format(i), sample_ids)\n test_case.assertTrue(np.array_equal(image_id, sample_ids))\n\n\ndef test_coco_reader_distributed_contiguous(test_case, verbose=VERBOSE):\n anno_file = \"/dataset/mscoco_2017/annotations/instances_val2017.json\"\n image_dir = \"/dataset/mscoco_2017/val2017\"\n\n image_info_list = _get_coco_sorted_imgs(anno_file)\n sampler = GroupedDistributedSampler(4, 8, image_info_list, False)\n of_coco_load_fn = _make_coco_data_load_fn(\n anno_file, image_dir, 4, 8, False, False, True\n )\n for i, sample_ids in enumerate(sampler):\n image_id = of_coco_load_fn().get().numpy()\n if verbose:\n print(\"#{} image_id:\".format(i), image_id)\n print(\"#{} sample_ids:\".format(i), sample_ids)\n test_case.assertTrue(np.array_equal(image_id, sample_ids))\n"
] | [
[
"numpy.all",
"numpy.frombuffer",
"numpy.array",
"numpy.where"
],
[
"numpy.random.rand",
"numpy.zeros",
"numpy.allclose"
],
[
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.reshape",
"tensorflow.GradientTape",
"tensorflow.config.experimental.set_memory_growth"
],
[
"numpy.maximum",
"numpy.random.rand"
],
[
"numpy.array",
"numpy.array_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dutxubo/mmdetection | [
"607d2fc0bdff5a8f07e6a92da899505bd083dfc5"
] | [
"mmdet/models/bbox_heads/bbox_head.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.modules.utils import _pair\n\nfrom mmdet.core import (auto_fp16, bbox_target, delta2bbox, force_fp32,\n multiclass_nms)\nfrom ..builder import build_loss\nfrom ..losses import accuracy\nfrom ..registry import HEADS\nimport numpy as np\n\[email protected]_module\nclass BBoxHead(nn.Module):\n \"\"\"Simplest RoI head, with only two fc layers for classification and\n regression respectively\"\"\"\n\n def __init__(self,\n with_avg_pool=False,\n with_cls=True,\n with_reg=True,\n roi_feat_size=7,\n in_channels=256,\n num_classes=81,\n target_means=[0., 0., 0., 0.],\n target_stds=[0.1, 0.1, 0.2, 0.2],\n reg_class_agnostic=False,\n loss_cls=dict(\n type='CrossEntropyLoss',\n use_sigmoid=False,\n loss_weight=1.0),\n loss_bbox=dict(\n type='SmoothL1Loss', beta=1.0, loss_weight=1.0)):\n super(BBoxHead, self).__init__()\n assert with_cls or with_reg\n self.with_avg_pool = with_avg_pool\n self.with_cls = with_cls\n self.with_reg = with_reg\n self.roi_feat_size = _pair(roi_feat_size)\n self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1]\n self.in_channels = in_channels\n self.num_classes = num_classes\n self.target_means = target_means\n self.target_stds = target_stds\n self.reg_class_agnostic = reg_class_agnostic\n self.fp16_enabled = False\n\n self.loss_cls = build_loss(loss_cls)\n self.loss_bbox = build_loss(loss_bbox)\n\n in_channels = self.in_channels\n if self.with_avg_pool:\n self.avg_pool = nn.AvgPool2d(self.roi_feat_size)\n else:\n in_channels *= self.roi_feat_area\n if self.with_cls:\n self.fc_cls = nn.Linear(in_channels, num_classes)\n if self.with_reg:\n out_dim_reg = 4 if reg_class_agnostic else 4 * num_classes\n self.fc_reg = nn.Linear(in_channels, out_dim_reg)\n self.debug_imgs = None\n\n def init_weights(self):\n # conv layers are already initialized by ConvModule\n if self.with_cls:\n nn.init.normal_(self.fc_cls.weight, 0, 0.01)\n nn.init.constant_(self.fc_cls.bias, 0)\n if self.with_reg:\n nn.init.normal_(self.fc_reg.weight, 0, 0.001)\n nn.init.constant_(self.fc_reg.bias, 0)\n\n @auto_fp16()\n def forward(self, x):\n if self.with_avg_pool:\n x = self.avg_pool(x)\n x = x.view(x.size(0), -1)\n cls_score = self.fc_cls(x) if self.with_cls else None\n bbox_pred = self.fc_reg(x) if self.with_reg else None\n return cls_score, bbox_pred\n\n def get_target(self, sampling_results, gt_bboxes, gt_labels,\n rcnn_train_cfg):\n pos_proposals = [res.pos_bboxes for res in sampling_results]\n neg_proposals = [res.neg_bboxes for res in sampling_results]\n pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results]\n pos_gt_labels = [res.pos_gt_labels for res in sampling_results]\n reg_classes = 1 if self.reg_class_agnostic else self.num_classes\n cls_reg_targets = bbox_target(\n pos_proposals,\n neg_proposals,\n pos_gt_bboxes,\n pos_gt_labels,\n rcnn_train_cfg,\n reg_classes,\n target_means=self.target_means,\n target_stds=self.target_stds)\n return cls_reg_targets\n\n @force_fp32(apply_to=('cls_score', 'bbox_pred'))\n def loss(self,\n cls_score,\n bbox_pred,\n labels,\n label_weights,\n bbox_targets,\n bbox_weights,\n reduction_override=None):\n losses = dict()\n if cls_score is not None:\n avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)\n\n if cls_score.numel() > 0:\n losses['loss_cls'] = self.loss_cls(\n cls_score,\n labels,\n label_weights,\n avg_factor=avg_factor,\n reduction_override=reduction_override)\n losses['acc'] = accuracy(cls_score, labels)\n if bbox_pred is not None:\n 
pos_inds = labels > 0\n if pos_inds.any():\n if self.reg_class_agnostic:\n pos_bbox_pred = bbox_pred.view(\n bbox_pred.size(0), 4)[pos_inds.type(torch.bool)]\n else:\n pos_bbox_pred = bbox_pred.view(\n bbox_pred.size(0), -1,\n 4)[pos_inds.type(torch.bool),\n labels[pos_inds.type(torch.bool)]]\n losses['loss_bbox'] = self.loss_bbox(\n pos_bbox_pred,\n bbox_targets[pos_inds.type(torch.bool)],\n bbox_weights[pos_inds.type(torch.bool)],\n avg_factor=bbox_targets.size(0),\n reduction_override=reduction_override)\n return losses\n\n @force_fp32(apply_to=('cls_score', 'bbox_pred'))\n def get_det_bboxes(self,\n rois,\n cls_score,\n bbox_pred,\n img_shape,\n scale_factor,\n rescale=False,\n cfg=None):\n if isinstance(cls_score, list):\n cls_score = sum(cls_score) / float(len(cls_score))\n scores = F.softmax(cls_score, dim=1) if cls_score is not None else None\n\n if bbox_pred is not None:\n bboxes = delta2bbox(rois[:, 1:], bbox_pred, self.target_means,\n self.target_stds, img_shape)\n else:\n bboxes = rois[:, 1:].clone()\n if img_shape is not None:\n bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1)\n bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1)\n\n if rescale:\n if isinstance(scale_factor, float):\n bboxes /= scale_factor\n elif isinstance(scale_factor, torch.Tensor):\n scale_factor = scale_factor.to(bboxes.device)\n bboxes = (bboxes.view(bboxes.size(0), -1, 4) /\n scale_factor).view(bboxes.size()[0], -1)\n else:\n scale_factor = torch.from_numpy(scale_factor).to(bboxes.device)\n bboxes = (bboxes.view(bboxes.size(0), -1, 4) /\n scale_factor).view(bboxes.size()[0], -1)\n\n if cfg is None:\n return bboxes, scores\n else:\n det_bboxes, det_labels = multiclass_nms(bboxes, scores,\n cfg.score_thr, cfg.nms,\n cfg.max_per_img)\n\n return det_bboxes, det_labels\n\n @force_fp32(apply_to=('bbox_preds', ))\n def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):\n \"\"\"Refine bboxes during training.\n\n Args:\n rois (Tensor): Shape (n*bs, 5), where n is image number per GPU,\n and bs is the sampled RoIs per image. The first column is\n the image id and the next 4 columns are x1, y1, x2, y2.\n labels (Tensor): Shape (n*bs, ).\n bbox_preds (Tensor): Shape (n*bs, 4) or (n*bs, 4*#class).\n pos_is_gts (list[Tensor]): Flags indicating if each positive bbox\n is a gt bbox.\n img_metas (list[dict]): Meta info of each image.\n\n Returns:\n list[Tensor]: Refined bboxes of each image in a mini-batch.\n\n Example:\n >>> # xdoctest: +REQUIRES(module:kwarray)\n >>> import kwarray\n >>> import numpy as np\n >>> from mmdet.core.bbox.demodata import random_boxes\n >>> self = BBoxHead(reg_class_agnostic=True)\n >>> n_roi = 2\n >>> n_img = 4\n >>> scale = 512\n >>> rng = np.random.RandomState(0)\n >>> img_metas = [{'img_shape': (scale, scale)}\n ... for _ in range(n_img)]\n >>> # Create rois in the expected format\n >>> roi_boxes = random_boxes(n_roi, scale=scale, rng=rng)\n >>> img_ids = torch.randint(0, n_img, (n_roi,))\n >>> img_ids = img_ids.float()\n >>> rois = torch.cat([img_ids[:, None], roi_boxes], dim=1)\n >>> # Create other args\n >>> labels = torch.randint(0, 2, (n_roi,)).long()\n >>> bbox_preds = random_boxes(n_roi, scale=scale, rng=rng)\n >>> # For each image, pretend random positive boxes are gts\n >>> is_label_pos = (labels.numpy() > 0).astype(np.int)\n >>> lbl_per_img = kwarray.group_items(is_label_pos,\n ... img_ids.numpy())\n >>> pos_per_img = [sum(lbl_per_img.get(gid, []))\n ... 
for gid in range(n_img)]\n >>> pos_is_gts = [\n >>> torch.randint(0, 2, (npos,)).byte().sort(\n >>> descending=True)[0]\n >>> for npos in pos_per_img\n >>> ]\n >>> bboxes_list = self.refine_bboxes(rois, labels, bbox_preds,\n >>> pos_is_gts, img_metas)\n >>> print(bboxes_list)\n \"\"\"\n img_ids = rois[:, 0].long().unique(sorted=True)\n assert img_ids.numel() <= len(img_metas)\n\n bboxes_list = []\n for i in range(len(img_metas)):\n inds = torch.nonzero(rois[:, 0] == i).squeeze(dim=1)\n num_rois = inds.numel()\n\n bboxes_ = rois[inds, 1:]\n label_ = labels[inds]\n bbox_pred_ = bbox_preds[inds]\n img_meta_ = img_metas[i]\n pos_is_gts_ = pos_is_gts[i]\n\n bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,\n img_meta_)\n\n # filter gt bboxes\n pos_keep = 1 - pos_is_gts_\n keep_inds = pos_is_gts_.new_ones(num_rois)\n keep_inds[:len(pos_is_gts_)] = pos_keep\n\n bboxes_list.append(bboxes[keep_inds.type(torch.bool)])\n\n return bboxes_list\n\n @force_fp32(apply_to=('bbox_pred', ))\n def regress_by_class(self, rois, label, bbox_pred, img_meta):\n \"\"\"Regress the bbox for the predicted class. Used in Cascade R-CNN.\n\n Args:\n rois (Tensor): shape (n, 4) or (n, 5)\n label (Tensor): shape (n, )\n bbox_pred (Tensor): shape (n, 4*(#class+1)) or (n, 4)\n img_meta (dict): Image meta info.\n\n Returns:\n Tensor: Regressed bboxes, the same shape as input rois.\n \"\"\"\n assert rois.size(1) == 4 or rois.size(1) == 5, repr(rois.shape)\n\n if not self.reg_class_agnostic:\n label = label * 4\n inds = torch.stack((label, label + 1, label + 2, label + 3), 1)\n bbox_pred = torch.gather(bbox_pred, 1, inds)\n assert bbox_pred.size(1) == 4\n\n if rois.size(1) == 4:\n new_rois = delta2bbox(rois, bbox_pred, self.target_means,\n self.target_stds, img_meta['img_shape'])\n else:\n bboxes = delta2bbox(rois[:, 1:], bbox_pred, self.target_means,\n self.target_stds, img_meta['img_shape'])\n new_rois = torch.cat((rois[:, [0]], bboxes), dim=1)\n\n return new_rois\n"
] | [
[
"torch.nn.functional.softmax",
"torch.cat",
"torch.nn.init.constant_",
"torch.gather",
"torch.from_numpy",
"torch.sum",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.init.normal_",
"torch.nonzero",
"torch.nn.modules.utils._pair",
"torch.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ITWSDataScience/EducationFundingAnalysisInCaliforniaGroup3Fall2021 | [
"1c7fb5265fb0a354c2a8a438fbe00f073667c397"
] | [
"data_manipulation.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Data Manipulation\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/1dnbNHRyDzukEq2IVv2Bx2tdzVEbLZJc0\n\"\"\"\n\nimport pandas as pd\n\ndf1 = pd.read_csv(\"district_school_data.csv\")\ndf2 = pd.read_csv(\"district_expense_data.csv\")\nd = {'District': [], 'County': [], 'Students':[], 'Graduation Rate':[], 'Cost':[]}\ndf3 = pd.DataFrame(data=d)\n\nfor i1, row1 in df1.iterrows():\n for i2, row2 in df2.iterrows():\n if row1['District'] == row2['DISTRICT']:\n dprime = {'District': [row1['District']], 'County': [row1['County']], 'Students':[row1['Students']], 'Graduation Rate':[row1['Grad Rate']], 'Cost':[row2[' EDP 365 ']]}\n df4 = pd.DataFrame(data=dprime)\n df3 = df3.append(df4)\ndf3.to_csv(\"district_data.csv\")\n\nprint(len(df3))\n\nprint(df3)"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
JarnoRFB/statsmodels | [
"9913b6a4f0243f94a6331c90a3951849f06720f7"
] | [
"statsmodels/tsa/statespace/tests/test_options.py"
] | [
"\"\"\"\nTests for setting options in KalmanFilter, KalmanSmoother, SimulationSmoother\n\n(does not test the filtering, smoothing, or simulation smoothing for each\noption)\n\nAuthor: Chad Fulton\nLicense: Simplified-BSD\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport numpy as np\nfrom statsmodels.tsa.statespace.kalman_filter import (\n FILTER_CONVENTIONAL,\n FILTER_EXACT_INITIAL,\n FILTER_AUGMENTED,\n FILTER_SQUARE_ROOT,\n FILTER_UNIVARIATE,\n FILTER_COLLAPSED,\n FILTER_EXTENDED,\n FILTER_UNSCENTED,\n\n INVERT_UNIVARIATE,\n SOLVE_LU,\n INVERT_LU,\n SOLVE_CHOLESKY,\n INVERT_CHOLESKY,\n\n STABILITY_FORCE_SYMMETRY,\n\n MEMORY_STORE_ALL,\n MEMORY_NO_FORECAST,\n MEMORY_NO_PREDICTED,\n MEMORY_NO_FILTERED,\n MEMORY_NO_LIKELIHOOD,\n MEMORY_NO_GAIN,\n MEMORY_NO_SMOOTHING,\n MEMORY_NO_STD_FORECAST,\n MEMORY_CONSERVE\n)\nfrom statsmodels.tsa.statespace.kalman_smoother import (\n SMOOTHER_STATE,\n SMOOTHER_STATE_COV,\n SMOOTHER_STATE_AUTOCOV,\n SMOOTHER_DISTURBANCE,\n SMOOTHER_DISTURBANCE_COV,\n SMOOTHER_ALL\n)\nfrom statsmodels.tsa.statespace.simulation_smoother import (\n SimulationSmoother,\n SIMULATION_STATE,\n SIMULATION_DISTURBANCE,\n SIMULATION_ALL\n)\nfrom numpy.testing import assert_equal\n\n\nclass Options(object):\n @classmethod\n def setup_class(cls, *args, **kwargs):\n\n # Dummy data\n endog = np.arange(10)\n k_states = 1\n\n cls.model = SimulationSmoother(k_endog=1, k_states=k_states, *args,\n **kwargs)\n cls.model.bind(endog)\n\n\nclass TestOptions(Options):\n def test_filter_methods(self):\n model = self.model\n\n # TODO test FilterResults for accurante boolean versions of options\n # Clear the filter method\n model.filter_method = 0\n\n # Try setting via boolean\n model.filter_conventional = True\n assert_equal(model.filter_method, FILTER_CONVENTIONAL)\n\n model.filter_collapsed = True\n assert_equal(model.filter_method, FILTER_CONVENTIONAL | FILTER_COLLAPSED)\n model.filter_conventional = False\n assert_equal(model.filter_method, FILTER_COLLAPSED)\n\n # Try setting directly via method\n model.set_filter_method(FILTER_AUGMENTED)\n assert_equal(model.filter_method, FILTER_AUGMENTED)\n\n # Try setting via boolean via method\n model.set_filter_method(filter_conventional=True, filter_augmented=False)\n assert_equal(model.filter_method, FILTER_CONVENTIONAL)\n\n # Try setting and unsetting all\n model.filter_method = 0\n for name in model.filter_methods:\n setattr(model, name, True)\n assert_equal(\n model.filter_method,\n FILTER_CONVENTIONAL | FILTER_EXACT_INITIAL | FILTER_AUGMENTED |\n FILTER_SQUARE_ROOT | FILTER_UNIVARIATE | FILTER_COLLAPSED |\n FILTER_EXTENDED | FILTER_UNSCENTED\n )\n for name in model.filter_methods:\n setattr(model, name, False)\n assert_equal(model.filter_method, 0)\n\n def test_inversion_methods(self):\n model = self.model\n\n # Clear the inversion method\n model.inversion_method = 0\n\n # Try setting via boolean\n model.invert_univariate = True\n assert_equal(model.inversion_method, INVERT_UNIVARIATE)\n model.invert_cholesky = True\n assert_equal(model.inversion_method, INVERT_UNIVARIATE | INVERT_CHOLESKY)\n model.invert_univariate = False\n assert_equal(model.inversion_method, INVERT_CHOLESKY)\n\n # Try setting directly via method\n model.set_inversion_method(INVERT_LU)\n assert_equal(model.inversion_method, INVERT_LU)\n\n # Try setting via boolean via method\n model.set_inversion_method(invert_cholesky=True, invert_univariate=True, invert_lu=False)\n assert_equal(model.inversion_method, INVERT_UNIVARIATE | INVERT_CHOLESKY)\n\n 
# Try setting and unsetting all\n model.inversion_method = 0\n for name in model.inversion_methods:\n setattr(model, name, True)\n assert_equal(\n model.inversion_method,\n INVERT_UNIVARIATE | SOLVE_LU | INVERT_LU | SOLVE_CHOLESKY |\n INVERT_CHOLESKY\n )\n for name in model.inversion_methods:\n setattr(model, name, False)\n assert_equal(model.inversion_method, 0)\n\n def test_stability_methods(self):\n model = self.model\n\n # Clear the stability method\n model.stability_method = 0\n\n # Try setting via boolean\n model.stability_force_symmetry = True\n assert_equal(model.stability_method, STABILITY_FORCE_SYMMETRY)\n model.stability_force_symmetry = False\n assert_equal(model.stability_method, 0)\n\n # Try setting directly via method\n model.stability_method = 0\n model.set_stability_method(STABILITY_FORCE_SYMMETRY)\n assert_equal(model.stability_method, STABILITY_FORCE_SYMMETRY)\n\n # Try setting via boolean via method\n model.stability_method = 0\n model.set_stability_method(stability_method=True)\n assert_equal(model.stability_method, STABILITY_FORCE_SYMMETRY)\n\n # Try setting via keyword via method\n model.stability_method = 0\n model.set_stability_method(stability_force_symmetry=True)\n assert_equal(model.stability_method, STABILITY_FORCE_SYMMETRY)\n\n def test_conserve_memory(self):\n model = self.model\n\n # Clear the filter method\n model.conserve_memory = MEMORY_STORE_ALL\n\n # Try setting via boolean\n model.memory_no_forecast = True\n assert_equal(model.conserve_memory, MEMORY_NO_FORECAST)\n model.memory_no_filtered = True\n assert_equal(model.conserve_memory, MEMORY_NO_FORECAST | MEMORY_NO_FILTERED)\n model.memory_no_forecast = False\n assert_equal(model.conserve_memory, MEMORY_NO_FILTERED)\n\n # Try setting directly via method\n model.set_conserve_memory(MEMORY_NO_PREDICTED)\n assert_equal(model.conserve_memory, MEMORY_NO_PREDICTED)\n\n # Try setting via boolean via method\n model.set_conserve_memory(memory_no_filtered=True, memory_no_predicted=False)\n assert_equal(model.conserve_memory, MEMORY_NO_FILTERED)\n\n # Try setting and unsetting all\n model.conserve_memory = 0\n for name in model.memory_options:\n if name == 'memory_conserve':\n continue\n setattr(model, name, True)\n assert_equal(\n model.conserve_memory,\n MEMORY_NO_FORECAST | MEMORY_NO_PREDICTED | MEMORY_NO_FILTERED |\n MEMORY_NO_LIKELIHOOD | MEMORY_NO_GAIN | MEMORY_NO_SMOOTHING |\n MEMORY_NO_STD_FORECAST\n )\n assert_equal(model.conserve_memory, MEMORY_CONSERVE)\n for name in model.memory_options:\n if name == 'memory_conserve':\n continue\n setattr(model, name, False)\n assert_equal(model.conserve_memory, 0)\n\n def test_smoother_outputs(self):\n model = self.model\n\n # TODO test SmootherResults for accurante boolean versions of options\n\n # Clear the smoother output\n model.smoother_output = 0\n\n # Try setting via boolean\n model.smoother_state = True\n assert_equal(model.smoother_output, SMOOTHER_STATE)\n model.smoother_disturbance = True\n assert_equal(model.smoother_output, SMOOTHER_STATE | SMOOTHER_DISTURBANCE)\n model.smoother_state = False\n assert_equal(model.smoother_output, SMOOTHER_DISTURBANCE)\n\n # Try setting directly via method\n model.set_smoother_output(SMOOTHER_DISTURBANCE_COV)\n assert_equal(model.smoother_output, SMOOTHER_DISTURBANCE_COV)\n\n # Try setting via boolean via method\n model.set_smoother_output(smoother_disturbance=True, smoother_disturbance_cov=False)\n assert_equal(model.smoother_output, SMOOTHER_DISTURBANCE)\n\n # Try setting and unsetting all\n model.smoother_output = 0\n 
for name in model.smoother_outputs:\n if name == 'smoother_all':\n continue\n setattr(model, name, True)\n assert_equal(\n model.smoother_output,\n SMOOTHER_STATE | SMOOTHER_STATE_COV | SMOOTHER_STATE_AUTOCOV |\n SMOOTHER_DISTURBANCE | SMOOTHER_DISTURBANCE_COV\n )\n assert_equal(model.smoother_output, SMOOTHER_ALL)\n for name in model.smoother_outputs:\n if name == 'smoother_all':\n continue\n setattr(model, name, False)\n assert_equal(model.smoother_output, 0)\n\n def test_simulation_outputs(self):\n # TODO test changing simulation options in SimulationSmoothResults\n # instance\n\n assert_equal(self.model.get_simulation_output(SIMULATION_STATE), SIMULATION_STATE)\n assert_equal(self.model.get_simulation_output(simulate_state=True, simulate_disturbance=True), SIMULATION_ALL)\n"
] | [
[
"numpy.arange",
"numpy.testing.assert_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
salesforce/burn-after-reading | [
"939d969d67a9ba325eba9e1cb9c696908f9d88db",
"939d969d67a9ba325eba9e1cb9c696908f9d88db"
] | [
"data_loader.py",
"main_single_stream.py"
] | [
"'''\n * Copyright (c) 2021, salesforce.com, inc.\n * All rights reserved.\n * SPDX-License-Identifier: BSD-3-Clause\n * For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause\n'''\n\nfrom torchvision import datasets, transforms\nimport torch\nimport numpy as np\nimport random\nfrom PIL import Image\nfrom torch.utils.data import Dataset\nimport os\nimport os.path\nimport cv2\nimport torchvision\nfrom randaugment import RandAugmentMC\nfrom wilds.common.data_loaders import get_eval_loader\nfrom wilds.datasets.camelyon17_dataset import Camelyon17Dataset\n\ntorch.manual_seed(999)\nnp.random.seed(999)\ntorch.backends.cudnn.benchmark = True\n\n\ndef load_training_from_list(root, list_path, batch_size, kwargs, shuffle=True, return_idx=False):\n transform = transforms.Compose(\n [transforms.Resize([256, 256]),\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n txt = open(list_path).readlines()\n data = ImageList_idx(root, txt, transform=transform, return_idx=return_idx)\n train_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=shuffle, drop_last=True, **kwargs)\n return train_loader\n\n\ndef load_training_strong_weak(root, list_path, batch_size, kwargs, shuffle=True, return_idx=False,\n return_test_img=False):\n txt = open(list_path).readlines()\n data = Imagelist_strong_weak(root, txt, return_idx=return_idx, return_test_img=return_test_img)\n train_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=shuffle, drop_last=True, **kwargs)\n return train_loader\n\n\ndef load_testing_from_list(root, list_path, batch_size, kwargs, shuffle=False, return_idx=False):\n transform = transforms.Compose(\n [transforms.Resize([256, 256]),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n txt = open(list_path).readlines()\n data = ImageList_idx(root, txt, transform=transform, return_idx=return_idx)\n train_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=shuffle, drop_last=False, **kwargs)\n return train_loader\n\n\ndef make_dataset(image_list, labels):\n if labels:\n len_ = len(image_list)\n images = [(image_list[i].strip(), labels[i, :]) for i in range(len_)]\n else:\n if len(image_list[0].split()) > 2:\n images = [(val.split()[0], np.array([int(la) for la in val.split()[1:]])) for val in image_list]\n else:\n images = [(val.split()[0], int(val.split()[1])) for val in image_list]\n return images\n\n\ndef rgb_loader(path):\n with open(path, 'rb') as f:\n with Image.open(f) as img:\n return img.convert('RGB')\n\n\ndef l_loader(path):\n with open(path, 'rb') as f:\n with Image.open(f) as img:\n return img.convert('L')\n\n\nclass ResizeImage():\n def __init__(self, size):\n if isinstance(size, int):\n self.size = (int(size), int(size))\n else:\n self.size = size\n\n def __call__(self, img):\n th, tw = self.size\n return img.resize((th, tw))\n\n\nclass ImageList_idx(Dataset):\n def __init__(self, root, image_list, labels=None, transform=None, target_transform=None, mode='RGB',\n return_idx=True,\n idx_mask=None):\n imgs = make_dataset(image_list, labels)\n self.root = root\n self.imgs = imgs\n if idx_mask is not None:\n self.imgs = [imgs[i] for i in idx_mask]\n\n self.transform = transform\n self.target_transform = target_transform\n self.return_idx = return_idx\n\n if mode == 'RGB':\n self.loader = 
rgb_loader\n elif mode == 'L':\n self.loader = l_loader\n\n def __getitem__(self, index):\n path, target = self.imgs[index]\n path = os.path.join(self.root, path)\n img = self.loader(path)\n if self.transform is not None:\n img = self.transform(img)\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n if self.return_idx:\n return img, target, index\n else:\n return img, target\n\n def __len__(self):\n return len(self.imgs)\n\n\ndef get_index_label(txt):\n image_list = open(txt).readlines()\n data = [(i, int(val.split()[1])) for i, val in enumerate(image_list)]\n return np.array(data)\n\n\nclass Imagelist_strong_weak(object):\n def __init__(self, root, image_list, return_idx=False, return_test_img=False):\n imgs = make_dataset(image_list, labels=None)\n self.root = root\n\n self.imgs = imgs\n self.loader = rgb_loader\n self.return_idx = return_idx\n self.return_test_img = return_test_img\n self.test = transforms.Compose([\n ResizeImage(256),\n transforms.CenterCrop(size=224)])\n self.weak = transforms.Compose([\n ResizeImage(256),\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(size=224)])\n self.strong = transforms.Compose([\n ResizeImage(256),\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(size=224),\n RandAugmentMC(n=2, m=10)])\n self.normalize = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: (image, target) where target is\n class_index of the target class.\n \"\"\"\n path, target = self.imgs[index]\n path = os.path.join(self.root, path)\n img = self.loader(path)\n\n img_strong = self.normalize(self.strong(img))\n img_weak = self.normalize(self.weak(img))\n img_test = self.normalize(self.test(img))\n\n if not self.return_idx:\n if not self.return_test_img:\n return (img_weak, img_strong), target\n else:\n return (img_weak, img_strong, img_test), target\n else:\n if not self.return_test_img:\n return (img_weak, img_strong), target, index\n else:\n return (img_weak, img_strong, img_test), target, index\n\n def __len__(self):\n return len(self.imgs)\n",
"'''\n * Copyright (c) 2021, salesforce.com, inc.\n * All rights reserved.\n * SPDX-License-Identifier: BSD-3-Clause\n * For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause\n'''\n\n\n\"\"\"Single-stream Learner\"\"\"\nfrom __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\nimport os\nimport math\nimport data_loader\nimport ResNet as models\nimport numpy as np\nfrom torch.utils import model_zoo\nimport matplotlib.pyplot as plt\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--lr', type=float, default=8e-4, help='learning rate (default: 8e-4')\nparser.add_argument('--optim', type=str, default='adam')\nparser.add_argument('--method', type=str, default='single_stream')\nparser.add_argument('--opt', type=str, default='no_ent')\nparser.add_argument('--backbone', type=str, default='resnet101')\nparser.add_argument('--dataset', type=str, default='visda-c')\nparser.add_argument('--data_root', type=str, default='/')\nparser.add_argument('--batch', type=int, default=64)\nparser.add_argument('--rand_id', type=int, default=0)\nparser.add_argument('--start', type=int, default=0)\nparser.add_argument('--th', type=float, default=0.95)\nparser.add_argument('--div_weight', type=float, default=0.4)\nparser.add_argument('--st', type=int, default=0)\nparser.add_argument('--runs', type=int, default=0)\nargs = parser.parse_args()\n\n\"\"\"params\"\"\"\n\nbatch_size = args.batch\nepochs = 1\nlr = args.lr\nmomentum = 0.9\nseed = 999\nl2_decay = 5e-4\nkwargs = {'num_workers': 0, 'pin_memory': False}\ntorch.manual_seed(seed)\nnp.random.seed(seed)\ntorch.backends.cudnn.benchmark = True\n\n\"\"\"loaders\"\"\"\n\nif args.dataset == 'visda-c':\n source_name = \"train\"\n target_name = \"validation\"\n num_classes = 12\n source_file = './data/visda-c/train_list.txt'\n target_test_file = './data/visda-c/validation_list.txt'\n\nelif args.dataset == 'fashion':\n num_classes = 6\n source_name = 'fashion_mnist'\n target_name = 'deepfashion'\n source_file = './data/fashion/fashion_mnist_train_list.txt'\n target_test_file = './data/fashion/deepfashion_train_list.txt'\n\nsource_loader = data_loader.load_training_from_list(args.data_root, source_file, batch_size, kwargs, shuffle=True)\ntarget_test_loader = data_loader.load_testing_from_list(args.data_root, target_test_file, batch_size, kwargs, shuffle=False)\n\ntorch.manual_seed(seed)\ntorch.cuda.manual_seed(seed)\n\"\"\"target plot\"\"\"\nfile_path = './data/{}/{}_{}.txt'.format(args.dataset, target_name, args.rand_id)\ntarget_train_loader = data_loader.load_training_strong_weak(args.data_root, file_path, batch_size, kwargs, shuffle=False,\n return_test_img=True)\n\nlen_source_dataset = len(source_loader.dataset)\nlen_target_dataset = len(target_test_loader.dataset)\nlen_source_loader = len(source_loader)\nlen_target_loader = len(target_train_loader)\n\n\ndef write_log(log, log_path):\n f = open(log_path, mode='a')\n f.write(str(log))\n f.write('\\n')\n f.close()\n\n\ndef load_pretrain(net):\n if '18' in args.backbone:\n url = 'https://download.pytorch.org/models/resnet18-5c106cde.pth'\n if '50' in args.backbone:\n url = 'https://download.pytorch.org/models/resnet50-19c8e357.pth'\n elif '101' in args.backbone:\n url = 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth'\n pretrained_dict = model_zoo.load_url(url)\n model_dict = net.state_dict()\n for k, v in model_dict.items():\n if not 
\"cls_fc\" in k and not \"num_batches_tracked\" in k and not \"prototype\" in k:\n model_dict[k] = pretrained_dict[k[k.find(\".\") + 1:]]\n net.load_state_dict(model_dict)\n return net\n\n\ndef get_optim(net, LEARNING_RATE, parallel=False, optim='adam'):\n net = net.module if parallel else net\n if optim == 'adam':\n optimizer = torch.optim.Adam([\n {'params': net.sharedNet.parameters(), 'lr': LEARNING_RATE / 100},\n {'params': net.prototype.parameters(), 'lr': LEARNING_RATE},\n {'params': net.prototype_bn.parameters(), 'lr': LEARNING_RATE},\n {'params': net.cls_fc.parameters(), 'lr': LEARNING_RATE},\n ], lr=LEARNING_RATE, weight_decay=l2_decay)\n elif optim == 'sgd':\n optimizer = torch.optim.SGD([\n {'params': net.sharedNet.parameters(), 'lr': LEARNING_RATE / 10},\n {'params': net.prototype.parameters(), 'lr': LEARNING_RATE},\n {'params': net.prototype_bn.parameters(), 'lr': LEARNING_RATE},\n {'params': net.cls_fc.parameters(), 'lr': LEARNING_RATE},\n ], lr=LEARNING_RATE, weight_decay=l2_decay)\n return optimizer\n\n\ndef train(model):\n optimizer = get_optim(model, LEARNING_RATE=lr, parallel=True, optim=args.optim)\n\n data_source_iter = iter(source_loader)\n data_target_iter = iter(target_train_loader)\n\n i = 1\n\n batch_acc = []\n\n while i <= len_target_loader:\n model.train()\n\n try:\n source_data, source_label = data_source_iter.next()\n except:\n data_source_iter = iter(source_loader)\n source_data, source_label = data_source_iter.next()\n\n \"\"\"source label loss\"\"\"\n clabel_src, _ = model(source_data.cuda())\n label_loss = F.nll_loss(clabel_src.log(), source_label.cuda())\n\n del clabel_src\n\n (target_data_w, target_data_s, target_data_test), target_label = data_target_iter.next()\n if i % len_target_loader == 0:\n data_target_iter = iter(target_train_loader)\n\n _, clabel_tgt_w = model(target_data_w.cuda())\n\n pseudo_label = torch.softmax(clabel_tgt_w.detach().cpu(), dim=-1)\n max_probs, targets_u = torch.max(pseudo_label, dim=-1)\n mask = max_probs.ge(args.th).float()\n\n if args.opt.upper() == 'INFOMAX':\n # w/o CroDoBo\n clabel_tgt_w = F.softmax(clabel_tgt_w, dim=1)\n _, clabel_tgt_s = model(target_data_s.cuda())\n pb_pred_tgt = clabel_tgt_w.sum(dim=0)\n pb_pred_tgt = 1.0 / pb_pred_tgt.sum() * pb_pred_tgt\n target_div_loss = - torch.sum((pb_pred_tgt * torch.log(pb_pred_tgt + 1e-6)))\n target_entropy_loss = -torch.mean((clabel_tgt_w * torch.log(clabel_tgt_w + 1e-6)).sum(dim=1))\n del clabel_tgt_w\n del pb_pred_tgt\n Lu = (F.cross_entropy(clabel_tgt_s, targets_u.cuda(), reduction='none') * mask.cuda()).mean()\n total_loss = label_loss + 1.0 * target_entropy_loss - args.div_weight * target_div_loss + Lu\n\n elif args.opt.upper() == 'INFOMAX_S':\n # w/o CroDoBo with RandAug on l_div, l_ent\n _, clabel_tgt_s = model(target_data_s.cuda())\n Lu = (F.cross_entropy(clabel_tgt_s, targets_u.cuda(), reduction='none') * mask.cuda()).mean()\n clabel_tgt_s = F.softmax(clabel_tgt_s, dim=1)\n pb_pred_tgt = clabel_tgt_s.sum(dim=0)\n pb_pred_tgt = 1.0 / pb_pred_tgt.sum() * pb_pred_tgt\n target_div_loss = - torch.sum((pb_pred_tgt * torch.log(pb_pred_tgt + 1e-6)))\n target_entropy_loss = -torch.mean((clabel_tgt_s * torch.log(clabel_tgt_s + 1e-6)).sum(dim=1))\n total_loss = label_loss + 1.0 * target_entropy_loss - args.div_weight * target_div_loss + Lu\n\n elif 'NO_ENT' in args.opt.upper():\n # w/o CroDoBo, remove l_ent\n _, clabel_tgt_s = model(target_data_s.cuda())\n Lu = (F.cross_entropy(clabel_tgt_s, targets_u.cuda(), reduction='none') * mask.cuda()).mean()\n pred = 
F.softmax(clabel_tgt_w, dim=1)\n pb_pred_tgt = pred.sum(dim=0)\n pb_pred_tgt = 1.0 / pb_pred_tgt.sum() * pb_pred_tgt\n target_div_loss = - torch.sum((pb_pred_tgt * torch.log(pb_pred_tgt + 1e-6)))\n total_loss = label_loss - args.div_weight * target_div_loss + Lu\n\n elif 'NO_DIV' in args.opt.upper():\n # w/o CroDoBo, remove l_div\n _, clabel_tgt_s = model(target_data_s.cuda())\n Lu = (F.cross_entropy(clabel_tgt_s, targets_u.cuda(), reduction='none') * mask.cuda()).mean()\n pred = F.softmax(clabel_tgt_w, dim=1)\n target_entropy_loss = -torch.mean((pred * torch.log(pred + 1e-6)).sum(dim=1))\n total_loss = label_loss + 1.0 * target_entropy_loss + Lu\n\n optimizer.zero_grad()\n total_loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n model.zero_grad()\n\n \"\"\"Current Query Eval\"\"\"\n acc_tgt = test_batch(model, target_data_test, target_label)\n batch_acc.append(acc_tgt.item())\n print('Query %d Acc %.2f' % (i, acc_tgt))\n\n i = i + 1\n\n return batch_acc\n\n\ndef test_batch(net, batch_data, batch_label):\n net.eval()\n with torch.no_grad():\n batch_out, _ = net(batch_data.cuda())\n batch_out = batch_out.data.max(1)[1]\n batch_acc_avg = batch_out.eq(batch_label.cuda()).cpu().sum() * 100. / len(batch_label)\n return batch_acc_avg\n\n\ndef test_visda(net):\n \"\"\"For VisDA-C sample acc: correct/all; category acc: mean(class_acc)\"\"\"\n net.eval()\n correct = 0\n total = 0\n\n Dict_all = np.zeros(num_classes)\n Dict_acc = np.zeros(num_classes)\n\n with torch.no_grad():\n for batch_data, batch_label in target_test_loader:\n batch_out, _ = net(batch_data.cuda())\n pred = batch_out.data.cpu().max(1)[1]\n total += batch_label.size(0)\n\n for j in range(batch_label.numpy().shape[0]):\n Dict_all[batch_label[j].item()] += 1\n\n if pred[j] == batch_label[j]:\n Dict_acc[batch_label[j].item()] += 1\n correct += 1\n\n for j in range(len(Dict_all)):\n Dict_acc[j] = Dict_acc[j] / Dict_all[j] * 100.\n\n sample_acc_all = correct * 100. / total\n\n return sample_acc_all, Dict_acc, Dict_all\n\n\ndef batch_figure(batch_data):\n fig, ax = plt.subplots(figsize=(5, 4))\n t = np.arange(len(batch_data))\n ax.set_xlabel('Batch Index')\n ax.set_ylim([0, 101])\n ax.plot(t, batch_data, color='royalblue', linewidth=1.5)\n return fig\n\n\ndef test(net):\n net.eval()\n total = 0\n correct_all = 0\n\n with torch.no_grad():\n for data, label in target_test_loader:\n label = label.long()\n batch_out, _ = net(data.cuda())\n pred = batch_out.data.cpu().max(1)[1]\n correct_all += pred.eq(label).cpu().sum().item()\n total += label.size(0)\n\n acc_all = 100. 
* correct_all / total\n return acc_all\n\n\nif __name__ == '__main__':\n\n log_root = './log/%s/%s/%s/%.2f/div_%.2f' % (\n args.dataset, args.method, args.opt, args.th, args.div_weight)\n\n test_log_folder = os.path.join(log_root, './log_test_%s_%s_lr_%.5f_st_%d_rand_id_%d_batch_%d_run%d' % (\n args.backbone,\n args.optim, lr,\n args.st,\n args.rand_id, args.batch,\n args.runs))\n if not os.path.exists(test_log_folder):\n os.makedirs(test_log_folder)\n\n test_log = os.path.join(log_root, 'log_test_%s_%s_lr_%.5f_st_%d_rand_id_%d_batch_%d_run%d.txt' % (\n args.backbone,\n args.optim, lr,\n args.st,\n args.rand_id, args.batch,\n args.runs))\n\n model = models.MEDM_prototype(num_classes=num_classes, backbone=args.backbone)\n\n if 'densenet' not in args.backbone:\n model = load_pretrain(model)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n model = torch.nn.DataParallel(model, device_ids=[0, 1])\n model.to(device)\n\n for epoch in range(1, epochs + 1):\n epoch_batch_acc = train(model)\n\n epoch_batch_log = np.array(epoch_batch_acc)\n np.save(os.path.join(test_log_folder, 'batch_{}_log_epoch_{}.npy'.format(args.batch, epoch)), epoch_batch_log)\n\n test_msg = ''\n test_msg += 'Online Acc: %.2f\\n' % (np.array(epoch_batch_acc).mean())\n\n if args.dataset == 'visda-c':\n true_acc, class_acc, class_samples = test_visda(model)\n\n Dict_name = {0: 'plane', 1: 'bike', 2: 'bus', 3: 'car', 4: 'horse', 5: 'knife', 6: 'motor',\n 7: 'person', 8: 'plant', 9: 'sktboard', 10: 'train', 11: 'truck'}\n\n test_msg += 'One-pass Acc: sample-wise %.2f category-mean %.2f\\n' % (true_acc, class_acc.mean())\n for j in range(12):\n test_msg += '%s %.2f ' % (Dict_name[j], class_acc[j])\n write_log(test_msg, test_log)\n print(test_msg)\n\n else:\n acc_all = test(model)\n\n test_msg = 'One-pass Acc: %.2f' % acc_all\n"
] | [
[
"torch.manual_seed",
"numpy.array",
"torch.utils.data.DataLoader",
"numpy.random.seed"
],
[
"torch.nn.functional.softmax",
"torch.max",
"torch.cuda.manual_seed",
"numpy.random.seed",
"torch.manual_seed",
"torch.utils.model_zoo.load_url",
"matplotlib.pyplot.subplots",
"torch.no_grad",
"torch.log",
"torch.cuda.is_available",
"torch.nn.DataParallel",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bjajoh/lidar_transfer | [
"9a6366264b1fd95d7a84e05bd41659524fd9fd32"
] | [
"auxiliary/np_ioueval.py"
] | [
"#!/usr/bin/env python3\n# This file is covered by the LICENSE file in the root of this project.\n\nimport sys\nimport numpy as np\n\n\nclass iouEval:\n def __init__(self, n_classes, ignore=None):\n # classes\n self.n_classes = n_classes\n\n # What to include and ignore from the means\n self.ignore = np.array(ignore, dtype=np.int64)\n self.include = np.array(\n [n for n in range(self.n_classes) if n not in self.ignore], dtype=np.int64)\n print(\"[IOU EVAL] IGNORE: \", self.ignore)\n print(\"[IOU EVAL] INCLUDE: \", self.include)\n\n # reset the class counters\n self.reset()\n\n def num_classes(self):\n return self.n_classes\n\n def reset(self):\n self.conf_matrix = np.zeros((self.n_classes,\n self.n_classes),\n dtype=np.int64)\n\n def addBatch(self, x, y): # x=preds, y=targets\n # sizes should be matching\n x_row = x.reshape(-1) # de-batchify\n y_row = y.reshape(-1) # de-batchify\n\n # check\n assert(x_row.shape == x_row.shape)\n\n # create indexes\n idxs = tuple(np.stack((x_row, y_row), axis=0))\n\n # make confusion matrix (cols = gt, rows = pred)\n np.add.at(self.conf_matrix, idxs, 1)\n\n def getStats(self):\n # remove fp and fn from confusion on the ignore classes cols and rows\n conf = self.conf_matrix.copy()\n conf[self.ignore] = 0\n conf[:, self.ignore] = 0\n\n # get the clean stats\n tp = np.diag(conf)\n fp = conf.sum(axis=1) - tp\n fn = conf.sum(axis=0) - tp\n return tp, fp, fn\n\n def getIoU(self):\n tp, fp, fn = self.getStats()\n intersection = tp\n union = tp + fp + fn + 1e-15\n iou = intersection / union\n iou_mean = (intersection[self.include] / union[self.include]).mean()\n return iou_mean, iou # returns \"iou mean\", \"iou per class\" ALL CLASSES\n\n def getacc(self):\n tp, fp, fn = self.getStats()\n total_tp = tp.sum()\n total = tp[self.include].sum() + fp[self.include].sum() + 1e-15\n acc_mean = total_tp / total\n return acc_mean # returns \"acc mean\"\n\n\nif __name__ == \"__main__\":\n # mock problem\n nclasses = 2\n ignore = []\n\n # test with 2 squares and a known IOU\n lbl = np.zeros((7, 7), dtype=np.int64)\n argmax = np.zeros((7, 7), dtype=np.int64)\n\n # put squares\n lbl[2:4, 2:4] = 1\n argmax[3:5, 3:5] = 1\n\n # make evaluator\n eval = iouEval(nclasses, ignore)\n\n # run\n eval.addBatch(argmax, lbl)\n m_iou, iou = eval.getIoU()\n print(\"IoU: \", m_iou)\n print(\"IoU class: \", iou)\n m_acc = eval.getacc()\n print(\"Acc: \", m_acc)\n"
] | [
[
"numpy.add.at",
"numpy.diag",
"numpy.stack",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ssalopek/DFT_FFT_ImageDenoise | [
"9ced6175b39c0c8f205b84fcd7d3fbb16bdca8d8"
] | [
"FFT.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import fftpack\nfrom matplotlib.colors import LogNorm \nimport cv2\nimport time\n\nstart = time.time()\n#Load input image\nimage_source = cv2.imread('C:/FaksGit/FourierFilter/TestImages/man.png')\ngray_image = cv2.cvtColor(image_source, cv2.COLOR_BGR2GRAY)\n\n#Plot input image\nplt.figure()\nplt.imshow(gray_image, plt.cm.gray)\nplt.xticks([]), plt.yticks([])\nplt.title(\"Original image\")\n\n#Return the two-dimensional discrete Fourier transform of the 2-D argument x.\nimage_fft = fftpack.fft2(gray_image) \n\n#Logaritmic map\ndef show_spectrum(image_fft):\n plt.imshow(np.abs(image_fft), norm=LogNorm(vmin=5))\n plt.colorbar() #Add colorbar \n\n#Plot FT input image\nplt.figure()\nshow_spectrum(image_fft)\nplt.title(\"Fourier transform\")\n\nkeep_fraction = 0.3 #keep fraction (u oba smijera)\nimage_fft2 = image_fft.copy()\nrow, col = image_fft2.shape #get the current shape of an array\n\n#Set on zero all rows with index between row*keep_fraction and row*(1-keep_fraction)\nimage_fft2[int(row*keep_fraction):int(row*(1-keep_fraction))] = 0 \n#Similar for columns\nimage_fft2[:, int(col*keep_fraction):int(col*(1-keep_fraction))] = 0\n\n#Plot spectrum\nplt.figure()\nshow_spectrum(image_fft2)\nplt.title('Filtered Spectrum')\n\n#Return inverse two-dimensional discrete Fourier transform of arbitrary type sequence x\nimage_new = fftpack.ifft2(image_fft2).real\n\nend = time.time()\nprint(\"Time:\" ,end - start)\n\nfig = plt.figure(figsize=(20,10))\nplt.imshow(image_new, plt.cm.gray)\nplt.xticks([]), plt.yticks([])\nplt.title('Reconstructed Image FFT')\nfig.savefig('baboon_gauss60.3FFT.png')\n\n\n\n\n\n\n\n\n\n\n\n\n"
] | [
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.yticks",
"matplotlib.colors.LogNorm",
"numpy.abs",
"matplotlib.pyplot.title",
"matplotlib.pyplot.colorbar",
"scipy.fftpack.fft2",
"matplotlib.pyplot.xticks",
"scipy.fftpack.ifft2",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
histolab/histolab | [
"e5e28846fada56d04b90cf32a2772fbdb2e0786a"
] | [
"histolab/filters/image_filters_functional.py"
] | [
"# encoding: utf-8\n\n# ------------------------------------------------------------------------\n# Copyright 2020 All Histolab Contributors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ------------------------------------------------------------------------\n\nimport math\nimport operator\nfrom functools import reduce\nfrom typing import Any, Callable\n\nimport numpy as np\nimport PIL\nimport PIL.ImageOps\nimport skimage.color as sk_color\nimport skimage.exposure as sk_exposure\nimport skimage.feature as sk_feature\nimport skimage.filters as sk_filters\nimport skimage.future as sk_future\nimport skimage.morphology as sk_morphology\nimport skimage.segmentation as sk_segmentation\n\nfrom ..util import apply_mask_image, np_to_pil, threshold_to_mask, warn\nfrom .util import mask_percent\n\n\ndef adaptive_equalization(\n img: PIL.Image.Image, nbins: int = 256, clip_limit: float = 0.01\n) -> PIL.Image.Image:\n \"\"\"Increase image contrast using adaptive equalization.\n\n Contrast in local region of input image (gray or RGB) is increased using\n adaptive equalization\n\n Parameters\n ----------\n img : PIL.Image.Image\n Input image (gray or RGB)\n nbins : int\n Number of histogram bins. Default is 256.\n clip_limit : float, optional\n Clipping limit where higher value increases contrast. 
Default is 0.01\n\n Returns\n -------\n PIL.Image.Image\n image with contrast enhanced by adaptive equalization.\n \"\"\"\n if not (isinstance(nbins, int) and nbins > 0):\n raise ValueError(\"Number of histogram bins must be a positive integer\")\n img_arr = np.array(img)\n adapt_equ = sk_exposure.equalize_adapthist(img_arr, nbins, clip_limit)\n adapt_equ = np_to_pil(adapt_equ)\n return adapt_equ\n\n\ndef blue_pen_filter(img: PIL.Image.Image) -> PIL.Image.Image:\n \"\"\"Filter out blue pen marks from a diagnostic slide.\n\n The resulting mask is a composition of green filters with different thresholds\n for the RGB channels.\n\n Parameters\n ---------\n img : PIL.Image.Image\n Input RGB image\n\n Returns\n -------\n PIL.Image.Image\n Input image with the blue pen marks filtered out.\n \"\"\"\n parameters = [\n {\"red_thresh\": 60, \"green_thresh\": 120, \"blue_thresh\": 190},\n {\"red_thresh\": 120, \"green_thresh\": 170, \"blue_thresh\": 200},\n {\"red_thresh\": 175, \"green_thresh\": 210, \"blue_thresh\": 230},\n {\"red_thresh\": 145, \"green_thresh\": 180, \"blue_thresh\": 210},\n {\"red_thresh\": 37, \"green_thresh\": 95, \"blue_thresh\": 160},\n {\"red_thresh\": 30, \"green_thresh\": 65, \"blue_thresh\": 130},\n {\"red_thresh\": 130, \"green_thresh\": 155, \"blue_thresh\": 180},\n {\"red_thresh\": 40, \"green_thresh\": 35, \"blue_thresh\": 85},\n {\"red_thresh\": 30, \"green_thresh\": 20, \"blue_thresh\": 65},\n {\"red_thresh\": 90, \"green_thresh\": 90, \"blue_thresh\": 140},\n {\"red_thresh\": 60, \"green_thresh\": 60, \"blue_thresh\": 120},\n {\"red_thresh\": 110, \"green_thresh\": 110, \"blue_thresh\": 175},\n ]\n\n blue_pen_filter_img = reduce(\n (lambda x, y: x & y), [blue_filter(img, **param) for param in parameters]\n )\n return apply_mask_image(img, blue_pen_filter_img)\n\n\ndef dab_channel(img: PIL.Image.Image) -> PIL.Image.Image:\n \"\"\"Obtain DAB channel from RGB image.\n\n Input image is first converted into HED space and the DAB channel is\n rescaled for increased contrast.\n\n Parameters\n ----------\n img : Image.Image\n Input RGB image\n\n Returns\n -------\n Image.Image\n Grayscale image corresponding to input image with DAB channel enhanced.\n \"\"\"\n if img.mode not in [\"RGB\", \"RGBA\"]:\n raise ValueError(\"Input image must be RGB/RGBA.\")\n dab = np.array(rgb_to_hed(img))[:, :, 2]\n dab = sk_exposure.rescale_intensity(dab)\n return np_to_pil(dab)\n\n\ndef eosin_channel(img: PIL.Image.Image) -> PIL.Image.Image:\n \"\"\"Obtain Eosin channel from RGB image.\n\n Input image is first converted into HED space and the Eosin channel is\n rescaled for increased contrast.\n\n Parameters\n ----------\n img : Image.Image\n Input RGB image\n\n Returns\n -------\n Image.Image\n Grayscale image corresponding to input image with Eosin channel enhanced.\n \"\"\"\n if img.mode not in [\"RGB\", \"RGBA\"]:\n raise ValueError(\"Input image must be RGB/RGBA.\")\n eosin = np.array(rgb_to_hed(img))[:, :, 1]\n eosin = sk_exposure.rescale_intensity(eosin)\n return np_to_pil(eosin)\n\n\ndef green_pen_filter(img: PIL.Image.Image) -> PIL.Image.Image:\n \"\"\"Filter out green pen marks from a diagnostic slide.\n\n The resulting mask is a composition of green filters with different thresholds\n for the RGB channels.\n\n Parameters\n ---------\n img : PIL.Image.Image\n Input RGB image\n\n Returns\n -------\n PIL.Image.Image\n Input image with the green pen marks filtered out.\n \"\"\"\n parameters = [\n {\"red_thresh\": 150, \"green_thresh\": 160, \"blue_thresh\": 140},\n {\"red_thresh\": 
70, \"green_thresh\": 110, \"blue_thresh\": 110},\n {\"red_thresh\": 45, \"green_thresh\": 115, \"blue_thresh\": 100},\n {\"red_thresh\": 30, \"green_thresh\": 75, \"blue_thresh\": 60},\n {\"red_thresh\": 195, \"green_thresh\": 220, \"blue_thresh\": 210},\n {\"red_thresh\": 225, \"green_thresh\": 230, \"blue_thresh\": 225},\n {\"red_thresh\": 170, \"green_thresh\": 210, \"blue_thresh\": 200},\n {\"red_thresh\": 20, \"green_thresh\": 30, \"blue_thresh\": 20},\n {\"red_thresh\": 50, \"green_thresh\": 60, \"blue_thresh\": 40},\n {\"red_thresh\": 30, \"green_thresh\": 50, \"blue_thresh\": 35},\n {\"red_thresh\": 65, \"green_thresh\": 70, \"blue_thresh\": 60},\n {\"red_thresh\": 100, \"green_thresh\": 110, \"blue_thresh\": 105},\n {\"red_thresh\": 165, \"green_thresh\": 180, \"blue_thresh\": 180},\n {\"red_thresh\": 140, \"green_thresh\": 140, \"blue_thresh\": 150},\n {\"red_thresh\": 185, \"green_thresh\": 195, \"blue_thresh\": 195},\n ]\n\n green_pen_filter_img = reduce(\n (lambda x, y: x & y), [green_filter(img, **param) for param in parameters]\n )\n return apply_mask_image(img, green_pen_filter_img)\n\n\ndef hematoxylin_channel(img: PIL.Image.Image) -> PIL.Image.Image:\n \"\"\"Obtain Hematoxylin channel from RGB image.\n\n Input image is first converted into HED space and the hematoxylin channel is\n rescaled for increased contrast.\n\n Parameters\n ----------\n img : Image.Image\n Input RGB image\n\n Returns\n -------\n Image.Image\n Grayscale image corresponding to input image with Hematoxylin channel enhanced.\n \"\"\"\n if img.mode not in [\"RGB\", \"RGBA\"]:\n raise ValueError(\"Input image must be RGB/RGBA.\")\n hematoxylin = np.array(rgb_to_hed(img))[:, :, 0]\n hematoxylin = sk_exposure.rescale_intensity(hematoxylin)\n return np_to_pil(hematoxylin)\n\n\ndef histogram_equalization(img: PIL.Image.Image, nbins: int = 256) -> PIL.Image.Image:\n \"\"\"Increase image contrast using histogram equalization.\n\n The input image (gray or RGB) is filterd using histogram equalization to increase\n contrast.\n\n Parameters\n ----------\n img : PIL.Image.Image\n Input image.\n nbins : int. optional\n Number of histogram bins. Default is 256.\n\n Returns\n -------\n PIL.Image.Image\n Image with contrast enhanced by histogram equalization.\n \"\"\"\n img_arr = np.array(img)\n hist_equ = sk_exposure.equalize_hist(img_arr.flatten(), nbins=nbins)\n hist_equ = hist_equ.reshape(img_arr.shape)\n return np_to_pil(hist_equ)\n\n\ndef hysteresis_threshold(\n img: PIL.Image.Image, low: int = 50, high: int = 100\n) -> PIL.Image.Image:\n \"\"\"Apply two-level (hysteresis) threshold to an image.\n\n Parameters\n ----------\n img : PIL.Image.Image\n Input image\n low : int, optional\n low threshold. Default is 50.\n high : int, optional\n high threshold. Default is 100.\n\n Returns\n -------\n PIL.Image.Image\n Image with the hysteresis threshold applied\n \"\"\"\n if low is None or high is None:\n raise ValueError(\"thresholds cannot be None\")\n hyst = sk_filters.apply_hysteresis_threshold(np.array(img), low, high)\n img_out = apply_mask_image(img, hyst)\n return img_out\n\n\ndef invert(img: PIL.Image.Image) -> PIL.Image.Image:\n \"\"\"Invert an image, i.e. 
take the complement of the correspondent array.\n\n Parameters\n ----------\n img : PIL.Image.Image\n Input image\n\n Returns\n -------\n PIL.Image.Image\n Inverted image\n \"\"\"\n if img.mode == \"RGBA\":\n red, green, blue, alpha = img.split()\n rgb_img = PIL.Image.merge(\"RGB\", (red, green, blue))\n inverted_img_rgb = PIL.ImageOps.invert(rgb_img)\n red, green, blue = inverted_img_rgb.split()\n inverted_img = PIL.Image.merge(\"RGBA\", (red, green, blue, alpha))\n else:\n inverted_img = PIL.ImageOps.invert(img)\n\n return inverted_img\n\n\ndef kmeans_segmentation(\n img: PIL.Image.Image, n_segments: int = 800, compactness: float = 10.0\n) -> PIL.Image.Image:\n \"\"\"Segment an image with K-means segmentation\n\n By using K-means segmentation (color/space proximity) each segment is\n colored based on the average color for that segment.\n\n Parameters\n ---------\n img : PIL.Image.Image\n Input image\n n_segments : int, optional\n The number of segments. Default is 800.\n compactness : float, optional\n Color proximity versus space proximity factor. Default is 10.0.\n\n Returns\n -------\n PIL.Image.Image\n RGB image where each segment has been colored based on the average\n color for that segment.\n\n Raises\n ------\n ValueError\n If ``img`` mode is RGBA.\n \"\"\"\n if img.mode == \"RGBA\":\n raise ValueError(\"Input image cannot be RGBA\")\n img_arr = np.array(img)\n labels = sk_segmentation.slic(img_arr, n_segments, compactness, start_label=0)\n return np_to_pil(sk_color.label2rgb(labels, img_arr, kind=\"avg\", bg_label=-1))\n\n\ndef local_equalization(img: PIL.Image.Image, disk_size: int = 50) -> PIL.Image.Image:\n \"\"\"Filter gray image using local equalization.\n\n Local equalization method uses local histograms based on a disk structuring element.\n\n Parameters\n ---------\n img: PIL.Image.Image\n Input image. Notice that it must be 2D\n disk_size: int, optional\n Radius of the disk structuring element used for the local histograms. Default is\n 50.\n\n Returns\n -------\n PIL.Image.Image\n 2D image with contrast enhanced using local equalization.\n \"\"\"\n\n if len(np.array(img).shape) != 2:\n raise ValueError(\"Input must be 2D.\")\n local_equ = sk_filters.rank.equalize(\n np.array(img), selem=sk_morphology.disk(disk_size)\n )\n return np_to_pil(local_equ)\n\n\ndef local_otsu_threshold(\n img: PIL.Image.Image, disk_size: float = 3.0\n) -> PIL.Image.Image:\n \"\"\"Mask image based on local Otsu threshold.\n\n Compute local Otsu threshold for each pixel and return boolean mask\n based on pixels being less than the local Otsu threshold.\n\n Note that the input image must be 2D.\n\n Parameters\n ----------\n img: PIL.Image.Image\n Input 2-dimensional image\n disk_size : float, optional\n Radius of the disk structuring element used to compute\n the Otsu threshold for each pixel. 
Default is 3.0.\n\n Returns\n -------\n PIL.Image.Image\n Resulting image where local Otsu threshold values have been\n applied to original image.\n \"\"\"\n if np.array(img).ndim != 2:\n raise ValueError(\"Input must be 2D.\")\n if disk_size is None or disk_size < 0 or disk_size == np.inf:\n raise ValueError(\"Disk size must be a positive number.\")\n img_arr = np.array(img)\n local_otsu = sk_filters.rank.otsu(img_arr, sk_morphology.disk(disk_size))\n return np_to_pil(local_otsu)\n\n\ndef rag_threshold(\n img: PIL.Image.Image,\n n_segments: int = 800,\n compactness: float = 10.0,\n threshold: int = 9,\n) -> PIL.Image.Image:\n \"\"\"Combine similar K-means segmented regions based on threshold value.\n\n Segment an image with K-means, build region adjacency graph based on\n the segments, combine similar regions based on threshold value,\n and then output these resulting region segments.\n\n Parameters\n ----------\n img : PIL.Image.Image\n Input image\n n_segments : int, optional\n The number of segments. Default is 800.\n compactness : float, optional\n Color proximity versus space proximity factor. Default is 10.0.\n threshold : int, optional\n Threshold value for combining regions. Default is 9.\n\n Returns\n -------\n PIL.Image.Image\n Each segment has been colored based on the average\n color for that segment (and similar segments have been combined).\n\n Raises\n ------\n ValueError\n If ``img`` mode is RGBA.\n \"\"\"\n if img.mode == \"RGBA\":\n raise ValueError(\"Input image cannot be RGBA\")\n img_arr = np.array(img)\n labels = sk_segmentation.slic(img_arr, n_segments, compactness, start_label=0)\n green = sk_future.graph.rag_mean_color(img_arr, labels)\n labels2 = sk_future.graph.cut_threshold(labels, green, threshold)\n rag = sk_color.label2rgb(labels2, img_arr, kind=\"avg\", bg_label=-1)\n return np_to_pil(rag)\n\n\ndef red_pen_filter(img: PIL.Image.Image) -> PIL.Image.Image:\n \"\"\"Filter out red pen marks on diagnostic slides.\n\n The resulting mask is a composition of red filters with different thresholds\n for the RGB channels.\n\n Parameters\n ----------\n img : PIL.Image.Image\n Input RGB image.\n\n Returns\n -------\n PIL.Image.Image\n Input image with the pen marks filtered out.\n \"\"\"\n parameters = [\n {\"red_thresh\": 150, \"green_thresh\": 80, \"blue_thresh\": 90},\n {\"red_thresh\": 110, \"green_thresh\": 20, \"blue_thresh\": 30},\n {\"red_thresh\": 185, \"green_thresh\": 65, \"blue_thresh\": 105},\n {\"red_thresh\": 195, \"green_thresh\": 85, \"blue_thresh\": 125},\n {\"red_thresh\": 220, \"green_thresh\": 115, \"blue_thresh\": 145},\n {\"red_thresh\": 125, \"green_thresh\": 40, \"blue_thresh\": 70},\n {\"red_thresh\": 100, \"green_thresh\": 50, \"blue_thresh\": 65},\n {\"red_thresh\": 85, \"green_thresh\": 25, \"blue_thresh\": 45},\n ]\n red_pen_filter_img = reduce(\n (lambda x, y: x & y), [red_filter(img, **param) for param in parameters]\n )\n return apply_mask_image(img, red_pen_filter_img)\n\n\ndef rgb_to_hed(img: PIL.Image.Image) -> PIL.Image.Image:\n \"\"\"Convert RGB channels to HED channels.\n\n image color space (RGB) is converted to Hematoxylin-Eosin-Diaminobenzidine space.\n\n Parameters\n ----------\n img : PIL.Image.Image\n Input image\n\n Returns\n -------\n PIL.Image.Image\n Image in HED space\n \"\"\"\n if img.mode not in [\"RGB\", \"RGBA\"]:\n raise Exception(\"Input image must be RGB.\")\n if img.mode == \"RGBA\":\n red, green, blue, _ = img.split()\n img = PIL.Image.merge(\"RGB\", (red, green, blue))\n warn(\n \"Input image must be RGB. 
\"\n \"NOTE: the image will be converted to RGB before HED conversion.\"\n )\n\n img_arr = np.array(img)\n hed_arr = sk_color.rgb2hed(img_arr)\n hed = np_to_pil(hed_arr)\n\n return hed\n\n\ndef rgb_to_hsv(img: PIL.Image.Image) -> PIL.Image.Image:\n \"\"\"Convert RGB channels to HSV channels.\n\n image color space (RGB) is converted to Hue - Saturation - Value (HSV) space.\n\n Parameters\n ----------\n img : PIL.Image.Image\n Input image\n\n Returns\n -------\n PIL.Image.Image\n Image in HED space\n\n Raises\n ------\n Exception\n If the image mode is not RGB\n \"\"\"\n if img.mode != \"RGB\":\n raise Exception(\"Input image must be RGB\")\n img_arr = np.array(img)\n hsv_arr = sk_color.rgb2hsv(img_arr)\n hsv = np_to_pil(hsv_arr)\n return hsv\n\n\ndef rgb_to_lab(\n img: PIL.Image.Image, illuminant: str = \"D65\", observer: int = \"2\"\n) -> PIL.Image.Image:\n \"\"\"Convert from the sRGB color space to the CIE Lab colorspace.\n\n sRGB color space reference: IEC 61966-2-1:1999\n\n Parameters\n ----------\n img : PIL.Image.Image\n Input image\n illuminant : {“A”, “D50”, “D55”, “D65”, “D75”, “E”}, optional\n The name of the illuminant (the function is NOT case sensitive).\n observer : {“2”, “10”}, optional\n The aperture angle of the observer.\n\n Returns\n -------\n PIL.Image.Image\n Image in LAB space\n\n Raises\n ------\n Exception\n If the image mode is not RGB\n \"\"\"\n if img.mode != \"RGB\":\n raise Exception(\"Input image must be RGB\")\n img_arr = np.array(img)\n lab_arr = sk_color.rgb2lab(img_arr, illuminant=illuminant, observer=observer)\n lab = np_to_pil(lab_arr)\n return lab\n\n\ndef rgb_to_od(img: PIL.Image.Image) -> np.ndarray:\n \"\"\"Convert from RGB to optical density (OD_RGB) space.\n\n Parameters\n ----------\n img : PIL.Image.Image\n Input image\n\n Returns\n -------\n np.ndarray\n Array representation of the image in OD space\n \"\"\"\n if img.mode == \"RGBA\":\n red, green, blue, _ = img.split()\n img = PIL.Image.merge(\"RGB\", (red, green, blue))\n\n warn(\n \"Input image must be RGB. \"\n \"NOTE: the image will be converted to RGB before OD conversion.\"\n )\n\n img_arr = np.array(img)\n\n od_arr = -np.log((img_arr.astype(float) + 1) / 240)\n return od_arr\n\n\ndef stretch_contrast(\n img: PIL.Image.Image, low: int = 40, high: int = 60\n) -> PIL.Image.Image:\n \"\"\"Increase image contrast.\n\n Th contrast in image is increased based on intensities in a specified range\n\n Parameters\n ----------\n img: PIL.Image.Image\n Input image\n low: int\n Range low value (0 to 255). Default is 40.\n high: int\n Range high value (0 to 255). 
Default is 60.\n\n Returns\n -------\n PIL.Image.Image\n Image with contrast enhanced.\n \"\"\"\n if low not in range(256) or high not in range(256):\n raise Exception(\"low and high values must be in range [0, 255]\")\n img_arr = np.array(img)\n low_p, high_p = np.percentile(img_arr, (low * 100 / 255, high * 100 / 255))\n return np_to_pil(sk_exposure.rescale_intensity(img_arr, in_range=(low_p, high_p)))\n\n\n# -------- Branching function --------\n\n\ndef blue_filter(\n img: PIL.Image.Image, red_thresh: int, green_thresh: int, blue_thresh: int\n) -> np.ndarray:\n \"\"\"Filter out blueish colors in an RGB image.\n\n Create a mask to filter out blueish colors, where the mask is based on a pixel\n being above a red channel threshold value, above a green channel threshold value,\n and below a blue channel threshold value.\n\n Parameters\n ----------\n img : PIL.Image.Image\n Input RGB image\n red_thresh : int\n Red channel lower threshold value.\n green_thresh : int\n Green channel lower threshold value.\n blue_thresh : int\n Blue channel upper threshold value.\n\n Returns\n -------\n np.ndarray\n Boolean NumPy array representing the mask.\n \"\"\"\n if np.array(img).ndim != 3:\n raise ValueError(\"Input must be 3D.\")\n if not (\n 0 <= red_thresh <= 255 and 0 <= green_thresh <= 255 and 0 <= blue_thresh <= 255\n ):\n raise ValueError(\"RGB Thresholds must be in range [0, 255]\")\n img_arr = np.array(img)\n red = img_arr[:, :, 0] > red_thresh\n green = img_arr[:, :, 1] > green_thresh\n blue = img_arr[:, :, 2] < blue_thresh\n return red | green | blue\n\n\ndef canny_edges(\n img: PIL.Image.Image,\n sigma: float = 1.0,\n low_threshold: float = 0.0,\n high_threshold: float = 25.0,\n) -> np.ndarray:\n \"\"\"Filter image based on Canny edge algorithm.\n\n Note that input image must be 2D grayscale image\n\n Parameters\n ----------\n img : PIL.Image.Image\n Input 2-dimensional image\n sigma : float, optional\n Width (std dev) of Gaussian. Default is 1.0.\n low_threshold : float, optional\n Low hysteresis threshold value. Default is 0.0.\n high_threshold : float, optional\n High hysteresis threshold value. Default is 25.0.\n\n Returns\n -------\n np.ndarray\n Boolean NumPy array representing Canny edge map.\n \"\"\"\n if np.array(img).ndim != 2:\n raise ValueError(\"Input must be 2D.\")\n img_arr = np.array(img)\n return sk_feature.canny(img_arr, sigma, low_threshold, high_threshold)\n\n\ndef filter_entropy(\n img: PIL.Image.Image,\n neighborhood: int = 9,\n threshold: float = 5.0,\n relate: Callable[..., Any] = operator.gt,\n) -> np.ndarray:\n \"\"\"Filter image based on entropy (complexity).\n\n The area of the image included in the local neighborhood is defined by a square\n neighborhood x neighborhood\n\n Note that input must be 2D.\n\n Parameters\n ----------\n img : PIL.Image.Image\n input 2-dimensional image\n neighborhood : int, optional\n Neighborhood size (defines height and width of 2D array of 1's). Default is 9.\n threshold : float, optional\n Threshold value. Default is 5.0\n relate : callable operator, optional\n Operator to be used to compute the mask from the threshold. 
Default is\n operator.lt\n\n Returns\n -------\n np.ndarray\n NumPy boolean array where True represent a measure of complexity.\n \"\"\"\n if np.array(img).ndim != 2:\n raise ValueError(\"Input must be 2D.\")\n img_arr = np.array(img)\n entropy = sk_filters.rank.entropy(img_arr, np.ones((neighborhood, neighborhood)))\n return threshold_to_mask(entropy, threshold, relate)\n\n\ndef grays(img: PIL.Image.Image, tolerance: int = 15) -> np.ndarray:\n \"\"\"Filter out gray pixels in RGB image.\n\n Gray pixels are those pixels where the red, green, and blue channel values\n are similar, i.e. under a specified tolerance.\n\n Parameters\n ----------\n img : PIL.Image.Image\n Input image\n tolerance : int, optional\n if difference between values is below this threshold,\n values are considered similar and thus filtered out. Default is 15.\n\n Returns\n -------\n PIL.Image.Image\n Mask image where the grays values are masked out\n \"\"\"\n\n if np.array(img).ndim != 3:\n raise ValueError(\"Input must be 3D.\")\n # TODO: class image mode exception: raise exception if not RGB(A)\n img_arr = np.array(img).astype(np.int64)\n rg_diff = abs(img_arr[:, :, 0] - img_arr[:, :, 1]) > tolerance\n rb_diff = abs(img_arr[:, :, 0] - img_arr[:, :, 2]) > tolerance\n gb_diff = abs(img_arr[:, :, 1] - img_arr[:, :, 2]) > tolerance\n filter_grays = rg_diff | rb_diff | gb_diff\n return filter_grays\n\n\ndef green_channel_filter(\n img: PIL.Image.Image,\n green_thresh: int = 200,\n avoid_overmask: bool = True,\n overmask_thresh: float = 90.0,\n) -> np.ndarray:\n \"\"\"Mask pixels in an RGB image with G-channel greater than a specified threshold.\n\n Create a mask to filter out pixels with a green channel value greater than\n a particular threshold, since hematoxylin and eosin are purplish and pinkish,\n which do not have much green to them.\n\n Parameters\n ----------\n img : PIL.Image.Image\n Input RGB image\n green_thresh : int, optional\n Green channel threshold value (0 to 255). Default is 200.\n If value is greater than green_thresh, mask out pixel.\n avoid_overmask : bool, optional\n If True, avoid masking above the overmask_thresh percentage. Default is True.\n overmask_thresh : float, optional\n If avoid_overmask is True, avoid masking above this percentage value. 
Default is\n 90.0\n\n Returns\n -------\n np.ndarray\n Boolean mask where pixels above a particular green channel\n threshold have been masked out.\n \"\"\"\n if green_thresh > 255.0 or green_thresh < 0.0:\n raise ValueError(\"threshold must be in range [0, 255]\")\n green = np.array(img)[:, :, 1]\n g_mask = green <= green_thresh\n mask_percentage = mask_percent(g_mask)\n if avoid_overmask and (mask_percentage >= overmask_thresh) and (green_thresh < 255):\n new_green_thresh = math.ceil((255 + green_thresh) / 2)\n g_mask = green_channel_filter(\n np.array(img), new_green_thresh, avoid_overmask, overmask_thresh\n )\n return g_mask\n\n\ndef green_filter(\n img: PIL.Image.Image, red_thresh: int, green_thresh: int, blue_thresh: int\n) -> np.ndarray:\n \"\"\"Filter out greenish colors in an RGB image.\n The mask is based on a pixel being above a red channel threshold value, below a\n green channel threshold value, and below a blue channel threshold value.\n\n Note that for the green ink, the green and blue channels tend to track together, so\n for blue channel we use a lower threshold rather than an upper threshold value.\n\n Parameters\n ----------\n img : PIL.Image.Image\n RGB input image.\n red_thresh : int\n Red channel upper threshold value.\n green_thresh : int\n Green channel lower threshold value.\n blue_thresh : int\n Blue channel lower threshold value.\n\n Returns\n -------\n np.ndarray\n Boolean NumPy array representing the mask.\n \"\"\"\n if np.array(img).ndim != 3:\n raise ValueError(\"Input must be 3D.\")\n if not (\n 0 <= red_thresh <= 255 and 0 <= green_thresh <= 255 and 0 <= blue_thresh <= 255\n ):\n raise ValueError(\"RGB Thresholds must be in range [0, 255]\")\n\n img_arr = np.array(img)\n red = img_arr[:, :, 0] > red_thresh\n green = img_arr[:, :, 1] < green_thresh\n blue = img_arr[:, :, 2] < blue_thresh\n return red | green | blue\n\n\ndef hysteresis_threshold_mask(\n img: PIL.Image.Image, low: int = 50, high: int = 100\n) -> np.ndarray:\n \"\"\"Mask an image using hysteresis threshold\n\n Compute the Hysteresis threshold on the complement of a grayscale image,\n and return boolean mask based on pixels above this threshold.\n\n Parameters\n ----------\n img : PIL.Image.Image\n Input image.\n low : int, optional\n low threshold. Default is 50.\n high : int, optional\n high threshold. Default is 100.\n\n Returns\n -------\n np.ndarray\n Boolean NumPy array where True represents a pixel above Otsu threshold.\n \"\"\"\n if low is None or high is None:\n raise ValueError(\"thresholds cannot be None\")\n grey_scale = PIL.ImageOps.grayscale(img)\n comp = invert(grey_scale)\n hyst_mask = sk_filters.apply_hysteresis_threshold(np.array(comp), low, high)\n return hyst_mask\n\n\ndef otsu_threshold(\n img: PIL.Image.Image, relate: Callable[..., Any] = operator.lt\n) -> np.ndarray:\n \"\"\"Mask image based on pixel above Otsu threshold.\n\n Compute Otsu threshold on image and return boolean mask based on pixels above this\n threshold.\n\n Note that Otsu threshold is expected to work correctly only for grayscale images.\n\n Parameters\n ----------\n img : PIL.Image.Image\n Input image.\n relate : operator, optional\n Operator to be used to compute the mask from the threshold. 
Default is\n operator.lt\n\n Returns\n -------\n np.ndarray\n Boolean NumPy array where True represents a pixel above Otsu threshold.\n \"\"\"\n if img.mode in [\"RGB\", \"RGBA\"]:\n image = PIL.ImageOps.grayscale(img)\n warn(\n \"otsu_threshold is expected to work correctly only for grayscale images.\"\n \"NOTE: the image will be converted to grayscale before applying Otsu\"\n \"threshold\"\n )\n else:\n image = img\n\n otsu_thresh = sk_filters.threshold_otsu(np.array(image))\n return threshold_to_mask(image, otsu_thresh, relate)\n\n\ndef red_filter(\n img: PIL.Image.Image, red_thresh: int, green_thresh: int, blue_thresh: int\n) -> np.ndarray:\n \"\"\"Mask reddish colors in an RGB image.\n\n Create a mask to filter out reddish colors, where the mask is based on a pixel\n being above a red channel threshold value, below a green channel threshold value,\n and below a blue channel threshold value.\n\n Parameters\n ----------\n img : PIL.Image.Image\n Input RGB image\n red_thresh : int\n Red channel lower threshold value.\n green_thresh : int\n Green channel upper threshold value.\n blue_thresh : int\n Blue channel upper threshold value.\n\n Returns\n -------\n np.ndarray\n Boolean NumPy array representing the mask.\n \"\"\"\n if np.array(img).ndim != 3:\n raise ValueError(\"Input must be 3D.\")\n if not (\n 0 <= red_thresh <= 255 and 0 <= green_thresh <= 255 and 0 <= blue_thresh <= 255\n ):\n raise ValueError(\"RGB Thresholds must be in range [0, 255]\")\n\n img_arr = np.array(img)\n red = img_arr[:, :, 0] < red_thresh\n green = img_arr[:, :, 1] > green_thresh\n blue = img_arr[:, :, 2] > blue_thresh\n return red | green | blue\n\n\ndef yen_threshold(\n img: PIL.Image.Image, relate: Callable[..., Any] = operator.lt\n) -> np.ndarray:\n \"\"\"Mask image based on pixel below Yen's threshold.\n\n Compute Yen threshold on image and return boolean mask based on pixels below this\n threshold.\n\n Parameters\n ----------\n img : PIL.Image.Image\n Input image.\n relate : operator, optional\n Operator to be used to compute the mask from the threshold. Default is\n operator.lt\n\n Returns\n -------\n np.ndarray\n Boolean NumPy array where True represents a pixel below Yen's threshold.\n \"\"\"\n\n yen_thresh = sk_filters.threshold_yen(np.array(img))\n return threshold_to_mask(img, yen_thresh, relate)\n"
] | [
[
"numpy.array",
"numpy.percentile",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
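The filter module in the record above boils down to the three NumPy calls in its API list (numpy.array, numpy.percentile, numpy.ones); the percentile logic lives in stretch_contrast. A minimal sketch of that percentile-based stretch using only NumPy and PIL follows; the record's np_to_pil helper is not available here, so PIL.Image.fromarray stands in for it, and the synthetic ramp image is an assumption made purely for illustration.

# Minimal sketch of a percentile-based contrast stretch (assumptions noted above).
import numpy as np
import PIL.Image

def percentile_stretch(img: PIL.Image.Image, low: int = 40, high: int = 60) -> PIL.Image.Image:
    # Mirror stretch_contrast: map the [low, high] band on a 0-255 scale to
    # percentiles of the image, then rescale that range to full 8-bit.
    arr = np.array(img).astype(float)
    low_p, high_p = np.percentile(arr, (low * 100 / 255, high * 100 / 255))
    stretched = np.clip((arr - low_p) / max(high_p - low_p, 1e-6), 0.0, 1.0) * 255
    return PIL.Image.fromarray(stretched.astype(np.uint8))

# Usage on a synthetic grayscale ramp (each row runs 0..63).
demo = PIL.Image.fromarray((np.ones((64, 64)) * np.arange(64)).astype(np.uint8))
print(percentile_stretch(demo).getextrema())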
paulchou0309/obj | [
"d7ae404fa73db60a6fe539d613e48f478b81dbef"
] | [
"object_detection/exporter.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Functions to export object detection inference graph.\"\"\"\nimport logging\nimport os\nimport tempfile\nimport tensorflow as tf\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import graph_util\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.saved_model import signature_constants\nfrom tensorflow.python.training import saver as saver_lib\nfrom object_detection.builders import model_builder\nfrom object_detection.core import standard_fields as fields\nfrom object_detection.data_decoders import tf_example_decoder\n\nslim = tf.contrib.slim\n\n\n# TODO: Replace with freeze_graph.freeze_graph_with_def_protos when\n# newer version of Tensorflow becomes more common.\ndef freeze_graph_with_def_protos(\n input_graph_def,\n input_saver_def,\n input_checkpoint,\n output_node_names,\n restore_op_name,\n filename_tensor_name,\n clear_devices,\n initializer_nodes,\n optimize_graph=True,\n variable_names_blacklist=''):\n \"\"\"Converts all variables in a graph and checkpoint into constants.\"\"\"\n del restore_op_name, filename_tensor_name # Unused by updated loading code.\n\n # 'input_checkpoint' may be a prefix if we're using Saver V2 format\n if not saver_lib.checkpoint_exists(input_checkpoint):\n raise ValueError(\n 'Input checkpoint \"' + input_checkpoint + '\" does not exist!')\n\n if not output_node_names:\n raise ValueError(\n 'You must supply the name of a node to --output_node_names.')\n\n # Remove all the explicit device specifications for this node. 
This helps to\n # make the graph more portable.\n if clear_devices:\n for node in input_graph_def.node:\n node.device = ''\n\n with tf.Graph().as_default():\n tf.import_graph_def(input_graph_def, name='')\n\n if optimize_graph:\n logging.info('Graph Rewriter optimizations enabled')\n rewrite_options = rewriter_config_pb2.RewriterConfig(\n optimize_tensor_layout=True)\n rewrite_options.optimizers.append('pruning')\n rewrite_options.optimizers.append('constfold')\n rewrite_options.optimizers.append('layout')\n graph_options = tf.GraphOptions(\n rewrite_options=rewrite_options, infer_shapes=True)\n else:\n logging.info('Graph Rewriter optimizations disabled')\n graph_options = tf.GraphOptions()\n config = tf.ConfigProto(graph_options=graph_options)\n with session.Session(config=config) as sess:\n if input_saver_def:\n saver = saver_lib.Saver(saver_def=input_saver_def)\n saver.restore(sess, input_checkpoint)\n else:\n var_list = {}\n reader = pywrap_tensorflow.NewCheckpointReader(input_checkpoint)\n var_to_shape_map = reader.get_variable_to_shape_map()\n for key in var_to_shape_map:\n try:\n tensor = sess.graph.get_tensor_by_name(key + ':0')\n except KeyError:\n # This tensor doesn't exist in the graph (for example it's\n # 'global_step' or a similar housekeeping element) so skip it.\n continue\n var_list[key] = tensor\n saver = saver_lib.Saver(var_list=var_list)\n saver.restore(sess, input_checkpoint)\n if initializer_nodes:\n sess.run(initializer_nodes)\n\n variable_names_blacklist = (variable_names_blacklist.split(',') if\n variable_names_blacklist else None)\n output_graph_def = graph_util.convert_variables_to_constants(\n sess,\n input_graph_def,\n output_node_names.split(','),\n variable_names_blacklist=variable_names_blacklist)\n\n return output_graph_def\n\n\ndef replace_variable_values_with_moving_averages(graph,\n current_checkpoint_file,\n new_checkpoint_file):\n \"\"\"Replaces variable values in the checkpoint with their moving averages.\n\n If the current checkpoint has shadow variables maintaining moving averages of\n the variables defined in the graph, this function generates a new checkpoint\n where the variables contain the values of their moving averages.\n\n Args:\n graph: a tf.Graph object.\n current_checkpoint_file: a checkpoint containing both original variables and\n their moving averages.\n new_checkpoint_file: file path to write a new checkpoint.\n \"\"\"\n with graph.as_default():\n variable_averages = tf.train.ExponentialMovingAverage(0.0)\n ema_variables_to_restore = variable_averages.variables_to_restore()\n with tf.Session() as sess:\n read_saver = tf.train.Saver(ema_variables_to_restore)\n read_saver.restore(sess, current_checkpoint_file)\n write_saver = tf.train.Saver()\n write_saver.save(sess, new_checkpoint_file)\n\n\ndef _image_tensor_input_placeholder(input_shape=None):\n \"\"\"Returns input placeholder and a 4-D uint8 image tensor.\"\"\"\n if input_shape is None:\n input_shape = (None, None, None, 3)\n input_tensor = tf.placeholder(\n dtype=tf.uint8, shape=input_shape, name='image_tensor')\n return input_tensor, input_tensor\n\n\ndef _tf_example_input_placeholder():\n \"\"\"Returns input that accepts a batch of strings with tf examples.\n\n Returns:\n a tuple of input placeholder and the output decoded images.\n \"\"\"\n batch_tf_example_placeholder = tf.placeholder(\n tf.string, shape=[None], name='tf_example')\n def decode(tf_example_string_tensor):\n tensor_dict = tf_example_decoder.TfExampleDecoder().decode(\n tf_example_string_tensor)\n image_tensor = 
tensor_dict[fields.InputDataFields.image]\n return image_tensor\n return (batch_tf_example_placeholder,\n tf.map_fn(decode,\n elems=batch_tf_example_placeholder,\n dtype=tf.uint8,\n parallel_iterations=32,\n back_prop=False))\n\n\ndef _encoded_image_string_tensor_input_placeholder():\n \"\"\"Returns input that accepts a batch of PNG or JPEG strings.\n\n Returns:\n a tuple of input placeholder and the output decoded images.\n \"\"\"\n batch_image_str_placeholder = tf.placeholder(\n dtype=tf.string,\n shape=[None],\n name='encoded_image_string_tensor')\n def decode(encoded_image_string_tensor):\n image_tensor = tf.image.decode_image(encoded_image_string_tensor,\n channels=3)\n image_tensor.set_shape((None, None, 3))\n return image_tensor\n return (batch_image_str_placeholder,\n tf.map_fn(\n decode,\n elems=batch_image_str_placeholder,\n dtype=tf.uint8,\n parallel_iterations=32,\n back_prop=False))\n\n\ninput_placeholder_fn_map = {\n 'image_tensor': _image_tensor_input_placeholder,\n 'encoded_image_string_tensor':\n _encoded_image_string_tensor_input_placeholder,\n 'tf_example': _tf_example_input_placeholder,\n}\n\n\ndef _add_output_tensor_nodes(postprocessed_tensors,\n output_collection_name='inference_op'):\n \"\"\"Adds output nodes for detection boxes and scores.\n\n Adds the following nodes for output tensors -\n * num_detections: float32 tensor of shape [batch_size].\n * detection_boxes: float32 tensor of shape [batch_size, num_boxes, 4]\n containing detected boxes.\n * detection_scores: float32 tensor of shape [batch_size, num_boxes]\n containing scores for the detected boxes.\n * detection_classes: float32 tensor of shape [batch_size, num_boxes]\n containing class predictions for the detected boxes.\n * detection_masks: (Optional) float32 tensor of shape\n [batch_size, num_boxes, mask_height, mask_width] containing masks for each\n detection box.\n\n Args:\n postprocessed_tensors: a dictionary containing the following fields\n 'detection_boxes': [batch, max_detections, 4]\n 'detection_scores': [batch, max_detections]\n 'detection_classes': [batch, max_detections]\n 'detection_masks': [batch, max_detections, mask_height, mask_width]\n (optional).\n 'num_detections': [batch]\n output_collection_name: Name of collection to add output tensors to.\n\n Returns:\n A tensor dict containing the added output tensor nodes.\n \"\"\"\n label_id_offset = 1\n boxes = postprocessed_tensors.get('detection_boxes')\n scores = postprocessed_tensors.get('detection_scores')\n classes = postprocessed_tensors.get('detection_classes') + label_id_offset\n masks = postprocessed_tensors.get('detection_masks')\n num_detections = postprocessed_tensors.get('num_detections')\n outputs = {}\n outputs['detection_boxes'] = tf.identity(boxes, name='detection_boxes')\n outputs['detection_scores'] = tf.identity(scores, name='detection_scores')\n outputs['detection_classes'] = tf.identity(classes, name='detection_classes')\n outputs['num_detections'] = tf.identity(num_detections, name='num_detections')\n if masks is not None:\n outputs['detection_masks'] = tf.identity(masks, name='detection_masks')\n for output_key in outputs:\n tf.add_to_collection(output_collection_name, outputs[output_key])\n if masks is not None:\n tf.add_to_collection(output_collection_name, outputs['detection_masks'])\n return outputs\n\ndef _add_predict_tensor_nodes(predict_tensors,\n output_collection_name='predict_op'):\n \"\"\"Adds predict nodes for region proposal boxes and feature.\n\n Adds the following nodes for output tensors -\n * 
proposal_boxes_normalized: float32 tensor of shape [batch_size, num_boxes, 4]\n containing region proposal boxes(after non maximum surpression).\n\n Args:\n predict_tensors: a dictionary containing the following fields\n 'proposal_boxes_normalized': [batch, max_detections, 4]\n output_collection_name: Name of collection to add output tensors to.\n\n Returns:\n A tensor dict containing the added output tensor nodes.\n \"\"\"\n label_id_offset = 1\n boxes = predict_tensors.get('proposal_boxes_normalized')\n outputs = {}\n outputs['proposal_boxes'] = tf.identity(boxes, name='proposal_boxes')\n for output_key in outputs:\n tf.add_to_collection(output_collection_name, outputs[output_key])\n return outputs\n\ndef _write_frozen_graph(frozen_graph_path, frozen_graph_def):\n \"\"\"Writes frozen graph to disk.\n\n Args:\n frozen_graph_path: Path to write inference graph.\n frozen_graph_def: tf.GraphDef holding frozen graph.\n \"\"\"\n with gfile.GFile(frozen_graph_path, 'wb') as f:\n f.write(frozen_graph_def.SerializeToString())\n logging.info('%d ops in the final graph.', len(frozen_graph_def.node))\n\n\ndef _write_saved_model(saved_model_path,\n frozen_graph_def,\n inputs,\n outputs):\n \"\"\"Writes SavedModel to disk.\n\n If checkpoint_path is not None bakes the weights into the graph thereby\n eliminating the need of checkpoint files during inference. If the model\n was trained with moving averages, setting use_moving_averages to true\n restores the moving averages, otherwise the original set of variables\n is restored.\n\n Args:\n saved_model_path: Path to write SavedModel.\n frozen_graph_def: tf.GraphDef holding frozen graph.\n inputs: The input image tensor to use for detection.\n outputs: A tensor dictionary containing the outputs of a DetectionModel.\n \"\"\"\n with tf.Graph().as_default():\n with session.Session() as sess:\n\n tf.import_graph_def(frozen_graph_def, name='')\n\n builder = tf.saved_model.builder.SavedModelBuilder(saved_model_path)\n\n tensor_info_inputs = {\n 'inputs': tf.saved_model.utils.build_tensor_info(inputs)}\n tensor_info_outputs = {}\n for k, v in outputs.items():\n tensor_info_outputs[k] = tf.saved_model.utils.build_tensor_info(v)\n\n detection_signature = (\n tf.saved_model.signature_def_utils.build_signature_def(\n inputs=tensor_info_inputs,\n outputs=tensor_info_outputs,\n method_name=signature_constants.PREDICT_METHOD_NAME))\n\n builder.add_meta_graph_and_variables(\n sess, [tf.saved_model.tag_constants.SERVING],\n signature_def_map={\n signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:\n detection_signature,\n },\n )\n builder.save()\n\n\ndef _write_graph_and_checkpoint(inference_graph_def,\n model_path,\n input_saver_def,\n trained_checkpoint_prefix):\n for node in inference_graph_def.node:\n node.device = ''\n with tf.Graph().as_default():\n tf.import_graph_def(inference_graph_def, name='')\n with session.Session() as sess:\n saver = saver_lib.Saver(saver_def=input_saver_def,\n save_relative_paths=True)\n saver.restore(sess, trained_checkpoint_prefix)\n saver.save(sess, model_path)\n\n\ndef _export_inference_graph(input_type,\n detection_model,\n use_moving_averages,\n trained_checkpoint_prefix,\n output_directory,\n additional_output_tensor_names=None,\n input_shape=None,\n optimize_graph=True,\n output_collection_name='inference_op'):\n \"\"\"Export helper.\"\"\"\n tf.gfile.MakeDirs(output_directory)\n frozen_graph_path = os.path.join(output_directory,\n 'frozen_inference_graph.pb')\n saved_model_path = os.path.join(output_directory, 'saved_model')\n 
model_path = os.path.join(output_directory, 'model.ckpt')\n\n if input_type not in input_placeholder_fn_map:\n raise ValueError('Unknown input type: {}'.format(input_type))\n placeholder_args = {}\n if input_shape is not None:\n if input_type != 'image_tensor':\n raise ValueError('Can only specify input shape for `image_tensor` '\n 'inputs.')\n placeholder_args['input_shape'] = input_shape\n placeholder_tensor, input_tensors = input_placeholder_fn_map[input_type](\n **placeholder_args)\n inputs = tf.to_float(input_tensors)\n preprocessed_inputs = detection_model.preprocess(inputs)\n output_tensors = detection_model.predict(preprocessed_inputs)\n outputs = _add_predict_tensor_nodes(output_tensors,\n output_collection_name)\n \n # postprocessed_tensors = detection_model.postprocess(output_tensors)\n # outputs = _add_output_tensor_nodes(postprocessed_tensors,\n # output_collection_name)\n # Add global step to the graph.\n slim.get_or_create_global_step()\n\n if use_moving_averages:\n temp_checkpoint_file = tempfile.NamedTemporaryFile()\n replace_variable_values_with_moving_averages(\n tf.get_default_graph(), trained_checkpoint_prefix,\n temp_checkpoint_file.name)\n checkpoint_to_use = temp_checkpoint_file.name\n else:\n checkpoint_to_use = trained_checkpoint_prefix\n\n saver = tf.train.Saver()\n input_saver_def = saver.as_saver_def()\n\n _write_graph_and_checkpoint(\n inference_graph_def=tf.get_default_graph().as_graph_def(),\n model_path=model_path,\n input_saver_def=input_saver_def,\n trained_checkpoint_prefix=checkpoint_to_use)\n\n if additional_output_tensor_names is not None:\n output_node_names = ','.join(list(outputs.keys())+additional_output_tensor_names)\n else:\n output_node_names = ','.join(outputs.keys())\n\n frozen_graph_def = freeze_graph_with_def_protos(\n input_graph_def=tf.get_default_graph().as_graph_def(),\n input_saver_def=input_saver_def,\n input_checkpoint=checkpoint_to_use,\n output_node_names=output_node_names,\n restore_op_name='save/restore_all',\n filename_tensor_name='save/Const:0',\n clear_devices=True,\n optimize_graph=optimize_graph,\n initializer_nodes='')\n _write_frozen_graph(frozen_graph_path, frozen_graph_def)\n _write_saved_model(saved_model_path, frozen_graph_def,\n placeholder_tensor, outputs)\n\n\ndef export_inference_graph(input_type,\n pipeline_config,\n trained_checkpoint_prefix,\n output_directory,\n input_shape=None,\n optimize_graph=True,\n output_collection_name='inference_op',\n additional_output_tensor_names=['SecondStageBoxPredictor/AvgPool']):\n \"\"\"Exports inference graph for the model specified in the pipeline config.\n\n Args:\n input_type: Type of input for the graph. Can be one of [`image_tensor`,\n `tf_example`].\n pipeline_config: pipeline_pb2.TrainAndEvalPipelineConfig proto.\n trained_checkpoint_prefix: Path to the trained checkpoint file.\n output_directory: Path to write outputs.\n input_shape: Sets a fixed shape for an `image_tensor` input. 
If not\n specified, will default to [None, None, None, 3].\n optimize_graph: Whether to optimize graph using Grappler.\n output_collection_name: Name of collection to add output tensors to.\n If None, does not add output tensors to a collection.\n additional_output_tensor_names: list of additional output\n tensors to include in the frozen graph.\n \"\"\"\n detection_model = model_builder.build(pipeline_config.model,\n is_training=False)\n _export_inference_graph(input_type, detection_model,\n pipeline_config.eval_config.use_moving_averages,\n trained_checkpoint_prefix,\n output_directory, additional_output_tensor_names,\n input_shape, optimize_graph, output_collection_name)\n"
] | [
[
"tensorflow.python.platform.gfile.GFile",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.gfile.MakeDirs",
"tensorflow.map_fn",
"tensorflow.image.decode_image",
"tensorflow.get_default_graph",
"tensorflow.Graph",
"tensorflow.import_graph_def",
"tensorflow.python.pywrap_tensorflow.NewCheckpointReader",
"tensorflow.ConfigProto",
"tensorflow.to_float",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.saved_model.builder.SavedModelBuilder",
"tensorflow.identity",
"tensorflow.placeholder",
"tensorflow.python.client.session.Session",
"tensorflow.saved_model.utils.build_tensor_info",
"tensorflow.add_to_collection",
"tensorflow.core.protobuf.rewriter_config_pb2.RewriterConfig",
"tensorflow.saved_model.signature_def_utils.build_signature_def",
"tensorflow.GraphOptions",
"tensorflow.python.training.saver.checkpoint_exists",
"tensorflow.python.training.saver.Saver"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
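The exporter in the record above is written against the TensorFlow 1.x graph/session API. A toy sketch of the freezing step it builds on (graph_util.convert_variables_to_constants baking variable values into constants) is below; it assumes a TF 1.x runtime, and the two-op graph and tensor names are assumptions standing in for a real detection model.

# Minimal freeze sketch, assuming TensorFlow 1.x (tf.Session / graph_util).
import tensorflow as tf
from tensorflow.python.framework import graph_util

graph = tf.Graph()
with graph.as_default():
    inp = tf.placeholder(tf.float32, shape=[None, 3], name='image_tensor')
    w = tf.Variable(tf.ones([3, 2]), name='weights')
    out = tf.identity(tf.matmul(inp, w), name='detection_scores')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Bake the variable values into constants, as the exporter does before
        # writing frozen_inference_graph.pb.
        frozen = graph_util.convert_variables_to_constants(
            sess, graph.as_graph_def(), ['detection_scores'])

print(len(frozen.node), 'ops in the frozen toy graph')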
lydia07/mdsearch | [
"a328e822d6d66869aeefef687887b0a39d4f4512"
] | [
"mdsearch/Similarity/sentence_similarity.py"
] | [
"import torch\nfrom scipy.spatial.distance import cosine\nfrom transformers import BertModel, BertTokenizer\nimport os\n\n\nclass SentenceSimilarity:\n\n def __init__(self, model_path='bert-base-uncased'):\n self.tokenizer = BertTokenizer.from_pretrained(model_path)\n self.model = BertModel.from_pretrained(model_path)\n self.model.eval()\n self.device = torch.device('cuda:0')\n self.model = self.model.to(self.device)\n\n def text_to_tensor(self, text):\n text = text.strip().lower()\n tokens = self.tokenizer.tokenize(text)\n tokens_ids = self.tokenizer.convert_tokens_to_ids(tokens)\n tokens_ids = self.tokenizer.build_inputs_with_special_tokens(tokens_ids)\n tokens_tensor = torch.tensor([tokens_ids])\n return tokens_tensor\n\n def get_embedding(self, sent):\n tokens_tensor = self.text_to_tensor(sent)\n tokens_tensor = tokens_tensor.to(self.device)\n with torch.no_grad():\n output = self.model(tokens_tensor)[0]\n embedding = output[0].mean(dim=0).cpu().numpy()\n return embedding\n\n def similarity(self, emb1, emb2):\n return cosine(emb1, emb2)\n\n\nif __name__ == '__main__':\n\n ss = SentenceSimilarity()\n s1 = 'I am a girl'\n s2 = 'I am a boy'\n s3 = 'Thank you'\n print(\"1\")\n e1 = ss.get_embedding(s1)\n print(type(e1))\n e2 = ss.get_embedding(s2)\n e3 = ss.get_embedding(s3)\n print(\"2\")\n print(1 - ss.similarity(e1, e2))\n print(1 - ss.similarity(e1, e3))\n print(\"3\")\n"
] | [
[
"torch.device",
"torch.no_grad",
"scipy.spatial.distance.cosine",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
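SentenceSimilarity.similarity in the record above returns scipy's cosine *distance*, which is why the __main__ block prints 1 - similarity to get an actual similarity score. A minimal sketch of that convention on plain NumPy vectors (loading bert-base-uncased and computing real embeddings is assumed out of scope here; the vectors are made up):

import numpy as np
from scipy.spatial.distance import cosine

e1 = np.array([0.2, 0.9, 0.1])
e2 = np.array([0.25, 0.85, 0.05])
e3 = np.array([0.9, 0.1, 0.4])

# cosine() is a distance in [0, 2]; 1 - cosine() recovers cosine similarity.
print('sim(e1, e2) =', 1 - cosine(e1, e2))   # close vectors -> near 1
print('sim(e1, e3) =', 1 - cosine(e1, e3))   # dissimilar vectors -> smaller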
ParadoxZW/CIFAR100-PRACTICE | [
"175d9a72fc8e7d79ec3ef8670028d1efe830e5b9"
] | [
"Chanet.py"
] | [
"# just for fun, give channel some meanings about relations\n# between positions.\nfrom modules import *\nfrom torch import tensor\nimport torch\nimport numpy as np\nimport torch.nn.functional as F\nfrom torch import nn\n\n\n\nclass Group(nn.Module):\n \"\"\"\n resblocks with same input and output size.\n \"\"\"\n\n def __init__(self, n, in_channels, in_width):\n super(Group, self).__init__()\n branch1 = [SeResBlock(channels=in_channels) for _ in range(n)]\n self.branch1 = nn.Sequential(*group)\n branch2 = [Channel_Attn(id_dim=in_channels, N=in_width**2) for _ in range(n)]\n self.branch2 = nn.Sequential(*group)\n\n def forward(self, x):\n return torch.cat((self.branch1(x), self.branch2(x)), 1)\n\n\nclass Chanet(nn.Module):\n \"\"\"\n wideresnet for cifar10.\n \"\"\"\n\n def __init__(self, n=6, k=10):\n super(Chanet, self).__init__()\n self.cin = Conv2d(3, 16 * k,\n kernel_size=3, stride=1, padding=1)\n self.fc = nn.Linear(128 * k, 10)\n self.resnet = nn.Sequential(Group(n=n, in_channels=16 * k, in_width=32),\n nn.MaxPool2d(2, stride=2, padding=0),\n Group(n=n, in_channels=32 * k, in_width=16),\n nn.MaxPool2d(2, stride=2, padding=0),\n Group(n=n, in_channels=64 * k, in_width=8))\n self.GlobalAvgPooling = nn.AdaptiveAvgPool2d(1)\n\n def forward(self, x):\n x = self.cin(x)\n x = self.resnet(x)\n x = self.GlobalAvgPooling(x)\n x = self.fc(x.view(x.shape[0], -1))\n # return F.softmax(x, dim=1)\n return x\n"
] | [
[
"torch.nn.Linear",
"torch.nn.Sequential",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.MaxPool2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
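In Chanet.py above, Group.__init__ builds the local lists branch1 and branch2 but then passes an undefined name group to nn.Sequential, so constructing a Group raises a NameError. A corrected sketch of the intended pattern follows; nn.Identity stands in for the repo's SeResBlock and Channel_Attn (those live in the repo's modules.py, which is not part of this record, so the placeholders are assumptions):

import torch
from torch import nn

class GroupSketch(nn.Module):
    """Two parallel stacks of n blocks whose outputs are concatenated on channels."""
    def __init__(self, n, in_channels, in_width, block=nn.Identity, attn=nn.Identity):
        super(GroupSketch, self).__init__()
        # in_width kept for parity with the record (Channel_Attn uses N=in_width**2 there).
        branch1 = [block() for _ in range(n)]
        self.branch1 = nn.Sequential(*branch1)   # record passes *group here (NameError)
        branch2 = [attn() for _ in range(n)]
        self.branch2 = nn.Sequential(*branch2)   # same fix for the second branch

    def forward(self, x):
        # Output has 2 * in_channels channels because the two branches are concatenated.
        return torch.cat((self.branch1(x), self.branch2(x)), 1)

g = GroupSketch(n=2, in_channels=16, in_width=32)
print(g(torch.randn(1, 16, 32, 32)).shape)   # torch.Size([1, 32, 32, 32])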
JingweiToo/Machine-Learning-Regression-Toolbox | [
"77f2b1ee49cf5e5116102197064ce2dc13a23ed0"
] | [
"MLR/nn.py"
] | [
"import numpy as np\r\nfrom sklearn.neural_network import MLPRegressor\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn.model_selection import LeaveOneOut\r\nfrom sklearn.metrics import r2_score\r\n\r\n\r\ndef jho(feat, label, opts):\r\n ho = 0.3 # ratio of testing set\r\n hls = 100 # hidden layer size \r\n fun = 'relu' # activation function\r\n max_iter = 100 # maximum iterations\r\n \r\n if 'ho' in opts:\r\n ho = opts['ho']\r\n if 'hls' in opts:\r\n hls = opts['hls']\r\n if 'fun' in opts:\r\n fun = opts['fun']\r\n if 'T' in opts:\r\n max_iter = opts['T']\r\n \r\n # number of instances\r\n num_data = np.size(feat, 0)\r\n label = label.reshape(num_data) # Solve bug\r\n \r\n # prepare data\r\n xtrain, xtest, ytrain, ytest = train_test_split(feat, label, test_size=ho) \r\n # train model\r\n mdl = MLPRegressor(hidden_layer_sizes=(hls,), activation=fun, max_iter=max_iter) \r\n mdl.fit(xtrain, ytrain)\r\n \r\n # prediction\r\n ypred = mdl.predict(xtest)\r\n \r\n # mean square error\r\n mse = np.mean((ytest - ypred) ** 2)\r\n # r2 score\r\n r2 = r2_score(ytest, ypred)\r\n \r\n print(\"Mean Square Error (NN_HO):\", mse)\r\n print(\"R Square Score (NN_HO):\", r2)\r\n \r\n nn = {'mse': mse, 'r2': r2, 'xtest': xtest, 'ytest': ytest, 'ypred': ypred}\r\n \r\n return nn\r\n \r\n\r\ndef jkfold(feat, label, opts):\r\n kfold = 10 # number of k in kfold\r\n hls = 100 # hidden layer size \r\n fun = 'relu' # activation function\r\n max_iter = 100 # maximum iterations\r\n \r\n if 'kfold' in opts:\r\n kfold = opts['kfold']\r\n if 'hls' in opts:\r\n hls = opts['hls']\r\n if 'fun' in opts:\r\n fun = opts['fun']\r\n if 'T' in opts:\r\n max_iter = opts['T']\r\n \r\n # number of instances\r\n num_data = np.size(feat, 0)\r\n # define selected features\r\n x_data = feat\r\n y_data = label.reshape(num_data) # Solve bug\r\n \r\n fold = KFold(n_splits=kfold)\r\n fold.get_n_splits(x_data, y_data)\r\n \r\n ytest2 = []\r\n ypred2 = []\r\n t = 0\r\n for train_idx, test_idx in fold.split(x_data, y_data):\r\n xtrain = x_data[train_idx,:] \r\n ytrain = y_data[train_idx]\r\n xtest = x_data[test_idx,:]\r\n ytest = y_data[test_idx]\r\n # train model\r\n mdl = MLPRegressor(hidden_layer_sizes=(hls,), activation=fun, max_iter=max_iter) \r\n mdl.fit(xtrain, ytrain)\r\n # prediction\r\n ypred = mdl.predict(xtest)\r\n \r\n ytest2 = np.concatenate((ytest2, ytest), axis=0)\r\n ypred2 = np.concatenate((ypred2, ypred), axis=0)\r\n \r\n if t == 0:\r\n xtest2 = xtest\r\n else:\r\n xtest2 = np.concatenate((xtest2, xtest), axis=0)\r\n \r\n t += 1\r\n\r\n # mean square error\r\n mse = np.mean((ytest2 - ypred2) ** 2)\r\n # r2 score\r\n r2 = r2_score(ytest2, ypred2)\r\n \r\n print(\"Mean Square Error (NN_K-fold):\", mse)\r\n print(\"R Square Score (NN_K-fold):\", r2)\r\n \r\n nn = {'mse': mse, 'r2': r2, 'xtest': xtest2, 'ytest': ytest2, 'ypred': ypred2}\r\n \r\n return nn\r\n\r\n\r\ndef jloo(feat, label, opts):\r\n hls = 100 # hidden layer size \r\n fun = 'relu' # activation function\r\n max_iter = 100 # maximum iterations\r\n\r\n if 'hls' in opts:\r\n hls = opts['hls']\r\n if 'fun' in opts:\r\n fun = opts['fun']\r\n if 'T' in opts:\r\n max_iter = opts['T']\r\n \r\n # number of instances\r\n num_data = np.size(feat, 0)\r\n # define selected features\r\n x_data = feat\r\n y_data = label.reshape(num_data) # Solve bug\r\n \r\n loo = LeaveOneOut()\r\n loo.get_n_splits(x_data)\r\n \r\n ytest2 = []\r\n ypred2 = []\r\n t = 0\r\n for train_idx, test_idx in loo.split(x_data):\r\n xtrain = 
x_data[train_idx,:] \r\n ytrain = y_data[train_idx]\r\n xtest = x_data[test_idx,:]\r\n ytest = y_data[test_idx]\r\n # train model\r\n mdl = MLPRegressor(hidden_layer_sizes=(hls,), activation=fun, max_iter=max_iter) \r\n mdl.fit(xtrain, ytrain)\r\n # prediction\r\n ypred = mdl.predict(xtest)\r\n \r\n ytest2 = np.concatenate((ytest2, ytest), axis=0)\r\n ypred2 = np.concatenate((ypred2, ypred), axis=0)\r\n \r\n if t == 0:\r\n xtest2 = xtest\r\n else:\r\n xtest2 = np.concatenate((xtest2, xtest), axis=0)\r\n \r\n t += 1\r\n \r\n # mean square error\r\n mse = np.mean((ytest2 - ypred2) ** 2)\r\n # r2 score\r\n r2 = r2_score(ytest2, ypred2)\r\n \r\n print(\"Mean Square Error (NN_LOO):\", mse)\r\n print(\"R Square Score (NN_LOO):\", r2)\r\n \r\n nn = {'mse': mse, 'r2': r2, 'xtest': xtest2, 'ytest': ytest2, 'ypred': ypred2}\r\n \r\n return nn\r\n\r\n"
] | [
[
"sklearn.metrics.r2_score",
"sklearn.model_selection.LeaveOneOut",
"sklearn.model_selection.train_test_split",
"sklearn.model_selection.KFold",
"numpy.concatenate",
"numpy.size",
"numpy.mean",
"sklearn.neural_network.MLPRegressor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
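The jho / jkfold / jloo helpers above all follow the same fit-predict-score shape around MLPRegressor. A minimal hold-out sketch of that shape on synthetic data (the feature matrix, targets, and hyperparameter values here are assumptions, chosen only to make the snippet self-contained):

import numpy as np
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score

rng = np.random.RandomState(0)
feat = rng.rand(200, 5)                          # synthetic features
label = feat @ rng.rand(5) + 0.05 * rng.randn(200)  # noisy linear targets

# Hold-out split, train, predict, score -- the jho() path in miniature.
xtrain, xtest, ytrain, ytest = train_test_split(feat, label, test_size=0.3)
mdl = MLPRegressor(hidden_layer_sizes=(100,), activation='relu', max_iter=100)
mdl.fit(xtrain, ytrain)
ypred = mdl.predict(xtest)

print('MSE:', np.mean((ytest - ypred) ** 2))
print('R2 :', r2_score(ytest, ypred))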
xxelloss/Markov-Learning | [
"56b47f046fcc130b33aeaff7792fd73ee40f0501"
] | [
"Markov_comp.py"
] | [
"# Markov chain comparison class\n# create multiple Markov_learning classes, and conduct comparison\n\nimport numpy as np\nimport Markov_learning as ml\nimport copy\n\nclass Markov_comp(object):\n # attributes\n # it may have multiple Markov_learning objects\n # maximum, 10\n ML=[]\n # how many MLs? for comparison between different evolution schedules.\n num_ML = 0\n # testmode?\n test_mode = 0\n # how many conditions?\n conditions = 0\n # status matrix size\n size = 0\n # current status matrix\n status_t0 = 0\n # total time length\n length = 0\n # matrix for comparison-regression.\n comp_matrix = []\n\n\n \n def __init__(self, conditions, size, length, schedules):\n #initialize\n # test mode, if all -1s\n if conditions == -1 & size == -1 & length == -1 & schedules == -1:\n # test mode, as published\n self.conditions = 3\n self.size = 2\n self.num_ML = 2\n # x = ml.Markov_learning(-1,-1,-1)\n# self.ML.append(x)\n# y = ml.Markov_learning(-2,-2,-2)\n# self.ML.append(y)\n self.ML_test1=copy.copy(ml.Markov_learning(-1,-1,-1))\n self.ML_test2=copy.copy(ml.Markov_learning(-2,-2,-2))\n# self.ML = [ml.Markov_learning(-1,-1,-1),ml.Markov_learning(-2,-2,-2)]\n# self.ML = [ml.Markov_learning() for i in range(2)]\n# self.ML[0] = ml.Markov_learning(-1,-1,-1)\n# self.ML[1] = ml.Markov_learning(-2,-2,-2)\n \n self.test_mode = 1\n self.length = 100\n self.status_t0 = np.zeros((self.size))\n\n\n # testmode\n def test1(self):\n if self.test_mode < 1:\n return -1\n self.ML[0].test1()\n \n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
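Markov_comp.__init__ above tests for the all -1 sentinel with the bitwise & operator; because & binds tighter than ==, the expression only behaves as intended by the coincidence that -1 & x == x for integers, and the class-level ML = [] is shared between instances. A sketch of the same branch written with `and` and an instance-level list (Markov_learning is not in the record, so the sketch omits populating ML):

import numpy as np

class MarkovCompSketch(object):
    def __init__(self, conditions, size, length, schedules):
        self.ML = []            # per-instance list, not a shared class attribute
        self.test_mode = 0
        # `and` states the intent directly instead of relying on & precedence.
        if conditions == -1 and size == -1 and length == -1 and schedules == -1:
            self.conditions = 3
            self.size = 2
            self.num_ML = 2
            self.test_mode = 1
            self.length = 100
            self.status_t0 = np.zeros(self.size)

mc = MarkovCompSketch(-1, -1, -1, -1)
print(mc.test_mode, mc.status_t0.shape)   # 1 (2,)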
Shank2358/NPMMR-Det | [
"869f3f537af9bc656f2bfdfa97ebb95bf70847a7",
"414d148ff2ba5edbe870a8dafb6336845fb9ffbb"
] | [
"model/layers/multiscale_fusion_blocks.py",
"lib/cocoapi_windows/PythonAPI/setup.py"
] | [
"import torch\nimport torch.nn as nn\nfrom ..layers.convolutions import Convolutional, Separable_Conv_dila, Separable_Conv, Deformable_Convolutional\nimport torch.nn.functional as F\nfrom ..layers.attention_blocks import SELayer\n\nclass SPP(nn.Module):\n def __init__(self, depth=512):\n super(SPP,self).__init__()\n self.__maxpool5 = nn.MaxPool2d(kernel_size=5, stride=1, padding=2)\n self.__maxpool9 = nn.MaxPool2d(kernel_size=9, stride=1, padding=4)\n self.__maxpool13 = nn.MaxPool2d(kernel_size=13, stride=1, padding=6)\n self.__outconv = nn.Conv2d(depth * 4, depth, 1, 1)\n\n def forward(self, x):\n maxpool5 = self.__maxpool5(x)\n maxpool9 = self.__maxpool9(x)\n maxpool13 = self.__maxpool13(x)\n cat_maxpool = torch.cat([x, maxpool5, maxpool9, maxpool13], dim=1)\n SPP = self.__outconv(cat_maxpool)\n return SPP\n\nclass SPP_rec(nn.Module):\n def __init__(self, depth=512):\n super(SPP_rec,self).__init__()\n self.__maxpool5 = nn.MaxPool2d(kernel_size=5, stride=1, padding=2)\n self.__maxpool9 = nn.MaxPool2d(kernel_size=9, stride=1, padding=4)\n self.__maxpool13 = nn.MaxPool2d(kernel_size=13, stride=1, padding=6)\n\n self.__maxpool5w = nn.MaxPool2d(kernel_size=(5,1), stride=1, padding=(2,0))\n self.__maxpool5h = nn.MaxPool2d(kernel_size=(1,5), stride=1, padding=(0,2))\n self.__maxpool9w = nn.MaxPool2d(kernel_size=(9,3), stride=1, padding=(4,1))\n self.__maxpool9h = nn.MaxPool2d(kernel_size=(3,9), stride=1, padding=(1,4))\n self.__maxpool13w = nn.MaxPool2d(kernel_size=(13,5), stride=1, padding=(6,2))\n self.__maxpool13h = nn.MaxPool2d(kernel_size=(5,13), stride=1, padding=(2,6))\n\n self.__outconv = nn.Conv2d(depth * 4, depth, 1, 1)\n\n def forward(self, x):\n maxpool5 = self.__maxpool5(x) + self.__maxpool5h(x) + self.__maxpool5w(x)\n maxpool9 = self.__maxpool9(x) + self.__maxpool9h(x) + self.__maxpool9w(x)\n maxpool13 = self.__maxpool13(x) + self.__maxpool13h(x) + self.__maxpool13w(x)\n cat_maxpool = torch.cat([x, maxpool5, maxpool9, maxpool13], dim=1)\n SPP_rec = self.__outconv(cat_maxpool)\n return SPP_rec\n\nclass ASPP_se(nn.Module):\n def __init__(self, in_channel=1280, depth=512):\n super(ASPP_se,self).__init__()\n self.__dilaconv1 = nn.Conv2d(in_channel, depth, 1, 1)\n self.__dilaconv5 = nn.Conv2d(in_channel, depth, 3, 1, padding=2, dilation=2)\n self.__dilaconv9 = nn.Conv2d(in_channel, depth, 3, 1, padding=4, dilation=4)\n self.__dilaconv13 = nn.Conv2d(in_channel, depth, 3, 1, padding=6, dilation=6)\n self.__outconv = nn.Conv2d(depth * 4, depth, 1, 1)\n self.__se = SELayer(depth)\n\n\n def forward(self, x):\n dilaconv1 = self.__dilaconv1(x)\n dilaconv5 = self.__dilaconv5(x)\n dilaconv9 = self.__dilaconv9(x)\n dilaconv13 = self.__dilaconv13(x)\n cat_dilaconv = torch.cat([dilaconv1, dilaconv5, dilaconv9, dilaconv13], dim=1)\n ASPP_se = self.__se(self.__outconv(cat_dilaconv))\n return ASPP_se\n\nclass ASPP(nn.Module):\n def __init__(self, in_channel=1280, depth=512):\n super(ASPP,self).__init__()\n self.__dilaconv1 = nn.Conv2d(in_channel, depth, 1, 1)\n self.__dilaconv5 = nn.Conv2d(in_channel, depth, 3, 1, padding=2, dilation=2)\n self.__dilaconv9 = nn.Conv2d(in_channel, depth, 3, 1, padding=4, dilation=4)\n self.__dilaconv13 = nn.Conv2d(in_channel, depth, 3, 1, padding=6, dilation=6)\n self.__outconv = nn.Conv2d(depth * 4, depth, 1, 1)\n\n def forward(self, x):\n dilaconv1 = self.__dilaconv1(x)\n dilaconv5 = self.__dilaconv5(x)\n dilaconv9 = self.__dilaconv9(x)\n dilaconv13 = self.__dilaconv13(x)\n cat_dilaconv = torch.cat([dilaconv1, dilaconv5, dilaconv9, dilaconv13], dim=1)\n ASPP = 
self.__outconv(cat_dilaconv)\n return ASPP\n\nclass Sparable_ASPP(nn.Module):\n def __init__(self, in_channel=1280, depth=512):\n super(Sparable_ASPP,self).__init__()\n self.__dilaconv1 = nn.Conv2d(in_channel, depth, 1, 1)\n self.__dilaconv5 = Separable_Conv_dila(in_channel, depth, 1, pad=2, dila=2)\n self.__dilaconv9 = Separable_Conv_dila(in_channel, depth, 1, pad=4, dila=4)\n self.__dilaconv13 = Separable_Conv_dila(in_channel, depth, 1, pad=6, dila=6)\n self.__outconv = nn.Conv2d(depth * 4, depth, 1, 1)\n\n def forward(self, x):\n dilaconv1 = self.__dilaconv1(x)\n dilaconv5 = self.__dilaconv5(x)\n dilaconv9 = self.__dilaconv9(x)\n dilaconv13 = self.__dilaconv13(x)\n cat_dilaconv = torch.cat([dilaconv1, dilaconv5, dilaconv9, dilaconv13], dim=1)\n ASPP = self.__outconv(cat_dilaconv)\n return ASPP\n\nclass Sparable_ASPP_se(nn.Module):\n def __init__(self, in_channel=1024, depth=512):\n super(Sparable_ASPP_se,self).__init__()\n self.__dilaconv1 = Separable_Conv(in_channel, depth, 1)\n self.__dilaconv5 = Separable_Conv_dila(depth, depth, 1, pad=2, dila=2)\n self.__dilaconv9 = Separable_Conv_dila(depth, depth//2, 1, pad=4, dila=4)\n self.__dilaconv13 = Separable_Conv_dila(depth, depth//2, 1, pad=6, dila=6)\n self.__outconv = nn.Conv2d(depth * 3, depth, 1, 1)\n #self.__outconv = Convolutional(filters_in=depth * 3, filters_out=depth, kernel_size=1, stride=1, pad=0, norm='bn', activate='leaky')\n self.__se = SELayer(depth)\n\n def forward(self, x):\n dilaconv1 = self.__dilaconv1(x)\n dilaconv5 = self.__dilaconv5(dilaconv1)\n dilaconv9 = self.__dilaconv9(dilaconv1)\n dilaconv13 = self.__dilaconv13(dilaconv1)\n cat_dilaconv = torch.cat([dilaconv1, dilaconv5, dilaconv9, dilaconv13], dim=1)\n ASPP_se = self.__se(self.__outconv(cat_dilaconv))\n #ASPP_se = self.__outconv(cat_dilaconv)\n return ASPP_se\n\nclass ASFF(nn.Module):\n def __init__(self, level, vis=False):\n super(ASFF, self).__init__()\n self.level = level\n self.dim = [512,256,128]\n self.inter_dim = self.dim[self.level]\n if level == 0:\n self.stride_level_1 = Convolutional(256, self.inter_dim, 3, 2, pad=1, norm='bn', activate='relu6')\n self.stride_level_2 = Convolutional(128, self.inter_dim, 3, 2, pad=1, norm='bn', activate='relu6')\n self.expand = Convolutional(self.inter_dim, 1024, 3, 1, pad=1, norm='bn', activate='relu6')\n elif level == 1:\n self.compress_level_0 = Convolutional(512, self.inter_dim, 1, 1, pad=0, norm='bn', activate='relu6')\n self.stride_level_2 = Convolutional(128, self.inter_dim, 3, 2, pad=1, norm='bn', activate='relu6')\n self.expand = Convolutional(self.inter_dim, 512, 3, 1, pad=1, norm='bn', activate='relu6')\n elif level == 2:\n self.compress_level_0 = Convolutional(512, self.inter_dim, 1, 1, pad=0, norm='bn', activate='relu6')\n self.compress_level_1 = Convolutional(256, self.inter_dim, 1, 1, pad=0, norm='bn', activate='relu6')\n self.expand = Convolutional(self.inter_dim, 256, 3, 1, pad=1, norm='bn', activate='relu6')\n compress_c = 16\n self.weight_level_0 = Convolutional(self.inter_dim, compress_c, 1, 1, pad=0, norm='bn', activate='relu6')\n self.weight_level_1 = Convolutional(self.inter_dim, compress_c, 1, 1, pad=0, norm='bn', activate='relu6')\n self.weight_level_2 = Convolutional(self.inter_dim, compress_c, 1, 1, pad=0, norm='bn', activate='relu6')\n self.weight_levels = nn.Conv2d(compress_c * 3, 3, kernel_size=1, stride=1, padding=0)\n self.vis = vis\n\n def forward(self, x_level_0, x_level_1, x_level_2):\n if self.level == 0:\n level_0_resized = x_level_0\n level_1_resized = 
self.stride_level_1(x_level_1)\n level_2_downsampled_inter = F.max_pool2d(x_level_2, 3, stride=2, padding=1)\n level_2_resized = self.stride_level_2(level_2_downsampled_inter)\n elif self.level == 1:\n level_0_compressed = self.compress_level_0(x_level_0)\n level_0_resized = F.interpolate(level_0_compressed, scale_factor=2, mode='nearest')\n level_1_resized = x_level_1\n level_2_resized = self.stride_level_2(x_level_2)\n elif self.level == 2:\n level_0_compressed = self.compress_level_0(x_level_0)\n level_0_resized = F.interpolate(level_0_compressed, scale_factor=4, mode='nearest')\n level_1_compressed = self.compress_level_1(x_level_1)\n level_1_resized = F.interpolate(level_1_compressed, scale_factor=2, mode='nearest')\n level_2_resized = x_level_2\n\n level_0_weight_v = self.weight_level_0(level_0_resized)\n level_1_weight_v = self.weight_level_1(level_1_resized)\n level_2_weight_v = self.weight_level_2(level_2_resized)\n levels_weight_v = torch.cat((level_0_weight_v, level_1_weight_v, level_2_weight_v), 1)\n levels_weight = self.weight_levels(levels_weight_v)\n levels_weight = F.softmax(levels_weight, dim=1)\n\n fused_out_reduced = level_0_resized * levels_weight[:, 0:1, :, :] + \\\n level_1_resized * levels_weight[:, 1:2, :, :] + \\\n level_2_resized * levels_weight[:, 2:, :, :]\n\n out = self.expand(fused_out_reduced)\n\n if self.vis:\n return out, levels_weight, fused_out_reduced.sum(dim=1)\n else:\n return out\n\nclass ASFF_Mobile(nn.Module):\n def __init__(self, level, vis=False):\n super(ASFF_Mobile, self).__init__()\n self.level = level\n self.dim = [512,256,128]\n self.inter_dim = self.dim[self.level]\n if level == 0:\n self.stride_level_1 = Separable_Conv(256, self.inter_dim, 2)\n self.stride_level_2 = Separable_Conv(128, self.inter_dim, 2)\n self.expand = Separable_Conv(self.inter_dim, 1024, 1)\n elif level == 1:\n self.compress_level_0 = Convolutional(512, self.inter_dim, 1, 1, pad=0, norm='bn', activate='relu6')\n self.stride_level_2 = Separable_Conv(128, self.inter_dim, 2)\n self.expand = Separable_Conv(self.inter_dim, 512, 1)\n elif level == 2:\n self.compress_level_0 = Convolutional(512, self.inter_dim, 1, 1, pad=0, norm='bn', activate='relu6')\n self.compress_level_1 = Convolutional(256, self.inter_dim, 1, 1, pad=0, norm='bn', activate='relu6')\n self.expand = Separable_Conv(self.inter_dim, 256, 1)\n compress_c = 16\n self.weight_level_0 = Convolutional(self.inter_dim, compress_c, 1, 1, pad=0, norm='bn', activate='relu6')\n self.weight_level_1 = Convolutional(self.inter_dim, compress_c, 1, 1, pad=0, norm='bn', activate='relu6')\n self.weight_level_2 = Convolutional(self.inter_dim, compress_c, 1, 1, pad=0, norm='bn', activate='relu6')\n self.weight_levels = nn.Conv2d(compress_c * 3, 3, kernel_size=1, stride=1, padding=0)\n self.vis = vis\n\n def forward(self, x_level_0, x_level_1, x_level_2):\n if self.level == 0:\n level_0_resized = x_level_0\n level_1_resized = self.stride_level_1(x_level_1)\n level_2_downsampled_inter = F.max_pool2d(x_level_2, 3, stride=2, padding=1)\n level_2_resized = self.stride_level_2(level_2_downsampled_inter)\n elif self.level == 1:\n level_0_compressed = self.compress_level_0(x_level_0)\n level_0_resized = F.interpolate(level_0_compressed, scale_factor=2, mode='nearest')\n level_1_resized = x_level_1\n level_2_resized = self.stride_level_2(x_level_2)\n elif self.level == 2:\n level_0_compressed = self.compress_level_0(x_level_0)\n level_0_resized = F.interpolate(level_0_compressed, scale_factor=4, mode='nearest')\n level_1_compressed = 
self.compress_level_1(x_level_1)\n level_1_resized = F.interpolate(level_1_compressed, scale_factor=2, mode='nearest')\n level_2_resized = x_level_2\n\n level_0_weight_v = self.weight_level_0(level_0_resized)\n level_1_weight_v = self.weight_level_1(level_1_resized)\n level_2_weight_v = self.weight_level_2(level_2_resized)\n levels_weight_v = torch.cat((level_0_weight_v, level_1_weight_v, level_2_weight_v), 1)\n levels_weight = self.weight_levels(levels_weight_v)\n levels_weight = F.softmax(levels_weight, dim=1)\n\n fused_out_reduced = level_0_resized * levels_weight[:, 0:1, :, :] + \\\n level_1_resized * levels_weight[:, 1:2, :, :] + \\\n level_2_resized * levels_weight[:, 2:, :, :]\n\n out = self.expand(fused_out_reduced)\n\n if self.vis:\n return out, levels_weight, fused_out_reduced.sum(dim=1)\n else:\n return out\n\nclass FeatureAdaption(nn.Module):\n def __init__(self, in_ch, out_ch, n_anchors):\n super(FeatureAdaption, self).__init__()\n self.sep=False\n self.conv_offset = nn.Conv2d(in_channels=2*n_anchors, out_channels=2*9*n_anchors, groups = n_anchors, kernel_size=1,stride=1,padding=0)\n self.dconv = Deformable_Convolutional(filters_in=in_ch, filters_out=out_ch, kernel_size=3, stride=1, pad=1, groups=n_anchors)\n\n def forward(self, input, wh_pred):\n wh_pred_new = wh_pred.detach()\n offset = self.conv_offset(wh_pred_new)\n out = self.dconv(input, offset)\n return out\n\nclass Features_Fusion(nn.Module):\n def __init__(self, in_channels, out_channels, r=16):\n super(Features_Fusion,self).__init__()\n self.out_channels = out_channels\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.conv_fc1 = Convolutional(in_channels, in_channels // r, kernel_size=1, stride=1, pad=0, norm='bn', activate='leaky')\n self.conv_fc2 = nn.Conv2d(in_channels // r, out_channels * 2, kernel_size=1, padding=0, bias=False)\n self.softmax = nn.Softmax(dim=2)\n\n\n def forward(self, x1, x2):\n batch_size = x1.size(0)\n x_mix = torch.add(x1,x2) # 逐元素相加生成 混合特征U\n x_avg = self.avg_pool(x_mix)\n x_fcout = self.conv_fc2(self.conv_fc1(x_avg)) # 先降维,后升维,结果中前一半通道值为a,后一半为b\n x_reshape = x_fcout.reshape(batch_size, self.out_channels, 2, -1) # 调整形状,变为两个全连接层的值\n x_softmax = self.softmax(x_reshape) # 使得两个全连接层对应位置进行softmax\n w1 = x_softmax[:, :, 0:1,:] #将tensor按照指定维度切分成2个tensor块\n w2 = x_softmax[:, :, 1:2,:]\n out = x1*w1 + x2*w2 # 两个加权后的特征 逐元素相加\n return out",
"from distutils.core import setup\nfrom Cython.Build import cythonize\nfrom distutils.extension import Extension\nimport numpy as np\n\n# To install and compile to your anaconda/python site-packages, simply run:\n# $ pip install git+https://github.com/philferriere/cocoapi.git#subdirectory=PythonAPI\n# Note that the original compile flags below are GCC flags unsupported by the Visual C++ 2015 build tools.\n# They can safely be removed.\n\next_modules = [\n Extension(\n 'pycocotools._mask',\n sources=['../common/maskApi.c', 'pycocotools/_mask.pyx'],\n include_dirs = [np.get_include(), '../common'],\n extra_compile_args=[] # originally was ['-std=c99'],\n )\n]\n\nsetup(name='pycocotools',\n packages=['pycocotools'],\n package_dir = {'pycocotools': 'pycocotools'},\n version='2.0',\n ext_modules=\n cythonize(ext_modules)\n )\n"
] | [
[
"torch.nn.Softmax",
"torch.nn.functional.softmax",
"torch.add",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.functional.interpolate",
"torch.nn.functional.max_pool2d"
],
[
"numpy.get_include"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
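The SPP block at the top of multiscale_fusion_blocks.py above depends only on torch.nn primitives (three stride-1 max-pools at kernel sizes 5/9/13 plus a 1x1 conv over their concatenation), so it can be sketched without the repo's Convolutional / SELayer helpers; the toy tensor sizes below are assumptions:

import torch
import torch.nn as nn

class SPPSketch(nn.Module):
    """Spatial pyramid pooling: concat of 5/9/13 max-pools, fused by a 1x1 conv."""
    def __init__(self, depth=512):
        super(SPPSketch, self).__init__()
        self.pool5 = nn.MaxPool2d(kernel_size=5, stride=1, padding=2)
        self.pool9 = nn.MaxPool2d(kernel_size=9, stride=1, padding=4)
        self.pool13 = nn.MaxPool2d(kernel_size=13, stride=1, padding=6)
        self.out = nn.Conv2d(depth * 4, depth, 1, 1)

    def forward(self, x):
        # Padding keeps the spatial size; channels go depth*4 -> depth.
        cat = torch.cat([x, self.pool5(x), self.pool9(x), self.pool13(x)], dim=1)
        return self.out(cat)

spp = SPPSketch(depth=32)
print(spp(torch.randn(1, 32, 16, 16)).shape)   # torch.Size([1, 32, 16, 16])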
babak2520/ml-io | [
"d79a895c3fe5e10f0f832cfdcee5a73058abb7c7"
] | [
"src/mlio-py/mlio/integ/scipy.py"
] | [
"# Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport numpy as np\n\nfrom mlio._core import CooTensor\nfrom scipy.sparse import coo_matrix\n\n\ndef to_coo_matrix(tensor):\n \"\"\"\n Converts the specified Tensor to a ``coo_matrix``.\n \"\"\"\n\n if not isinstance(tensor, CooTensor):\n raise ValueError(\"The Tensor must be an Instance of CooTensor.\")\n\n s = tensor.shape\n\n if len(s) > 2:\n raise ValueError(\"Only one- and two-dimensional COO tensors are \"\n \"supported.\")\n\n if len(s) == 1:\n s = (1,) + s\n\n data = np.array(tensor.data, copy=False)\n rows = np.array(tensor.indices(0), copy=False)\n cols = np.array(tensor.indices(1), copy=False)\n\n return coo_matrix((data, (rows, cols)), s, copy=True)\n\n\ndef to_tensor(mtx):\n \"\"\"\n Converts the specified ``coo_matrix`` to a Tensor.\n \"\"\"\n\n if not isinstance(mtx, coo_matrix):\n raise ValueError(\"Only coo_matrix is supported.\")\n\n rows = mtx.row\n cols = mtx.col\n\n rows = rows.astype(np.int64, copy=True)\n cols = cols.astype(np.int64, copy=True)\n\n return CooTensor(mtx.shape, mtx.data, [rows, cols], copy=False)\n"
] | [
[
"scipy.sparse.coo_matrix",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
MothVine/DESC | [
"8f18ca63b34dad07ec67a4d43945d39287b303b8",
"8f18ca63b34dad07ec67a4d43945d39287b303b8",
"8f18ca63b34dad07ec67a4d43945d39287b303b8"
] | [
"tests/test_configuration.py",
"desc/vmec.py",
"desc/equilibrium.py"
] | [
"import numpy as np\nimport pytest\nimport unittest\nfrom desc.equilibrium import Equilibrium, EquilibriaFamily\nfrom desc.grid import ConcentricGrid\nfrom desc.profiles import PowerSeriesProfile, SplineProfile\nfrom desc.geometry import (\n FourierRZCurve,\n FourierRZToroidalSurface,\n ZernikeRZToroidalSection,\n)\n\n\nclass TestConstructor(unittest.TestCase):\n def test_defaults(self):\n\n eq = Equilibrium()\n\n self.assertEqual(eq.spectral_indexing, \"ansi\")\n self.assertEqual(eq.NFP, 1)\n self.assertEqual(eq.L, 1)\n self.assertEqual(eq.M, 1)\n self.assertEqual(eq.N, 0)\n self.assertEqual(eq.sym, False)\n self.assertTrue(eq.surface.eq(FourierRZToroidalSurface()))\n self.assertIsInstance(eq.pressure, PowerSeriesProfile)\n np.testing.assert_allclose(eq.p_l, [0])\n self.assertIsInstance(eq.iota, PowerSeriesProfile)\n np.testing.assert_allclose(eq.i_l, [0])\n\n def test_supplied_objects(self):\n\n pressure = SplineProfile([1, 2, 3])\n iota = SplineProfile([2, 3, 4])\n surface = ZernikeRZToroidalSection(spectral_indexing=\"ansi\")\n axis = FourierRZCurve([-1, 10, 1], [1, 0, -1], NFP=2)\n\n eq = Equilibrium(pressure=pressure, iota=iota, surface=surface, axis=axis)\n\n self.assertTrue(eq.pressure.eq(pressure))\n self.assertTrue(eq.iota.eq(iota))\n self.assertTrue(eq.surface.eq(surface))\n self.assertTrue(eq.axis.eq(axis))\n self.assertEqual(eq.spectral_indexing, \"ansi\")\n self.assertEqual(eq.NFP, 2)\n\n surface2 = FourierRZToroidalSurface(NFP=3)\n eq2 = Equilibrium(surface=surface2)\n self.assertEqual(eq2.NFP, 3)\n self.assertEqual(eq2.axis.NFP, 3)\n\n eq3 = Equilibrium(surface=surface, axis=None)\n np.testing.assert_allclose(eq3.axis.R_n, [10])\n\n def test_dict(self):\n\n inputs = {\n \"L\": 4,\n \"M\": 2,\n \"N\": 2,\n \"NFP\": 3,\n \"sym\": False,\n \"spectral_indexing\": \"ansi\",\n \"surface\": np.array(\n [[0, 0, 0, 10, 0], [0, 1, 0, 1, 1], [0, -1, 1, 0.1, 0.1]]\n ),\n \"axis\": np.array([[0, 10, 0]]),\n \"pressure\": np.array([[0, 10], [2, 5]]),\n \"iota\": np.array([[0, 1], [2, 3]]),\n }\n eq = Equilibrium(**inputs)\n\n self.assertEqual(eq.L, 4)\n self.assertEqual(eq.M, 2)\n self.assertEqual(eq.N, 2)\n self.assertEqual(eq.NFP, 3)\n self.assertEqual(eq.spectral_indexing, \"ansi\")\n np.testing.assert_allclose(eq.p_l, [10, 0, 5])\n np.testing.assert_allclose(eq.i_l, [1, 0, 3])\n self.assertIsInstance(eq.surface, FourierRZToroidalSurface)\n np.testing.assert_allclose(\n eq.Rb_lmn,\n [\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 10.0,\n 1.0,\n 0.0,\n 0.0,\n 0.1,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n ],\n )\n np.testing.assert_allclose(\n eq.Zb_lmn,\n [\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 1.0,\n 0.0,\n 0.0,\n 0.1,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n ],\n )\n\n inputs[\"surface\"] = np.array([[0, 0, 0, 10, 0], [1, 1, 0, 1, 1]])\n eq = Equilibrium(**inputs)\n self.assertEqual(eq.bdry_mode, \"poincare\")\n np.testing.assert_allclose(\n eq.Rb_lmn, [10.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n )\n\n def test_asserts(self):\n\n with pytest.raises(AssertionError):\n eq = Equilibrium(L=3.4)\n with pytest.raises(AssertionError):\n eq = Equilibrium(M=3.4)\n with pytest.raises(AssertionError):\n eq = Equilibrium(N=3.4)\n with pytest.raises(AssertionError):\n eq = Equilibrium(NFP=3.4j)\n with pytest.raises(ValueError):\n eq = Equilibrium(surface=np.array([[1, 1, 1, 10, 2]]))\n with pytest.raises(TypeError):\n eq = 
Equilibrium(surface=FourierRZCurve())\n with pytest.raises(TypeError):\n eq = Equilibrium(axis=2)\n with pytest.raises(ValueError):\n eq = Equilibrium(surface=FourierRZToroidalSurface(NFP=1), NFP=2)\n with pytest.raises(TypeError):\n eq = Equilibrium(pressure=\"abc\")\n with pytest.raises(TypeError):\n eq = Equilibrium(iota=\"def\")\n\n def test_supplied_coeffs(self):\n\n R_lmn = np.random.random(3)\n Z_lmn = np.random.random(3)\n L_lmn = np.random.random(3)\n eq = Equilibrium(R_lmn=R_lmn, Z_lmn=Z_lmn, L_lmn=L_lmn)\n np.testing.assert_allclose(R_lmn, eq.R_lmn)\n np.testing.assert_allclose(Z_lmn, eq.Z_lmn)\n np.testing.assert_allclose(L_lmn, eq.L_lmn)\n\n with pytest.raises(ValueError):\n eq = Equilibrium(L=4, R_lmn=R_lmn)\n\n\nclass TestInitialGuess(unittest.TestCase):\n def test_default_set(self):\n eq = Equilibrium()\n eq.set_initial_guess()\n np.testing.assert_allclose(eq.compute(\"V\")[\"V\"], 2 * 10 * np.pi * np.pi * 1 * 1)\n del eq._axis\n eq.set_initial_guess()\n np.testing.assert_allclose(eq.compute(\"V\")[\"V\"], 2 * 10 * np.pi * np.pi * 1 * 1)\n\n def test_errors(self):\n\n eq = Equilibrium()\n with pytest.raises(ValueError):\n eq.set_initial_guess(1, \"a\", 4, 5, 6)\n with pytest.raises(ValueError):\n eq.set_initial_guess(1, 2)\n with pytest.raises(ValueError):\n eq.set_initial_guess(eq, eq.surface)\n with pytest.raises(TypeError):\n eq.set_initial_guess(eq.surface, [1, 2, 3])\n del eq._surface\n with pytest.raises(ValueError):\n eq.set_initial_guess()\n\n with pytest.raises(ValueError):\n eq.set_initial_guess(\"path\", 3)\n with pytest.raises(ValueError):\n eq.set_initial_guess(\"path\", \"hdf5\")\n with pytest.raises(ValueError):\n eq.surface = eq.get_surface_at(rho=1)\n eq.change_resolution(2, 2, 2)\n eq._initial_guess_surface(eq.R_basis, eq.R_lmn, eq.R_basis)\n with pytest.raises(ValueError):\n eq._initial_guess_surface(\n eq.R_basis, eq.surface.R_lmn, eq.surface.R_basis, mode=\"foo\"\n )\n\n def test_guess_from_other(self):\n\n eq1 = Equilibrium(L=4, M=2)\n eq2 = Equilibrium(L=2, M=1)\n eq2.set_initial_guess(eq1)\n\n eq2.change_resolution(L=4, M=2)\n np.testing.assert_allclose(eq1.R_lmn, eq2.R_lmn)\n np.testing.assert_allclose(eq1.Z_lmn, eq2.Z_lmn)\n\n def test_guess_from_file(self):\n\n eq1 = Equilibrium(L=24, M=12, sym=True, spectral_indexing=\"fringe\")\n path = \"./tests/inputs/SOLOVEV_output.h5\"\n eq1.set_initial_guess(path)\n eq2 = EquilibriaFamily.load(path)\n\n np.testing.assert_allclose(eq1.R_lmn, eq2[-1].R_lmn)\n np.testing.assert_allclose(eq1.Z_lmn, eq2[-1].Z_lmn)\n\n def test_guess_from_surface(self):\n\n eq = Equilibrium()\n surface = FourierRZToroidalSurface()\n # turn the circular cross section into an elipse w AR=2\n surface.set_coeffs(m=-1, n=0, R=None, Z=2)\n # move z axis up to 0.5 for no good reason\n axis = FourierRZCurve([0, 10, 0], [0, 0.5, 0])\n eq.set_initial_guess(surface, axis)\n np.testing.assert_allclose(eq.compute(\"V\")[\"V\"], 2 * 10 * np.pi * np.pi * 2 * 1)\n\n def test_guess_from_surface2(self):\n\n eq = Equilibrium()\n # specify an interior flux surface\n surface = FourierRZToroidalSurface(rho=0.5)\n eq.set_initial_guess(surface)\n np.testing.assert_allclose(\n eq.compute(\"V\")[\"V\"], 2 * 10 * np.pi * np.pi * 2 ** 2\n )\n\n def test_guess_from_points(self):\n eq = Equilibrium(L=3, M=3, N=1)\n # these are just the default circular tokamak with a random normal\n # perturbation with std=0.03, fixed for repeatability\n eq.R_lmn = np.array(\n [\n 3.94803875e-02,\n 7.27321367e-03,\n -8.88095373e-03,\n 1.47523628e-02,\n 1.18518478e-02,\n 
-2.61657165e-02,\n -1.27473081e-02,\n 3.26441003e-02,\n 4.47427817e-03,\n 1.24734770e-02,\n 9.99231496e00,\n -2.74400311e-03,\n 1.00447777e00,\n 3.22285107e-02,\n 1.16571026e-02,\n -3.15868165e-03,\n -6.77657739e-04,\n -1.97894171e-02,\n 2.13535622e-02,\n -2.19703593e-02,\n 5.15586341e-02,\n 3.39651128e-02,\n -1.66077603e-02,\n -2.20514583e-02,\n -3.13335598e-02,\n 7.16090760e-02,\n -1.30064709e-03,\n -4.00687024e-02,\n 5.25583677e-02,\n 4.04325991e-03,\n ]\n )\n eq.Z_lmn = np.array(\n [\n 2.58179465e-02,\n -6.58108612e-03,\n 3.67459870e-02,\n 9.32236734e-04,\n -2.07982449e-03,\n -1.67700140e-02,\n 2.56951390e-02,\n -4.49230035e-04,\n 9.93325894e-02,\n 4.28162330e-03,\n 9.39812383e-03,\n 9.95829268e-01,\n 4.14468984e-02,\n -3.10725101e-02,\n -1.42026152e-02,\n -2.20423483e-02,\n -1.37389716e-02,\n -1.31592276e-02,\n -3.13922472e-02,\n 1.88145630e-03,\n 2.72255620e-02,\n -9.42746650e-03,\n 2.15264372e-02,\n 2.43549268e-02,\n 5.33383228e-02,\n 1.65948808e-02,\n 1.45908076e-03,\n 1.85101895e-02,\n 1.25967662e-02,\n -2.07374046e-02,\n ]\n )\n grid = ConcentricGrid(L=6, M=6, N=2, node_pattern=\"ocs\")\n coords = eq.compute(\"R\", grid)\n coords = eq.compute(\"lambda\", grid, coords)\n eq2 = Equilibrium(L=3, M=3, N=1)\n eq2.set_initial_guess(grid.nodes, coords[\"R\"], coords[\"Z\"], coords[\"lambda\"])\n np.testing.assert_allclose(eq.R_lmn, eq2.R_lmn, atol=1e-8)\n np.testing.assert_allclose(eq.Z_lmn, eq2.Z_lmn, atol=1e-8)\n np.testing.assert_allclose(eq.L_lmn, eq2.L_lmn, atol=1e-8)\n\n\nclass TestSurfaces(unittest.TestCase):\n def test_get_rho_surface(self):\n eq = Equilibrium()\n surf = eq.get_surface_at(rho=0.5)\n print(\"eq\", eq)\n\n print(\"surf\", surf)\n\n np.testing.assert_allclose(\n surf.compute_surface_area(), 4 * np.pi ** 2 * 10 * 0.5\n )\n assert surf.rho == 0.5\n\n def test_get_zeta_surface(self):\n eq = Equilibrium()\n surf = eq.get_surface_at(zeta=np.pi)\n np.testing.assert_allclose(surf.compute_surface_area(), np.pi * (1.0) ** 2)\n assert surf.zeta == np.pi\n\n def test_get_theta_surface(self):\n eq = Equilibrium()\n with pytest.raises(NotImplementedError):\n surf = eq.get_surface_at(theta=np.pi)\n\n def test_asserts(self):\n eq = Equilibrium()\n with pytest.raises(ValueError):\n surf = eq.get_surface_at(rho=1, zeta=2)\n with pytest.raises(AssertionError):\n surf = eq.get_surface_at(rho=1.2)\n",
"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom netCDF4 import Dataset, stringtochar\nfrom scipy import optimize, interpolate, integrate\n\nfrom desc.backend import sign\nfrom desc.utils import Timer, area_difference\nfrom desc.grid import Grid, LinearGrid\nfrom desc.basis import DoubleFourierSeries\nfrom desc.transform import Transform\nfrom desc.profiles import PowerSeriesProfile\nfrom desc.equilibrium import Equilibrium\nfrom desc.boundary_conditions import LCFSConstraint\nfrom desc.vmec_utils import (\n ptolemy_identity_fwd,\n ptolemy_identity_rev,\n fourier_to_zernike,\n zernike_to_fourier,\n)\n\n\nclass VMECIO:\n \"\"\"Performs input from VMEC netCDF files to DESC Equilibrium and vice-versa.\"\"\"\n\n @classmethod\n def load(cls, path, L=-1, M=-1, N=-1, spectral_indexing=\"fringe\"):\n \"\"\"Load a VMEC netCDF file as a Equilibrium.\n\n Parameters\n ----------\n path : str\n File path of input data.\n L : int, optional\n Radial resolution. Default determined by index.\n M : int, optional\n Poloidal resolution. Default = MPOL-1 from VMEC solution.\n N : int, optional\n Toroidal resolution. Default = NTOR from VMEC solution.\n spectral_indexing : str, optional\n Type of Zernike indexing scheme to use. (Default = ``'fringe'``)\n\n Returns\n -------\n eq: Equilibrium\n Equilibrium that resembles the VMEC data.\n\n \"\"\"\n file = Dataset(path, mode=\"r\")\n inputs = {}\n\n # parameters\n inputs[\"Psi\"] = float(file.variables[\"phi\"][-1])\n inputs[\"NFP\"] = int(file.variables[\"nfp\"][0])\n inputs[\"M\"] = M if M > 0 else int(file.variables[\"mpol\"][0] - 1)\n inputs[\"N\"] = N if N >= 0 else int(file.variables[\"ntor\"][0])\n inputs[\"spectral_indexing\"] = spectral_indexing\n default_L = {\n \"ansi\": inputs[\"M\"],\n \"fringe\": 2 * inputs[\"M\"],\n }\n inputs[\"L\"] = L if L >= 0 else default_L[inputs[\"spectral_indexing\"]]\n\n # data\n xm = file.variables[\"xm\"][:].filled()\n xn = file.variables[\"xn\"][:].filled() / inputs[\"NFP\"]\n rmnc = file.variables[\"rmnc\"][:].filled()\n zmns = file.variables[\"zmns\"][:].filled()\n lmns = file.variables[\"lmns\"][:].filled()\n try:\n rmns = file.variables[\"rmns\"][:].filled()\n zmnc = file.variables[\"zmnc\"][:].filled()\n lmnc = file.variables[\"lmnc\"][:].filled()\n inputs[\"sym\"] = False\n except:\n rmns = np.zeros_like(rmnc)\n zmnc = np.zeros_like(zmns)\n lmnc = np.zeros_like(lmns)\n inputs[\"sym\"] = True\n\n # profiles\n preset = file.dimensions[\"preset\"].size\n p0 = file.variables[\"presf\"][0] / file.variables[\"am\"][0]\n inputs[\"pressure\"] = np.zeros((preset, 2))\n inputs[\"pressure\"][:, 0] = np.arange(0, 2 * preset, 2)\n inputs[\"pressure\"][:, 1] = file.variables[\"am\"][:] * p0\n inputs[\"iota\"] = np.zeros((preset, 2))\n inputs[\"iota\"][:, 0] = np.arange(0, 2 * preset, 2)\n inputs[\"iota\"][:, 1] = file.variables[\"ai\"][:]\n\n file.close\n\n # boundary\n m, n, Rb_lmn = ptolemy_identity_fwd(xm, xn, s=rmns[-1, :], c=rmnc[-1, :])\n m, n, Zb_lmn = ptolemy_identity_fwd(xm, xn, s=zmns[-1, :], c=zmnc[-1, :])\n inputs[\"surface\"] = np.vstack((np.zeros_like(m), m, n, Rb_lmn, Zb_lmn)).T\n\n # initialize Equilibrium\n eq = Equilibrium(**inputs)\n\n # R\n m, n, R_mn = ptolemy_identity_fwd(xm, xn, s=rmns, c=rmnc)\n eq.R_lmn = fourier_to_zernike(m, n, R_mn, eq.R_basis)\n\n # Z\n m, n, Z_mn = ptolemy_identity_fwd(xm, xn, s=zmns, c=zmnc)\n eq.Z_lmn = fourier_to_zernike(m, n, Z_mn, eq.Z_basis)\n\n # lambda\n m, n, L_mn = ptolemy_identity_fwd(xm, xn, s=lmns, c=lmnc)\n eq.L_lmn = fourier_to_zernike(m, n, 
L_mn, eq.L_basis)\n\n BC = eq.surface.get_constraint(\n eq.R_basis,\n eq.Z_basis,\n eq.L_basis,\n )\n eq.x = BC.make_feasible(eq.x)\n\n return eq\n\n @classmethod\n def save(cls, eq, path, surfs=128, verbose=1):\n \"\"\"Save an Equilibrium as a netCDF file in the VMEC format.\n\n Parameters\n ----------\n eq : Equilibrium\n Equilibrium to save.\n path : str\n File path of output data.\n surfs: int\n Number of flux surfaces to interpolate at (Default = 128).\n verbose: int\n Level of output (Default = 1).\n * 0: no output\n * 1: status of quantities computed\n * 2: as above plus timing information\n\n Returns\n -------\n None\n\n \"\"\"\n timer = Timer()\n timer.start(\"Total time\")\n\n \"\"\" VMEC netCDF file is generated in VMEC2000/Sources/Input_Output/wrout.f\n see lines 300+ for full list of included variables\n \"\"\"\n file = Dataset(path, mode=\"w\", format=\"NETCDF3_64BIT_OFFSET\")\n\n Psi = eq.Psi\n NFP = eq.NFP\n M = eq.M\n N = eq.N\n M_nyq = M + 4\n N_nyq = N + 2 if N > 0 else 0\n\n # VMEC radial coordinate: s = rho^2 = Psi / Psi(LCFS)\n s_full = np.linspace(0, 1, surfs)\n s_half = s_full[0:-1] + 0.5 / (surfs - 1)\n r_full = np.sqrt(s_full)\n r_half = np.sqrt(s_half)\n\n # dimensions\n file.createDimension(\"radius\", surfs) # number of flux surfaces\n file.createDimension(\n \"mn_mode\", (2 * N + 1) * M + N + 1\n ) # number of Fourier modes\n file.createDimension(\n \"mn_mode_nyq\", (2 * N_nyq + 1) * M_nyq + N_nyq + 1\n ) # number of Nyquist Fourier modes\n file.createDimension(\"n_tor\", N + 1) # number of toroidal Fourier modes\n file.createDimension(\"preset\", 21) # dimension of profile inputs\n file.createDimension(\"ndfmax\", 101) # used for am_aux & ai_aux\n file.createDimension(\"time\", 100) # used for fsqrt & wdot\n file.createDimension(\"dim_00001\", 1)\n file.createDimension(\"dim_00020\", 20)\n file.createDimension(\"dim_00100\", 100)\n file.createDimension(\"dim_00200\", 200)\n preset = file.dimensions[\"preset\"].size\n\n # parameters\n timer.start(\"parameters\")\n if verbose > 0:\n print(\"Saving parameters\")\n\n version_ = file.createVariable(\"version_\", np.float64)\n version_[:] = 9 # VMEC version at the time of this writing\n\n input_extension = file.createVariable(\"input_extension\", \"S1\", (\"dim_00100\",))\n input_extension[:] = stringtochar(\n np.array([\" \" * 100], \"S\" + str(file.dimensions[\"dim_00100\"].size))\n ) # VMEC input filename: input.[input_extension]\n\n mgrid_mode = file.createVariable(\"mgrid_mode\", \"S1\", (\"dim_00001\",))\n mgrid_mode[:] = stringtochar(\n np.array([\"\"], \"S\" + str(file.dimensions[\"dim_00001\"].size))\n )\n\n mgrid_file = file.createVariable(\"mgrid_file\", \"S1\", (\"dim_00200\",))\n mgrid_file[:] = stringtochar(\n np.array([\"none\" + \" \" * 196], \"S\" + str(file.dimensions[\"dim_00200\"].size))\n )\n\n ier_flag = file.createVariable(\"ier_flag\", np.int32)\n ier_flag.long_name = \"error flag (0 = solved equilibrium, 1 = unsolved)\"\n ier_flag[:] = int(not eq.solved)\n\n lfreeb = file.createVariable(\"lfreeb__logical__\", np.int32)\n lfreeb.long_name = \"free boundary logical (0 = fixed boundary)\"\n lfreeb[:] = 0\n\n lrecon = file.createVariable(\"lrecon__logical__\", np.int32)\n lrecon.long_name = \"reconstruction logical (0 = no reconstruction)\"\n lrecon[:] = 0\n\n lrfp = file.createVariable(\"lrfp__logical__\", np.int32)\n lrfp.long_name = \"reverse-field pinch logical (0 = not an RFP)\"\n lrfp[:] = 0\n\n lasym = file.createVariable(\"lasym__logical__\", np.int32)\n lasym.long_name = \"asymmetry 
logical (0 = stellarator symmetry)\"\n lasym[:] = int(not eq.sym)\n\n nfp = file.createVariable(\"nfp\", np.int32)\n nfp.long_name = \"number of field periods\"\n nfp[:] = NFP\n\n ns = file.createVariable(\"ns\", np.int32)\n ns.long_name = \"number of flux surfaces\"\n ns[:] = surfs\n\n mpol = file.createVariable(\"mpol\", np.int32)\n mpol.long_name = \"number of poloidal Fourier modes\"\n mpol[:] = M + 1\n\n ntor = file.createVariable(\"ntor\", np.int32)\n ntor.long_name = \"number of positive toroidal Fourier modes\"\n ntor[:] = N\n\n mnmax = file.createVariable(\"mnmax\", np.int32)\n mnmax.long_name = \"total number of Fourier modes\"\n mnmax[:] = file.dimensions[\"mn_mode\"].size\n\n xm = file.createVariable(\"xm\", np.float64, (\"mn_mode\",))\n xm.long_name = \"poloidal mode numbers\"\n xm[:] = np.tile(np.linspace(0, M, M + 1), (2 * N + 1, 1)).T.flatten()[\n -file.dimensions[\"mn_mode\"].size :\n ]\n\n xn = file.createVariable(\"xn\", np.float64, (\"mn_mode\",))\n xn.long_name = \"toroidal mode numbers\"\n xn[:] = np.tile(np.linspace(-N, N, 2 * N + 1) * NFP, M + 1)[\n -file.dimensions[\"mn_mode\"].size :\n ]\n\n mnmax_nyq = file.createVariable(\"mnmax_nyq\", np.int32)\n mnmax_nyq.long_name = \"total number of Nyquist Fourier modes\"\n mnmax_nyq[:] = file.dimensions[\"mn_mode_nyq\"].size\n\n xm_nyq = file.createVariable(\"xm_nyq\", np.float64, (\"mn_mode_nyq\",))\n xm_nyq.long_name = \"poloidal Nyquist mode numbers\"\n xm_nyq[:] = np.tile(\n np.linspace(0, M_nyq, M_nyq + 1), (2 * N_nyq + 1, 1)\n ).T.flatten()[-file.dimensions[\"mn_mode_nyq\"].size :]\n\n xn_nyq = file.createVariable(\"xn_nyq\", np.float64, (\"mn_mode_nyq\",))\n xn_nyq.long_name = \"toroidal Nyquist mode numbers\"\n xn_nyq[:] = np.tile(np.linspace(-N_nyq, N_nyq, 2 * N_nyq + 1) * NFP, M_nyq + 1)[\n -file.dimensions[\"mn_mode_nyq\"].size :\n ]\n\n signgs = file.createVariable(\"signgs\", np.int32)\n signgs.long_name = \"sign of coordinate system jacobian\"\n signgs[:] = sign(eq.compute(\"sqrt(g)\", Grid(np.array([[1, 0, 0]])))[\"sqrt(g)\"])\n\n gamma = file.createVariable(\"gamma\", np.float64)\n gamma.long_name = \"compressibility index (0 = pressure prescribed)\"\n gamma[:] = 0\n\n # TODO: add option for saving spline profiles\n power_series = stringtochar(\n np.array(\n [\"power_series\" + \" \" * 8], \"S\" + str(file.dimensions[\"dim_00020\"].size)\n )\n )\n\n pmass_type = file.createVariable(\"pmass_type\", \"S1\", (\"dim_00020\",))\n pmass_type.long_name = \"parameterization of pressure function\"\n pmass_type[:] = power_series\n\n piota_type = file.createVariable(\"piota_type\", \"S1\", (\"dim_00020\",))\n piota_type.long_name = \"parameterization of rotational transform function\"\n piota_type[:] = power_series\n\n pcurr_type = file.createVariable(\"pcurr_type\", \"S1\", (\"dim_00020\",))\n pcurr_type.long_name = \"parameterization of current density function\"\n pcurr_type[:] = power_series\n\n am = file.createVariable(\"am\", np.float64, (\"preset\",))\n am.long_name = \"pressure coefficients\"\n am.units = \"Pa\"\n am[:] = np.zeros((file.dimensions[\"preset\"].size,))\n # only using up to 10th order to avoid poor conditioning\n am[:11] = PowerSeriesProfile.from_values(\n s_full, eq.pressure(r_full), order=10\n ).params\n\n ai = file.createVariable(\"ai\", np.float64, (\"preset\",))\n ai.long_name = \"rotational transform coefficients\"\n ai[:] = np.zeros((file.dimensions[\"preset\"].size,))\n # only using up to 10th order to avoid poor conditioning\n ai[:11] = PowerSeriesProfile.from_values(\n s_full, 
eq.iota(r_full), order=10\n ).params\n\n ac = file.createVariable(\"ac\", np.float64, (\"preset\",))\n ac.long_name = \"normalized toroidal current density coefficients\"\n ac[:] = np.zeros((file.dimensions[\"preset\"].size,))\n\n presf = file.createVariable(\"presf\", np.float64, (\"radius\",))\n presf.long_name = \"pressure on full mesh\"\n presf.units = \"Pa\"\n presf[:] = eq.pressure(r_full)\n\n pres = file.createVariable(\"pres\", np.float64, (\"radius\",))\n pres.long_name = \"pressure on half mesh\"\n pres.units = \"Pa\"\n pres[0] = 0\n pres[1:] = eq.pressure(r_half)\n\n mass = file.createVariable(\"mass\", np.float64, (\"radius\",))\n mass.long_name = \"mass on half mesh\"\n mass.units = \"Pa\"\n mass[:] = pres[:]\n\n iotaf = file.createVariable(\"iotaf\", np.float64, (\"radius\",))\n iotaf.long_name = \"rotational transform on full mesh\"\n iotaf[:] = eq.iota(r_full)\n\n iotas = file.createVariable(\"iotas\", np.float64, (\"radius\",))\n iotas.long_name = \"rotational transform on half mesh\"\n iotas[0] = 0\n iotas[1:] = eq.iota(r_half)\n\n phi = file.createVariable(\"phi\", np.float64, (\"radius\",))\n phi.long_name = \"toroidal flux\"\n phi.units = \"Wb\"\n phi[:] = np.linspace(0, Psi, surfs)\n\n phipf = file.createVariable(\"phipf\", np.float64, (\"radius\",))\n phipf.long_name = \"d(phi)/ds: toroidal flux derivative\"\n phipf[:] = Psi * np.ones((surfs,))\n\n phips = file.createVariable(\"phips\", np.float64, (\"radius\",))\n phips.long_name = \"d(phi)/ds * -1/2pi: toroidal flux derivative on half mesh\"\n phips[0] = 0\n phips[1:] = -phipf[1:] / (2 * np.pi)\n\n chi = file.createVariable(\"chi\", np.float64, (\"radius\",))\n chi.long_name = \"poloidal flux\"\n chi.units = \"Wb\"\n chi[:] = (\n 2\n * Psi\n * signgs[:]\n * integrate.cumtrapz(r_full * iotaf[:], r_full, initial=0)\n )\n\n chipf = file.createVariable(\"chipf\", np.float64, (\"radius\",))\n chipf.long_name = \"d(chi)/ds: poloidal flux derivative\"\n chipf[:] = phipf[:] * iotaf[:]\n\n Rmajor_p = file.createVariable(\"Rmajor_p\", np.float64)\n Rmajor_p.long_name = \"major radius\"\n Rmajor_p.units = \"m\"\n Rmajor_p[:] = eq.compute(\"R0\")[\"R0\"]\n\n Aminor_p = file.createVariable(\"Aminor_p\", np.float64)\n Aminor_p.long_name = \"minor radius\"\n Aminor_p.units = \"m\"\n Aminor_p[:] = eq.compute(\"a\")[\"a\"]\n\n aspect = file.createVariable(\"aspect\", np.float64)\n aspect.long_name = \"aspect ratio = R_major / A_minor\"\n aspect[:] = eq.compute(\"R0/a\")[\"R0/a\"]\n\n volume_p = file.createVariable(\"volume_p\", np.float64)\n volume_p.long_name = \"plasma volume\"\n volume_p.units = \"m^3\"\n volume_p[:] = eq.compute(\"V\")[\"V\"]\n\n timer.stop(\"parameters\")\n if verbose > 1:\n timer.disp(\"parameters\")\n\n # indepentent variables (exact conversion)\n\n # R axis\n idx = np.where(eq.R_basis.modes[:, 1] == 0)[0]\n R0_n = np.zeros((2 * N + 1,))\n for k in idx:\n (l, m, n) = eq.R_basis.modes[k, :]\n R0_n[n + N] += (-2 * (l % 2) + 1) * eq.R_lmn[k]\n raxis_cc = file.createVariable(\"raxis_cc\", np.float64, (\"n_tor\",))\n raxis_cc.long_name = \"cos(n*p) component of magnetic axis R coordinate\"\n raxis_cc[:] = R0_n[N:]\n if not eq.sym:\n raxis_cs = file.createVariable(\"raxis_cs\", np.float64, (\"n_tor\",))\n raxis_cs.long_name = \"sin(n*p) component of magnetic axis R coordinate\"\n raxis_cs[1:] = R0_n[0:N]\n\n # Z axis\n idx = np.where(eq.Z_basis.modes[:, 1] == 0)[0]\n Z0_n = np.zeros((2 * N + 1,))\n for k in idx:\n (l, m, n) = eq.Z_basis.modes[k, :]\n Z0_n[n + N] += (-2 * (l % 2) + 1) * eq.Z_lmn[k]\n zaxis_cs = 
file.createVariable(\"zaxis_cs\", np.float64, (\"n_tor\",))\n zaxis_cs.long_name = \"sin(n*p) component of magnetic axis Z coordinate\"\n zaxis_cs[1:] = Z0_n[0:N]\n if not eq.sym:\n zaxis_cc = file.createVariable(\"zaxis_cc\", np.float64, (\"n_tor\",))\n zaxis_cc.long_name = \"cos(n*p) component of magnetic axis Z coordinate\"\n zaxis_cc[1:] = Z0_n[N:]\n\n # R\n timer.start(\"R\")\n if verbose > 0:\n print(\"Saving R\")\n rmnc = file.createVariable(\"rmnc\", np.float64, (\"radius\", \"mn_mode\"))\n rmnc.long_name = \"cos(m*t-n*p) component of cylindrical R, on full mesh\"\n rmnc.units = \"m\"\n if not eq.sym:\n rmns = file.createVariable(\"rmns\", np.float64, (\"radius\", \"mn_mode\"))\n rmns.long_name = \"sin(m*t-n*p) component of cylindrical R, on full mesh\"\n rmns.units = \"m\"\n m, n, x_mn = zernike_to_fourier(eq.R_lmn, basis=eq.R_basis, rho=r_full)\n xm, xn, s, c = ptolemy_identity_rev(m, n, x_mn)\n rmnc[:] = c\n if not eq.sym:\n rmns[:] = s\n timer.stop(\"R\")\n if verbose > 1:\n timer.disp(\"R\")\n\n # Z\n timer.start(\"Z\")\n if verbose > 0:\n print(\"Saving Z\")\n zmns = file.createVariable(\"zmns\", np.float64, (\"radius\", \"mn_mode\"))\n zmns.long_name = \"sin(m*t-n*p) component of cylindrical Z, on full mesh\"\n zmns.units = \"m\"\n if not eq.sym:\n zmnc = file.createVariable(\"zmnc\", np.float64, (\"radius\", \"mn_mode\"))\n zmnc.long_name = \"cos(m*t-n*p) component of cylindrical Z, on full mesh\"\n zmnc.units = \"m\"\n m, n, x_mn = zernike_to_fourier(eq.Z_lmn, basis=eq.Z_basis, rho=r_full)\n xm, xn, s, c = ptolemy_identity_rev(m, n, x_mn)\n zmns[:] = s\n if not eq.sym:\n zmnc[:] = c\n timer.stop(\"Z\")\n if verbose > 1:\n timer.disp(\"Z\")\n\n # lambda\n timer.start(\"lambda\")\n if verbose > 0:\n print(\"Saving lambda\")\n lmns = file.createVariable(\"lmns\", np.float64, (\"radius\", \"mn_mode\"))\n lmns.long_name = \"sin(m*t-n*p) component of lambda, on half mesh\"\n lmns.units = \"rad\"\n if not eq.sym:\n lmnc = file.createVariable(\"lmnc\", np.float64, (\"radius\", \"mn_mode\"))\n lmnc.long_name = \"cos(m*t-n*p) component of lambda, on half mesh\"\n lmnc.units = \"rad\"\n m, n, x_mn = zernike_to_fourier(eq.L_lmn, basis=eq.L_basis, rho=r_half)\n xm, xn, s, c = ptolemy_identity_rev(m, n, x_mn)\n lmns[0, :] = 0\n lmns[1:, :] = s\n if not eq.sym:\n lmnc[0, :] = 0\n lmnc[1:, :] = c\n timer.stop(\"lambda\")\n if verbose > 1:\n timer.disp(\"lambda\")\n\n # derived quantities (approximate conversion)\n\n grid = LinearGrid(M=2 * M_nyq + 1, N=2 * N_nyq + 1, NFP=NFP, rho=1)\n coords = eq.compute(\"R\", grid)\n if eq.sym:\n sin_basis = DoubleFourierSeries(M=M_nyq, N=N_nyq, NFP=NFP, sym=\"sin\")\n cos_basis = DoubleFourierSeries(M=M_nyq, N=N_nyq, NFP=NFP, sym=\"cos\")\n sin_transform = Transform(\n grid=grid, basis=sin_basis, build=False, build_pinv=True\n )\n cos_transform = Transform(\n grid=grid, basis=cos_basis, build=False, build_pinv=True\n )\n else:\n full_basis = DoubleFourierSeries(M=M_nyq, N=N_nyq, NFP=NFP, sym=None)\n full_transform = Transform(\n grid=grid, basis=full_basis, build=False, build_pinv=True\n )\n\n rmin_surf = file.createVariable(\"rmin_surf\", np.float64)\n rmin_surf.long_name = \"minimum R coordinate range\"\n rmin_surf.units = \"m\"\n rmin_surf[:] = np.amin(coords[\"R\"])\n\n rmax_surf = file.createVariable(\"rmax_surf\", np.float64)\n rmax_surf.long_name = \"maximum R coordinate range\"\n rmax_surf.units = \"m\"\n rmax_surf[:] = np.amax(coords[\"R\"])\n\n zmax_surf = file.createVariable(\"zmax_surf\", np.float64)\n zmax_surf.long_name = \"maximum Z 
coordinate range\"\n zmax_surf.units = \"m\"\n zmax_surf[:] = np.amax(np.abs(coords[\"Z\"]))\n\n # half grid quantities\n half_grid = LinearGrid(M=2 * M_nyq + 1, N=2 * N_nyq + 1, NFP=NFP, rho=r_half)\n data_half_grid = eq.compute(\"|B|\", half_grid)\n data_half_grid = eq.compute(\"J\", half_grid, data=data_half_grid)\n # Jacobian\n timer.start(\"Jacobian\")\n if verbose > 0:\n print(\"Saving Jacobian\")\n gmnc = file.createVariable(\"gmnc\", np.float64, (\"radius\", \"mn_mode_nyq\"))\n gmnc.long_name = \"cos(m*t-n*p) component of Jacobian, on half mesh\"\n gmnc.units = \"m\"\n m = cos_basis.modes[:, 1]\n n = cos_basis.modes[:, 2]\n if not eq.sym:\n gmns = file.createVariable(\"gmns\", np.float64, (\"radius\", \"mn_mode_nyq\"))\n gmns.long_name = \"sin(m*t-n*p) component of Jacobian, on half mesh\"\n gmns.units = \"m\"\n m = full_basis.modes[:, 1]\n n = full_basis.modes[:, 2]\n x_mn = np.zeros((surfs - 1, m.size))\n data = (\n data_half_grid[\"sqrt(g)\"]\n .reshape(half_grid.M, half_grid.L, half_grid.N, order=\"F\")\n .transpose((1, 0, 2))\n .reshape((half_grid.L, -1))\n )\n if eq.sym:\n x_mn[:, :] = cos_transform.fit(data.T).T\n else:\n x_mn[:, :] = full_transform.fit(data.T).T\n xm, xn, s, c = ptolemy_identity_rev(m, n, x_mn)\n gmnc[0, :] = 0\n gmnc[1:, :] = c\n if not eq.sym:\n gmns[0, :] = 0\n gmns[1:, :] = s\n timer.stop(\"Jacobian\")\n if verbose > 1:\n timer.disp(\"Jacobian\")\n\n # |B|\n timer.start(\"|B|\")\n if verbose > 0:\n print(\"Saving |B|\")\n bmnc = file.createVariable(\"bmnc\", np.float64, (\"radius\", \"mn_mode_nyq\"))\n bmnc.long_name = \"cos(m*t-n*p) component of |B|, on half mesh\"\n bmnc.units = \"T\"\n m = cos_basis.modes[:, 1]\n n = cos_basis.modes[:, 2]\n if not eq.sym:\n bmns = file.createVariable(\"bmns\", np.float64, (\"radius\", \"mn_mode_nyq\"))\n bmns.long_name = \"sin(m*t-n*p) component of |B|, on half mesh\"\n bmns.units = \"T\"\n m = full_basis.modes[:, 1]\n n = full_basis.modes[:, 2]\n x_mn = np.zeros((surfs - 1, m.size))\n data = (\n data_half_grid[\"|B|\"]\n .reshape(half_grid.M, half_grid.L, half_grid.N, order=\"F\")\n .transpose((1, 0, 2))\n .reshape((half_grid.L, -1))\n )\n if eq.sym:\n x_mn[:, :] = cos_transform.fit(data.T).T\n else:\n x_mn[:, :] = full_transform.fit(data.T).T\n xm, xn, s, c = ptolemy_identity_rev(m, n, x_mn)\n bmnc[0, :] = 0\n bmnc[1:, :] = c\n if not eq.sym:\n bmns[0, :] = 0\n bmns[1:, :] = s\n timer.stop(\"|B|\")\n if verbose > 1:\n timer.disp(\"|B|\")\n\n # B^theta\n timer.start(\"B^theta\")\n if verbose > 0:\n print(\"Saving B^theta\")\n bsupumnc = file.createVariable(\n \"bsupumnc\", np.float64, (\"radius\", \"mn_mode_nyq\")\n )\n bsupumnc.long_name = \"cos(m*t-n*p) component of B^theta, on half mesh\"\n bsupumnc.units = \"T/m\"\n m = cos_basis.modes[:, 1]\n n = cos_basis.modes[:, 2]\n if not eq.sym:\n bsupumns = file.createVariable(\n \"bsupumns\", np.float64, (\"radius\", \"mn_mode_nyq\")\n )\n bsupumns.long_name = \"sin(m*t-n*p) component of B^theta, on half mesh\"\n bsupumns.units = \"T/m\"\n m = full_basis.modes[:, 1]\n n = full_basis.modes[:, 2]\n x_mn = np.zeros((surfs - 1, m.size))\n data = (\n data_half_grid[\"B^theta\"]\n .reshape(half_grid.M, half_grid.L, half_grid.N, order=\"F\")\n .transpose((1, 0, 2))\n .reshape((half_grid.L, -1))\n )\n if eq.sym:\n x_mn[:, :] = cos_transform.fit(data.T).T\n else:\n x_mn[:, :] = full_transform.fit(data.T).T\n xm, xn, s, c = ptolemy_identity_rev(m, n, x_mn)\n bsupumnc[0, :] = 0\n bsupumnc[1:, :] = c * signgs[:]\n if not eq.sym:\n bsupumns[0, :] = 0\n bsupumns[1:, :] = s * 
signgs[:]\n timer.stop(\"B^theta\")\n if verbose > 1:\n timer.disp(\"B^theta\")\n\n # B^zeta\n timer.start(\"B^zeta\")\n if verbose > 0:\n print(\"Saving B^zeta\")\n bsupvmnc = file.createVariable(\n \"bsupvmnc\", np.float64, (\"radius\", \"mn_mode_nyq\")\n )\n bsupvmnc.long_name = \"cos(m*t-n*p) component of B^zeta, on half mesh\"\n bsupvmnc.units = \"T/m\"\n m = cos_basis.modes[:, 1]\n n = cos_basis.modes[:, 2]\n if not eq.sym:\n bsupvmns = file.createVariable(\n \"bsupvmns\", np.float64, (\"radius\", \"mn_mode_nyq\")\n )\n bsupvmns.long_name = \"sin(m*t-n*p) component of B^zeta, on half mesh\"\n bsupvmns.units = \"T/m\"\n m = full_basis.modes[:, 1]\n n = full_basis.modes[:, 2]\n x_mn = np.zeros((surfs - 1, m.size))\n data = (\n data_half_grid[\"B^zeta\"]\n .reshape(half_grid.M, half_grid.L, half_grid.N, order=\"F\")\n .transpose((1, 0, 2))\n .reshape((half_grid.L, -1))\n )\n if eq.sym:\n x_mn[:, :] = cos_transform.fit(data.T).T\n else:\n x_mn[:, :] = full_transform.fit(data.T).T\n xm, xn, s, c = ptolemy_identity_rev(m, n, x_mn)\n bsupvmnc[0, :] = 0\n bsupvmnc[1:, :] = c * signgs[:]\n if not eq.sym:\n bsupvmns[0, :] = 0\n bsupvmns[1:, :] = s * signgs[:]\n timer.stop(\"B^zeta\")\n if verbose > 1:\n timer.disp(\"B^zeta\")\n\n # full grid quantities\n full_grid = LinearGrid(M=2 * M_nyq + 1, N=2 * N_nyq + 1, NFP=NFP, rho=r_full)\n data_full_grid = eq.compute(\"J\", full_grid)\n\n # B_psi\n timer.start(\"B_psi\")\n if verbose > 0:\n print(\"Saving B_psi\")\n bsubsmns = file.createVariable(\n \"bsubsmns\", np.float64, (\"radius\", \"mn_mode_nyq\")\n )\n bsubsmns.long_name = \"sin(m*t-n*p) component of B_psi, on full mesh\"\n bsubsmns.units = \"T*m\"\n m = sin_basis.modes[:, 1]\n n = sin_basis.modes[:, 2]\n if not eq.sym:\n bsubsmnc = file.createVariable(\n \"bsubsmnc\", np.float64, (\"radius\", \"mn_mode_nyq\")\n )\n bsubsmnc.long_name = \"cos(m*t-n*p) component of B_psi, on full mesh\"\n bsubsmnc.units = \"T*m\"\n m = full_basis.modes[:, 1]\n n = full_basis.modes[:, 2]\n x_mn = np.zeros((surfs, m.size))\n data = data_full_grid[\"B_rho\"].reshape(\n full_grid.M, full_grid.L, full_grid.N, order=\"F\"\n ).transpose((1, 0, 2)).reshape((full_grid.L, -1)) / (2 * r_full[:, np.newaxis])\n # B_rho -> B_psi conversion: d(rho)/d(s) = 1/(2*rho)\n if eq.sym:\n x_mn[:, :] = sin_transform.fit(data.T).T\n else:\n x_mn[:, :] = full_transform.fit(data.T).T\n xm, xn, s, c = ptolemy_identity_rev(m, n, x_mn)\n bsubsmns[:, :] = s\n bsubsmns[0, :] = ( # linear extrapolation for coefficient at the magnetic axis\n s[1, :] - (s[2, :] - s[1, :]) / (s_full[2] - s_full[1]) * s_full[1]\n )\n # TODO: evaluate current at rho=0 nodes instead of extrapolation\n if not eq.sym:\n bsubsmnc[:, :] = c\n bsubsmnc[0, :] = (\n c[1, :] - (c[2, :] - c[1, :]) / (s_full[2] - s_full[1]) * s_full[1]\n )\n timer.stop(\"B_psi\")\n if verbose > 1:\n timer.disp(\"B_psi\")\n\n # B_theta\n timer.start(\"B_theta\")\n if verbose > 0:\n print(\"Saving B_theta\")\n bsubumnc = file.createVariable(\n \"bsubumnc\", np.float64, (\"radius\", \"mn_mode_nyq\")\n )\n bsubumnc.long_name = \"cos(m*t-n*p) component of B_theta, on half mesh\"\n bsubumnc.units = \"T*m\"\n m = cos_basis.modes[:, 1]\n n = cos_basis.modes[:, 2]\n if not eq.sym:\n bsubumns = file.createVariable(\n \"bsubumns\", np.float64, (\"radius\", \"mn_mode_nyq\")\n )\n bsubumns.long_name = \"sin(m*t-n*p) component of B_theta, on half mesh\"\n bsubumns.units = \"T*m\"\n m = full_basis.modes[:, 1]\n n = full_basis.modes[:, 2]\n x_mn = np.zeros((surfs - 1, m.size))\n data = (\n 
data_half_grid[\"B_theta\"]\n .reshape(half_grid.M, half_grid.L, half_grid.N, order=\"F\")\n .transpose((1, 0, 2))\n .reshape((half_grid.L, -1))\n )\n if eq.sym:\n x_mn[:, :] = cos_transform.fit(data.T).T\n else:\n x_mn[:, :] = full_transform.fit(data.T).T\n xm, xn, s, c = ptolemy_identity_rev(m, n, x_mn)\n bsubumnc[0, :] = 0\n bsubumnc[1:, :] = c * signgs[:]\n if not eq.sym:\n bsubumns[0, :] = 0\n bsubumns[1:, :] = s * signgs[:]\n timer.stop(\"B_theta\")\n if verbose > 1:\n timer.disp(\"B_theta\")\n\n # B_zeta\n timer.start(\"B_zeta\")\n if verbose > 0:\n print(\"Saving B_zeta\")\n bsubvmnc = file.createVariable(\n \"bsubvmnc\", np.float64, (\"radius\", \"mn_mode_nyq\")\n )\n bsubvmnc.long_name = \"cos(m*t-n*p) component of B_zeta, on half mesh\"\n bsubvmnc.units = \"T*m\"\n m = cos_basis.modes[:, 1]\n n = cos_basis.modes[:, 2]\n if not eq.sym:\n bsubvmns = file.createVariable(\n \"bsubvmns\", np.float64, (\"radius\", \"mn_mode_nyq\")\n )\n bsubvmns.long_name = \"sin(m*t-n*p) component of B_zeta, on half mesh\"\n bsubvmns.units = \"T*m\"\n m = full_basis.modes[:, 1]\n n = full_basis.modes[:, 2]\n x_mn = np.zeros((surfs - 1, m.size))\n data = (\n data_half_grid[\"B_zeta\"]\n .reshape(half_grid.M, half_grid.L, half_grid.N, order=\"F\")\n .transpose((1, 0, 2))\n .reshape((half_grid.L, -1))\n )\n if eq.sym:\n x_mn[:, :] = cos_transform.fit(data.T).T\n else:\n x_mn[:, :] = full_transform.fit(data.T).T\n xm, xn, s, c = ptolemy_identity_rev(m, n, x_mn)\n bsubvmnc[0, :] = 0\n bsubvmnc[1:, :] = c * signgs[:]\n if not eq.sym:\n bsubvmns[0, :] = 0\n bsubvmns[1:, :] = s * signgs[:]\n timer.stop(\"B_zeta\")\n if verbose > 1:\n timer.disp(\"B_zeta\")\n\n # J^theta * sqrt(g)\n timer.start(\"J^theta\")\n if verbose > 0:\n print(\"Saving J^theta\")\n currumnc = file.createVariable(\n \"currumnc\", np.float64, (\"radius\", \"mn_mode_nyq\")\n )\n currumnc.long_name = \"cos(m*t-n*p) component of sqrt(g)*J^theta, on full mesh\"\n currumnc.units = \"A/m^3\"\n m = cos_basis.modes[:, 1]\n n = cos_basis.modes[:, 2]\n if not eq.sym:\n currumns = file.createVariable(\n \"currumns\", np.float64, (\"radius\", \"mn_mode_nyq\")\n )\n currumns.long_name = (\n \"sin(m*t-n*p) component of sqrt(g)*J^theta, on full mesh\"\n )\n currumns.units = \"A/m^3\"\n m = full_basis.modes[:, 1]\n n = full_basis.modes[:, 2]\n x_mn = np.zeros((surfs, m.size))\n data = (\n (data_full_grid[\"J^theta\"] * data_full_grid[\"sqrt(g)\"])\n .reshape(full_grid.M, full_grid.L, full_grid.N, order=\"F\")\n .transpose((1, 0, 2))\n .reshape((full_grid.L, -1))\n )\n if eq.sym:\n x_mn[:, :] = cos_transform.fit(data.T).T\n else:\n x_mn[:, :] = full_transform.fit(data.T).T\n xm, xn, s, c = ptolemy_identity_rev(m, n, x_mn)\n currumnc[:, :] = c\n currumnc[0, :] = ( # linear extrapolation for coefficient at the magnetic axis\n s[1, :] - (c[2, :] - c[1, :]) / (s_full[2] - s_full[1]) * s_full[1]\n )\n # TODO: evaluate current at rho=0 nodes instead of extrapolation\n if not eq.sym:\n currumns[:, :] = s\n currumns[0, :] = (\n s[1, :] - (s[2, :] - s[1, :]) / (s_full[2] - s_full[1]) * s_full[1]\n )\n timer.stop(\"J^theta\")\n if verbose > 1:\n timer.disp(\"J^theta\")\n\n # J^zeta * sqrt(g)\n timer.start(\"J^zeta\")\n if verbose > 0:\n print(\"Saving J^zeta\")\n currvmnc = file.createVariable(\n \"currvmnc\", np.float64, (\"radius\", \"mn_mode_nyq\")\n )\n currvmnc.long_name = \"cos(m*t-n*p) component of sqrt(g)*J^zeta, on full mesh\"\n currvmnc.units = \"A/m^3\"\n m = cos_basis.modes[:, 1]\n n = cos_basis.modes[:, 2]\n if not eq.sym:\n currvmns = 
file.createVariable(\n \"currvmns\", np.float64, (\"radius\", \"mn_mode_nyq\")\n )\n currvmns.long_name = (\n \"sin(m*t-n*p) component of sqrt(g)*J^zeta, on full mesh\"\n )\n currvmns.units = \"A/m^3\"\n m = full_basis.modes[:, 1]\n n = full_basis.modes[:, 2]\n x_mn = np.zeros((surfs, m.size))\n data = (\n (data_full_grid[\"J^zeta\"] * data_full_grid[\"sqrt(g)\"])\n .reshape(full_grid.M, full_grid.L, full_grid.N, order=\"F\")\n .transpose((1, 0, 2))\n .reshape((full_grid.L, -1))\n )\n if eq.sym:\n x_mn[:, :] = cos_transform.fit(data.T).T\n else:\n x_mn[:, :] = full_transform.fit(data.T).T\n xm, xn, s, c = ptolemy_identity_rev(m, n, x_mn)\n currvmnc[:, :] = c\n currvmnc[0, :] = ( # linear extrapolation for coefficient at the magnetic axis\n s[1, :] - (c[2, :] - c[1, :]) / (s_full[2] - s_full[1]) * s_full[1]\n )\n # TODO: evaluate current at rho=0 nodes instead of extrapolation\n if not eq.sym:\n currvmns[:, :] = s\n currumns[0, :] = (\n s[1, :] - (s[2, :] - s[1, :]) / (s_full[2] - s_full[1]) * s_full[1]\n )\n timer.stop(\"J^zeta\")\n if verbose > 1:\n timer.disp(\"J^zeta\")\n\n file.close\n timer.stop(\"Total time\")\n if verbose > 1:\n timer.disp(\"Total time\")\n\n @classmethod\n def read_vmec_output(cls, fname):\n \"\"\"Read VMEC data from wout NetCDF file.\n\n Parameters\n ----------\n fname : str or path-like\n filename of VMEC output file\n\n Returns\n -------\n vmec_data : dict\n the VMEC data fields\n\n \"\"\"\n file = Dataset(fname, mode=\"r\")\n\n vmec_data = {\n \"NFP\": file.variables[\"nfp\"][:],\n \"psi\": file.variables[\"phi\"][:], # toroidal flux is saved as 'phi'\n \"xm\": file.variables[\"xm\"][:],\n \"xn\": file.variables[\"xn\"][:],\n \"rmnc\": file.variables[\"rmnc\"][:],\n \"zmns\": file.variables[\"zmns\"][:],\n \"lmns\": file.variables[\"lmns\"][:],\n }\n try:\n vmec_data[\"rmns\"] = file.variables[\"rmns\"][:]\n vmec_data[\"zmnc\"] = file.variables[\"zmnc\"][:]\n vmec_data[\"lmnc\"] = file.variables[\"lmnc\"][:]\n vmec_data[\"sym\"] = False\n except:\n vmec_data[\"sym\"] = True\n\n return vmec_data\n\n @staticmethod\n def vmec_interpolate(Cmn, Smn, xm, xn, theta, phi, s=None, si=None, sym=True):\n \"\"\"Interpolate VMEC data on a flux surface.\n\n Parameters\n ----------\n Cmn : ndarray\n cos(mt-np) Fourier coefficients\n Smn : ndarray\n sin(mt-np) Fourier coefficients\n xm : ndarray\n poloidal mode numbers\n xn : ndarray\n toroidal mode numbers\n theta : ndarray\n poloidal angles\n phi : ndarray\n toroidal angles\n s : ndarray\n radial coordinate, equivalent to normalized toroidal magnetic flux.\n Defaults to si (all flux surfaces)\n si : ndarray\n values of radial coordinates where Cmn,Smn are defined. 
Defaults to linearly\n spaced on [0,1]\n sym : bool\n stellarator symmetry (Default value = True)\n\n Returns\n -------\n if sym = True\n C, S (tuple of ndarray): VMEC data interpolated at the points (s,theta,phi)\n where C has cosine symmetry and S has sine symmetry\n if sym = False\n X (ndarray): non-symmetric VMEC data interpolated at the points (s,theta,phi)\n\n \"\"\"\n if si is None:\n si = np.linspace(0, 1, Cmn.shape[0])\n if s is None:\n s = si\n Cf = interpolate.CubicSpline(si, Cmn)\n Sf = interpolate.CubicSpline(si, Smn)\n\n C = np.sum(\n Cf(s)\n * np.cos(\n xm[np.newaxis] * theta[:, np.newaxis]\n - xn[np.newaxis] * phi[:, np.newaxis]\n ),\n axis=-1,\n )\n S = np.sum(\n Sf(s)\n * np.sin(\n xm[np.newaxis] * theta[:, np.newaxis]\n - xn[np.newaxis] * phi[:, np.newaxis]\n ),\n axis=-1,\n )\n\n if sym:\n return C, S\n else:\n return C + S\n\n @classmethod\n def compute_theta_coords(cls, lmns, xm, xn, s, theta_star, zeta, si=None):\n \"\"\"Find theta such that theta + lambda(theta) == theta_star.\n\n Parameters\n ----------\n lmns : array-like\n fourier coefficients for lambda\n xm : array-like\n poloidal mode numbers\n xn : array-like\n toroidal mode numbers\n s : array-like\n desired radial coordinates (normalized toroidal magnetic flux)\n theta_star : array-like\n desired straigh field-line poloidal angles (PEST/VMEC-like flux coordinates)\n zeta : array-like\n desired toroidal angles (toroidal coordinate phi)\n si : ndarray\n values of radial coordinates where lmns are defined. Defaults to linearly\n spaced on half grid between (0,1)\n\n Returns\n -------\n theta : ndarray\n theta such that theta + lambda(theta) == theta_star\n\n \"\"\"\n if si is None:\n si = np.linspace(0, 1, lmns.shape[0])\n si[1:] = si[0:-1] + 0.5 / (lmns.shape[0] - 1)\n lmbda_mn = interpolate.CubicSpline(si, lmns)\n\n # Note: theta* (also known as vartheta) is the poloidal straight field-line\n # angle in PEST-like flux coordinates\n\n def root_fun(theta):\n lmbda = np.sum(\n lmbda_mn(s)\n * np.sin(\n xm[np.newaxis] * theta[:, np.newaxis]\n - xn[np.newaxis] * zeta[:, np.newaxis]\n ),\n axis=-1,\n )\n theta_star_k = theta + lmbda # theta* = theta + lambda\n err = theta_star - theta_star_k\n return err\n\n out = optimize.root(\n root_fun, x0=theta_star, method=\"diagbroyden\", options={\"ftol\": 1e-6}\n )\n return out.x\n\n @classmethod\n def area_difference_vmec(cls, equil, vmec_data, Nr=10, Nt=8, **kwargs):\n \"\"\"Compute average normalized area difference between VMEC and DESC equilibria.\n\n Parameters\n ----------\n equil : Equilibrium\n desc equilibrium to compare\n vmec_data : dict\n dictionary of vmec outputs\n Nr : int, optional\n number of radial surfaces to average over\n Nt : int, optional\n number of vartheta contours to compare\n\n Returns\n -------\n area_rho : ndarray, shape(Nr, Nz)\n normalized area difference of rho contours, computed as the symmetric\n difference divided by the intersection\n area_theta : ndarray, shape(Nt, Nz)\n normalized area difference between vartheta contours, computed as the area\n of the polygon created by closing the two vartheta contours divided by the\n perimeter squared\n\n \"\"\"\n # 1e-3 tolerance seems reasonable for testing, similar to comparison by eye\n if isinstance(vmec_data, (str, os.PathLike)):\n vmec_data = cls.read_vmec_output(vmec_data)\n\n if equil.N == 0:\n Nz = 1\n else:\n Nz = 6\n\n coords = cls.compute_coord_surfaces(equil, vmec_data, Nr, Nt, **kwargs)\n\n Rr1 = coords[\"Rr_desc\"]\n Zr1 = coords[\"Zr_desc\"]\n Rv1 = coords[\"Rv_desc\"]\n Zv1 = 
coords[\"Zv_desc\"]\n Rr2 = coords[\"Rr_vmec\"]\n Zr2 = coords[\"Zr_vmec\"]\n Rv2 = coords[\"Rv_vmec\"]\n Zv2 = coords[\"Zv_vmec\"]\n area_rho, area_theta = area_difference(Rr1, Rr2, Zr1, Zr2, Rv1, Rv2, Zv1, Zv2)\n return area_rho, area_theta\n\n @classmethod\n def compute_coord_surfaces(cls, equil, vmec_data, Nr=10, Nt=8, **kwargs):\n \"\"\"Compute points on surfaces of constant rho, vartheta for both DESC and VMEC\n\n Parameters\n ----------\n equil : Equilibrium\n desc equilibrium to compare\n vmec_data : str or path-like or dict\n path to VMEC output file, or dictionary of vmec outputs\n Nr : int, optional\n number of rho contours\n Nt : int, optional\n number of vartheta contours\n\n Returns\n -------\n coords : dict of ndarray\n dictionary of coordinate arrays with keys Xy_code where X is R or Z, y is r\n for rho contours, or v for vartheta contours, and code is vmec or desc\n\n \"\"\"\n if isinstance(vmec_data, (str, os.PathLike)):\n vmec_data = cls.read_vmec_output(vmec_data)\n\n if equil.N == 0:\n Nz = 1\n else:\n Nz = 6\n\n num_theta = kwargs.get(\"num_theta\", 180)\n Nr_vmec = vmec_data[\"rmnc\"].shape[0] - 1\n s_idx = Nr_vmec % np.floor(Nr_vmec / (Nr - 1))\n idxes = np.linspace(s_idx, Nr_vmec, Nr).astype(int)\n if s_idx != 0:\n idxes = np.pad(idxes, (1, 0), mode=\"constant\")\n\n # flux surfaces to plot\n rr = np.sqrt(idxes / Nr_vmec)\n rt = np.linspace(0, 2 * np.pi, num_theta)\n rz = np.linspace(0, 2 * np.pi / equil.NFP, Nz, endpoint=False)\n r_grid = LinearGrid(rho=rr, theta=rt, zeta=rz)\n\n # straight field-line angles to plot\n tr = np.linspace(0, 1, 50)\n tt = np.linspace(0, 2 * np.pi, Nt, endpoint=False)\n tz = np.linspace(0, 2 * np.pi / equil.NFP, Nz, endpoint=False)\n t_grid = LinearGrid(rho=tr, theta=tt, zeta=tz)\n\n # Note: theta* (also known as vartheta) is the poloidal straight field-line\n # angle in PEST-like flux coordinates\n\n # find theta angles corresponding to desired theta* angles\n v_grid = Grid(equil.compute_theta_coords(t_grid.nodes))\n r_coords_desc = equil.compute(\"R\", r_grid)\n v_coords_desc = equil.compute(\"R\", v_grid)\n\n # rho contours\n Rr_desc = r_coords_desc[\"R\"].reshape((r_grid.M, r_grid.L, r_grid.N), order=\"F\")\n Zr_desc = r_coords_desc[\"Z\"].reshape((r_grid.M, r_grid.L, r_grid.N), order=\"F\")\n\n # vartheta contours\n Rv_desc = v_coords_desc[\"R\"].reshape((t_grid.M, t_grid.L, t_grid.N), order=\"F\")\n Zv_desc = v_coords_desc[\"Z\"].reshape((t_grid.M, t_grid.L, t_grid.N), order=\"F\")\n\n # Note: the VMEC radial coordinate s is the normalized toroidal magnetic flux;\n # the DESC radial coordiante rho = sqrt(s)\n\n # convert from rho -> s\n r_nodes = r_grid.nodes\n r_nodes[:, 0] = r_nodes[:, 0] ** 2\n t_nodes = t_grid.nodes\n t_nodes[:, 0] = t_nodes[:, 0] ** 2\n\n v_nodes = cls.compute_theta_coords(\n vmec_data[\"lmns\"],\n vmec_data[\"xm\"],\n vmec_data[\"xn\"],\n t_nodes[:, 0],\n t_nodes[:, 1],\n t_nodes[:, 2],\n )\n\n t_nodes[:, 1] = v_nodes\n\n Rr_vmec, Zr_vmec = cls.vmec_interpolate(\n vmec_data[\"rmnc\"],\n vmec_data[\"zmns\"],\n vmec_data[\"xm\"],\n vmec_data[\"xn\"],\n theta=r_nodes[:, 1],\n phi=r_nodes[:, 2],\n s=r_nodes[:, 0],\n )\n\n Rv_vmec, Zv_vmec = cls.vmec_interpolate(\n vmec_data[\"rmnc\"],\n vmec_data[\"zmns\"],\n vmec_data[\"xm\"],\n vmec_data[\"xn\"],\n theta=t_nodes[:, 1],\n phi=t_nodes[:, 2],\n s=t_nodes[:, 0],\n )\n\n coords = {\n \"Rr_desc\": Rr_desc,\n \"Zr_desc\": Zr_desc,\n \"Rv_desc\": Rv_desc,\n \"Zv_desc\": Zv_desc,\n \"Rr_vmec\": Rr_vmec.reshape((r_grid.M, r_grid.L, r_grid.N), order=\"F\"),\n 
\"Zr_vmec\": Zr_vmec.reshape((r_grid.M, r_grid.L, r_grid.N), order=\"F\"),\n \"Rv_vmec\": Rv_vmec.reshape((t_grid.M, t_grid.L, t_grid.N), order=\"F\"),\n \"Zv_vmec\": Zv_vmec.reshape((t_grid.M, t_grid.L, t_grid.N), order=\"F\"),\n }\n coords = {key: np.swapaxes(val, 0, 1) for key, val in coords.items()}\n return coords\n\n @classmethod\n def plot_vmec_comparison(cls, equil, vmec_data, Nr=10, Nt=8, **kwargs):\n \"\"\"Plot a comparison to VMEC flux surfaces.\n\n Parameters\n ----------\n equil : Equilibrium\n desc equilibrium to compare\n vmec_data : str or path-like or dict\n path to VMEC output file, or dictionary of vmec outputs\n Nr : int, optional\n number of rho contours to plot\n Nt : int, optional\n number of vartheta contours to plot\n\n Returns\n -------\n fig : matplotlib.figure.Figure\n figure being plotted to\n ax : matplotlib.axes.Axes or ndarray of Axes\n axes being plotted to\n\n \"\"\"\n if isinstance(vmec_data, (str, os.PathLike)):\n vmec_data = cls.read_vmec_output(vmec_data)\n coords = cls.compute_coord_surfaces(equil, vmec_data, Nr, Nt, **kwargs)\n\n if equil.N == 0:\n fig, ax = plt.subplots(1, 1, figsize=(6, 6), squeeze=False)\n else:\n fig, ax = plt.subplots(2, 3, figsize=(16, 12), squeeze=False)\n ax = ax.flatten()\n\n for k in range(len(ax)):\n ax[k].plot(coords[\"Rr_vmec\"][0, 0, k], coords[\"Zr_vmec\"][0, 0, k], \"bo\")\n s_vmec = ax[k].plot(\n coords[\"Rr_vmec\"][:, :, k].T, coords[\"Zr_vmec\"][:, :, k].T, \"b-\"\n )\n ax[k].plot(coords[\"Rv_vmec\"][:, :, k], coords[\"Zv_vmec\"][:, :, k], \"b-\")\n\n ax[k].plot(coords[\"Rr_desc\"][0, 0, k].T, coords[\"Zr_desc\"][0, 0, k].T, \"ro\")\n ax[k].plot(coords[\"Rv_desc\"][:, :, k], coords[\"Zv_desc\"][:, :, k], \"r--\")\n s_desc = ax[k].plot(\n coords[\"Rr_desc\"][:, :, k].T, coords[\"Zr_desc\"][:, :, k].T, \"r--\"\n )\n\n ax[k].axis(\"equal\")\n ax[k].set_xlabel(r\"$R ~(\\mathrm{m})$\")\n ax[k].set_ylabel(r\"$Z ~(\\mathrm{m})$\")\n if k == 0:\n s_vmec[0].set_label(r\"$\\mathrm{VMEC}$\")\n s_desc[0].set_label(r\"$\\mathrm{DESC}$\")\n ax[k].legend(fontsize=\"x-small\")\n\n return fig, ax\n",
"import numpy as np\nfrom termcolor import colored\nimport warnings\nimport numbers\nfrom collections.abc import MutableSequence\nfrom desc.backend import use_jax\nfrom desc.utils import Timer, isalmostequal, unpack_state\nfrom desc.configuration import _Configuration\nfrom desc.io import IOAble\nfrom desc.boundary_conditions import get_boundary_condition, BoundaryCondition\nfrom desc.objective_funs import get_objective_function, ObjectiveFunction\nfrom desc.optimize import Optimizer\nfrom desc.grid import Grid, LinearGrid, ConcentricGrid, QuadratureGrid\nfrom desc.transform import Transform\nfrom desc.perturbations import perturb, optimal_perturb\nfrom desc.geometry import FourierRZToroidalSurface, ZernikeRZToroidalSection\n\n\nclass Equilibrium(_Configuration, IOAble):\n \"\"\"Equilibrium is an object that represents a plasma equilibrium.\n\n It contains information about a plasma state, including the shapes of flux surfaces\n and profile inputs. It can compute additional information, such as the magnetic\n field and plasma currents, as well as \"solving\" itself by finding the equilibrium\n fields, and perturbing those fields to find nearby equilibria.\n\n Parameters\n ----------\n Psi : float (optional)\n total toroidal flux (in Webers) within LCFS. Default 1.0\n NFP : int (optional)\n number of field periods Default ``surface.NFP`` or 1\n L : int (optional)\n Radial resolution. Default 2*M for ``spectral_indexing=='fringe'``, else M\n M : int (optional)\n Poloidal resolution. Default surface.M or 1\n N : int (optional)\n Toroidal resolution. Default surface.N or 0\n L_grid : int (optional)\n resolution of real space nodes in radial direction\n M_grid : int (optional)\n resolution of real space nodes in poloidal direction\n N_grid : int (optional)\n resolution of real space nodes in toroidal direction\n node_pattern : str (optional)\n pattern of nodes in real space. Default is ``'jacobi'``\n pressure : Profile or ndarray shape(k,2) (optional)\n Pressure profile or array of mode numbers and spectral coefficients.\n Default is a PowerSeriesProfile with zero pressure\n iota : Profile or ndarray shape(k,2) (optional)\n Rotational transform profile or array of mode numbers and spectral coefficients\n Default is a PowerSeriesProfile with zero rotational transform\n surface: Surface or ndarray shape(k,5) (optional)\n Fixed boundary surface shape, as a Surface object or array of\n spectral mode numbers and coefficients of the form [l, m, n, R, Z].\n Default is a FourierRZToroidalSurface with major radius 10 and\n minor radius 1\n axis : Curve or ndarray shape(k,3) (optional)\n Initial guess for the magnetic axis as a Curve object or ndarray\n of mode numbers and spectral coefficints of the form [n, R, Z].\n Default is the centroid of the surface.\n sym : bool (optional)\n Whether to enforce stellarator symmetry. Default surface.sym or False.\n spectral_indexing : str (optional)\n Type of Zernike indexing scheme to use. 
Default ``'ansi'``\n objective : str or ObjectiveFunction (optional)\n function to solve for equilibrium solution\n optimizer : str or Optimzer (optional)\n optimizer to use\n \"\"\"\n\n _io_attrs_ = _Configuration._io_attrs_ + [\n \"_solved\",\n \"_x0\",\n \"_L_grid\",\n \"_M_grid\",\n \"_N_grid\",\n \"_grid\",\n \"_node_pattern\",\n \"_transforms\",\n \"_objective\",\n \"optimizer_results\",\n \"_optimizer\",\n ]\n\n def __init__(\n self,\n Psi=1.0,\n NFP=None,\n L=None,\n M=None,\n N=None,\n L_grid=None,\n M_grid=None,\n N_grid=None,\n node_pattern=None,\n pressure=None,\n iota=None,\n surface=None,\n axis=None,\n sym=None,\n spectral_indexing=None,\n objective=None,\n optimizer=None,\n **kwargs,\n ):\n\n super().__init__(\n Psi,\n NFP,\n L,\n M,\n N,\n pressure,\n iota,\n surface,\n axis,\n sym,\n spectral_indexing,\n **kwargs,\n )\n self._x0 = self.x\n assert (L_grid is None) or (\n isinstance(L_grid, numbers.Real)\n and (L_grid == int(L_grid))\n and (L_grid >= 0)\n ), \"L_grid should be a non-negative integer or None, got {L_grid}\"\n assert (M_grid is None) or (\n isinstance(M_grid, numbers.Real)\n and (M_grid == int(M_grid))\n and (M_grid >= 0)\n ), \"M_grid should be a non-negative integer or None, got {M_grid}\"\n assert (N_grid is None) or (\n isinstance(N_grid, numbers.Real)\n and (N_grid == int(N_grid))\n and (N_grid >= 0)\n ), \"N_grid should be a non-negative integer or None, got {N_grid}\"\n self._L_grid = L_grid if L_grid is not None else self.L\n self._M_grid = M_grid if M_grid is not None else self.M\n self._N_grid = N_grid if N_grid is not None else self.N\n self._node_pattern = node_pattern if node_pattern is not None else \"jacobi\"\n self._solved = False\n self._objective = None\n self._optimizer = None\n self._set_grid()\n self._transforms = {}\n self._set_transforms()\n self.objective = objective\n self.optimizer = optimizer\n self.optimizer_results = {}\n\n def __repr__(self):\n \"\"\"String form of the object.\"\"\"\n return (\n type(self).__name__\n + \" at \"\n + str(hex(id(self)))\n + \" (L={}, M={}, N={}, NFP={}, sym={}, spectral_indexing={})\".format(\n self.L, self.M, self.N, self.NFP, self.sym, self.spectral_indexing\n )\n )\n\n @property\n def x0(self):\n \"\"\"Return initial optimization vector before solution (ndarray).\"\"\"\n if not hasattr(self, \"_x0\"):\n self._x0 = None\n return self._x0\n\n @x0.setter\n def x0(self, x0):\n self._x0 = x0\n\n @property\n def L_grid(self):\n \"\"\"Radial resolution of grid in real space (int).\"\"\"\n if not hasattr(self, \"_L_grid\"):\n self._L_grid = (\n self.M_grid if self.spectral_indexing == \"ansi\" else 2 * self.M_grid\n )\n return self._L_grid\n\n @L_grid.setter\n def L_grid(self, new):\n if self.L_grid != new:\n self._L_grid = new\n self._set_grid()\n self._set_transforms()\n\n @property\n def M_grid(self):\n \"\"\"Poloidal resolution of grid in real space (int).\"\"\"\n if not hasattr(self, \"_M_grid\"):\n self._M_grid = 1\n return self._M_grid\n\n @M_grid.setter\n def M_grid(self, new):\n if self.M_grid != new:\n self._M_grid = new\n self._set_grid()\n self._set_transforms()\n\n @property\n def N_grid(self):\n \"\"\"Toroidal resolution of grid in real space (int).\"\"\"\n if not hasattr(self, \"_N_grid\"):\n self._N_grid = 0\n return self._N_grid\n\n @N_grid.setter\n def N_grid(self, new):\n if self.N_grid != new:\n self._N_grid = new\n self._set_grid()\n self._set_transforms()\n\n @property\n def node_pattern(self):\n \"\"\"Pattern for placement of nodes in curvilinear coordinates (str).\"\"\"\n if not 
hasattr(self, \"_node_pattern\"):\n self._node_pattern = None\n return self._node_pattern\n\n @property\n def transforms(self):\n if not hasattr(self, \"_transforms\"):\n self._transforms = {}\n self._set_transforms()\n return self._transforms\n\n def _set_grid(self):\n if self.node_pattern in [\"cheb1\", \"cheb2\", \"jacobi\", \"ocs\"]:\n self._grid = ConcentricGrid(\n L=self.L_grid,\n M=self.M_grid,\n N=self.N_grid,\n NFP=self.NFP,\n sym=self.sym,\n axis=False,\n node_pattern=self.node_pattern,\n )\n elif self.node_pattern in [\"linear\", \"uniform\"]:\n self._grid = LinearGrid(\n L=2 * self.L_grid + 1,\n M=2 * self.M_grid + 1,\n N=2 * self.N_grid + 1,\n NFP=self.NFP,\n sym=self.sym,\n axis=False,\n )\n elif self.node_pattern in [\"quad\"]:\n self._grid = QuadratureGrid(\n L=self.L_grid, M=self.M_grid, N=self.N_grid, NFP=self.NFP\n )\n else:\n raise ValueError(\n colored(\"unknown grid type {}\".format(self.node_pattern), \"red\")\n )\n\n def _set_transforms(self):\n\n if len(self.transforms) == 0:\n self._transforms[\"R\"] = Transform(\n self.grid, self.R_basis, derivs=0, build=False\n )\n self._transforms[\"Z\"] = Transform(\n self.grid, self.Z_basis, derivs=0, build=False\n )\n self._transforms[\"L\"] = Transform(\n self.grid, self.L_basis, derivs=0, build=False\n )\n self.pressure.grid = self.grid\n self.iota.grid = self.grid\n\n else:\n self.transforms[\"R\"].change_resolution(self.grid, self.R_basis, build=False)\n self.transforms[\"Z\"].change_resolution(self.grid, self.Z_basis, build=False)\n self.transforms[\"L\"].change_resolution(self.grid, self.L_basis, build=False)\n self.pressure.grid = self.grid\n self.iota.grid = self.grid\n\n if self.objective is not None:\n derivs = self.objective.derivatives\n self.transforms[\"R\"].change_derivatives(derivs, build=False)\n self.transforms[\"Z\"].change_derivatives(derivs, build=False)\n self.transforms[\"L\"].change_derivatives(derivs, build=False)\n\n def build(self, verbose=1):\n \"\"\"Build transform matrices and factorizes boundary constraint.\n\n Parameters\n ----------\n verbose : int\n level of output\n\n \"\"\"\n timer = Timer()\n timer.start(\"Transform computation\")\n if verbose > 0:\n print(\"Precomputing Transforms\")\n self._set_transforms()\n for tr in self.transforms.values():\n tr.build()\n\n timer.stop(\"Transform computation\")\n if verbose > 1:\n timer.disp(\"Transform computation\")\n\n timer.start(\"Boundary constraint factorization\")\n if verbose > 0:\n print(\"Factorizing boundary constraint\")\n if self.objective is not None and self.objective.BC_constraint is not None:\n self.objective.BC_constraint.build()\n timer.stop(\"Boundary constraint factorization\")\n if verbose > 1:\n timer.disp(\"Boundary constraint factorization\")\n\n def change_resolution(\n self, L=None, M=None, N=None, L_grid=None, M_grid=None, N_grid=None\n ):\n \"\"\"Set the spectral and real space resolution.\n\n Parameters\n ----------\n L : int\n maximum radial zernike mode number\n M : int\n maximum poloidal fourier mode number\n N : int\n maximum toroidal fourier mode number\n L_grid : int\n radial real space resolution\n M_grid : int\n poloidal real space resolution\n N_grid : int\n toroidal real space resolution\n\n \"\"\"\n L_change = M_change = N_change = False\n if L is not None and L != self.L:\n L_change = True\n if M is not None and M != self.M:\n M_change = True\n if N is not None and N != self.N:\n N_change = True\n\n if any([L_change, M_change, N_change]):\n super().change_resolution(L, M, N)\n\n L_grid_change = 
M_grid_change = N_grid_change = False\n if L_grid is not None and L_grid != self.L_grid:\n self._L_grid = L_grid\n L_grid_change = True\n if M_grid is not None and M_grid != self.M_grid:\n self._M_grid = M_grid\n M_grid_change = True\n if N_grid is not None and N_grid != self.N_grid:\n self._N_grid = N_grid\n N_grid_change = True\n if any([L_grid_change, M_grid_change, N_grid_change]):\n self._set_grid()\n self._set_transforms()\n if (\n any(\n [\n L_change,\n M_change,\n N_change,\n L_grid_change,\n M_grid_change,\n N_grid_change,\n ]\n )\n and self.objective is not None\n ):\n self.objective = self.objective.name\n\n @property\n def built(self):\n \"\"\"Whether the equilibrium is ready to solve (bool).\"\"\"\n tr = np.all([tr.built for tr in self.transforms.values()])\n if self.objective is not None and self.objective.BC_constraint is not None:\n bc = self.objective.BC_constraint.built\n else:\n bc = True\n return tr and bc\n\n @property\n def grid(self):\n \"\"\"Grid of real space collocation nodes (Grid).\"\"\"\n return self._grid\n\n @grid.setter\n def grid(self, grid):\n if not isinstance(grid, Grid):\n raise ValueError(\"grid attribute must be of type 'Grid' or a subclass\")\n self._grid = grid\n self._set_transforms()\n\n @property\n def solved(self):\n \"\"\"Whether the equilibrium has been solved (bool).\"\"\"\n return self._solved\n\n @solved.setter\n def solved(self, solved):\n self._solved = solved\n\n @property\n def objective(self):\n \"\"\"Objective function currently assigned (ObjectiveFunction).\"\"\"\n if not hasattr(self, \"_objective\"):\n self._objective = None\n return self._objective\n\n @objective.setter\n def objective(self, objective):\n if objective is None:\n self._objective = objective\n elif isinstance(objective, ObjectiveFunction) and objective.eq(self.objective):\n return\n elif isinstance(objective, ObjectiveFunction) and not objective.eq(\n self.objective\n ):\n self._objective = objective\n elif isinstance(objective, str):\n self._set_transforms()\n objective = get_objective_function(\n objective,\n R_transform=self.transforms[\"R\"],\n Z_transform=self.transforms[\"Z\"],\n L_transform=self.transforms[\"L\"],\n p_profile=self.pressure,\n i_profile=self.iota,\n BC_constraint=self.surface.get_constraint(\n self.R_basis, self.Z_basis, self.L_basis\n ),\n )\n self.objective = objective\n else:\n raise ValueError(\n \"objective should be of type 'ObjectiveFunction' or string, \"\n + \"got {}\".format(objective)\n )\n self.solved = False\n self.optimizer_results = {}\n\n @property\n def optimizer(self):\n \"\"\"Optimizer currently assigned (Optimizer).\"\"\"\n if not hasattr(self, \"_optimizer\"):\n self._optimizer = None\n return self._optimizer\n\n @optimizer.setter\n def optimizer(self, optimizer):\n if optimizer is None:\n self._optimizer = optimizer\n elif isinstance(optimizer, Optimizer) and optimizer.eq(self.optimizer):\n return\n elif isinstance(optimizer, Optimizer) and not optimizer.eq(self.optimizer):\n self._optimizer = optimizer\n elif optimizer in Optimizer._all_methods:\n self._optimizer = Optimizer(optimizer)\n else:\n raise ValueError(\n \"optimizer should be of type Optimizer or str, got {}\".format(\n optimizer\n )\n )\n\n @property\n def initial(self):\n \"\"\"Return initial equilibrium state from which it was solved (Equilibrium).\"\"\"\n\n R_lmn, Z_lmn, L_lmn = unpack_state(\n self.x0, self.R_basis.num_modes, self.Z_basis.num_modes\n )\n inputs = {\n \"sym\": self.sym,\n \"NFP\": self.NFP,\n \"Psi\": self.Psi,\n \"L\": self.L,\n \"M\": 
self.M,\n \"N\": self.N,\n \"spectral_indexing\": self.spectral_indexing,\n \"bdry_mode\": self.bdry_mode,\n \"pressure\": self.pressure,\n \"iota\": self.iota,\n \"surface\": self.surface,\n \"R_lmn\": R_lmn,\n \"Z_lmn\": Z_lmn,\n \"L_lmn\": L_lmn,\n \"objective\": self.objective.name,\n \"optimizer\": self.optimizer.method,\n }\n return Equilibrium(**inputs)\n\n def evaluate(self, jac=False):\n \"\"\"Evaluate the objective function.\n\n Parameters\n ----------\n jac : bool\n whether to compute and return the jacobian df/dx as well\n\n Returns\n -------\n f : ndarray or float\n function value\n jac : ndarray\n derivative df/dx\n\n \"\"\"\n if self.objective is None:\n raise AttributeError(\n \"Equilibrium must have objective defined before evaluating.\"\n )\n\n y = self.objective.BC_constraint.project(self.x)\n f = self.objective.compute(\n y, self.Rb_lmn, self.Zb_lmn, self.p_l, self.i_l, self.Psi\n )\n if jac:\n jac = self.objective.jac_x(\n y, self.Rb_lmn, self.Zb_lmn, self.p_l, self.i_l, self.Psi\n )\n return f, jac\n else:\n return f\n\n def resolution_summary(self):\n \"\"\"Print a summary of the spectral and real space resolution.\"\"\"\n print(\"Spectral indexing: {}\".format(self.spectral_indexing))\n print(\"Spectral resolution (L,M,N)=({},{},{})\".format(self.L, self.M, self.N))\n print(\"Node pattern: {}\".format(self.node_pattern))\n print(\n \"Node resolution (L,M,N)=({},{},{})\".format(\n self.L_grid, self.M_grid, self.N_grid\n )\n )\n\n def solve(\n self,\n ftol=1e-6,\n xtol=1e-6,\n gtol=1e-6,\n verbose=1,\n x_scale=\"auto\",\n maxiter=50,\n options={},\n ):\n \"\"\"Solve to find the equilibrium configuration.\n\n Parameters\n ----------\n ftol : float\n Relative stopping tolerance on objective function value.\n xtol : float\n Stopping tolerance on step size.\n gtol : float\n Stopping tolerance on norm of gradient.\n verbose : int\n Level of output.\n maxiter : int\n Maximum number of solver steps.\n options : dict\n Dictionary of additional options to pass to optimizer.\n\n \"\"\"\n if self.optimizer is None or self.objective is None:\n raise AttributeError(\n \"Equilibrium must have objective and optimizer defined before solving.\"\n )\n\n # make sure objective is up to date\n self.objective = self.objective.name\n args = (self.Rb_lmn, self.Zb_lmn, self.p_l, self.i_l, self.Psi)\n\n self.x0 = self.x\n x_init = self.objective.BC_constraint.project(self.x)\n\n result = self.optimizer.optimize(\n self.objective,\n x_init=x_init,\n args=args,\n ftol=ftol,\n xtol=xtol,\n gtol=gtol,\n x_scale=x_scale,\n verbose=verbose,\n maxiter=maxiter,\n options=options,\n )\n\n if verbose > 0:\n print(\"Start of solver\")\n self.objective.callback(x_init, *args)\n print(\"End of solver\")\n self.objective.callback(result[\"x\"], *args)\n\n self.optimizer_results = {\n key: val if isinstance(val, str) else np.copy(val)\n for key, val in result.items()\n }\n self.x = np.copy(self.objective.BC_constraint.recover(result[\"x\"]))\n self.solved = result[\"success\"]\n return result\n\n def perturb(\n self,\n objective=None,\n dRb=None,\n dZb=None,\n dp=None,\n di=None,\n dPsi=None,\n order=2,\n tr_ratio=0.1,\n cutoff=1e-6,\n weight=\"auto\",\n Jx=None,\n verbose=1,\n copy=True,\n ):\n \"\"\"Perturb the configuration while mainting equilibrium.\n\n Parameters\n ----------\n objective : ObjectiveFunction\n objective to optimize during the perturbation (optional)\n dRb, dZb, dp, di, dPsi : ndarray or float\n If objective not given: deltas for perturbations of\n R_boundary, Z_boundary, pressure, iota, 
and toroidal flux.\n Setting to None or zero ignores that term in the expansion.\n If objective is given: indicies of modes to include in the perturbations of\n R_boundary, Z_boundary, pressure, iota, toroidal flux, and zeta ratio.\n Setting to True (False/None) includes (excludes) all modes.\n order : int, optional\n order of perturbation (0=none, 1=linear, 2=quadratic)\n tr_ratio : float or array of float\n radius of the trust region, as a fraction of ||x||.\n enforces ||dx1|| <= tr_ratio*||x|| and ||dx2|| <= tr_ratio*||dx1||\n if a scalar uses same ratio for all steps, if an array uses the first element\n for the first step and so on\n cutoff : float\n relative cutoff for small singular values in pseudoinverse\n weight : ndarray, \"auto\", or None, optional\n 1d or 2d array for weighted least squares. 1d arrays are turned into diagonal\n matrices. Default is to weight by (mode number)**2. None applies no weighting.\n Jx : ndarray, optional\n jacobian matrix df/dx\n verbose : int\n level of output to display\n copy : bool\n True to return a modified copy of the current equilibrium, False to perturb\n the current equilibrium in place\n\n Returns\n -------\n eq_new : Equilibrium\n perturbed equilibrum, only returned if copy=True\n\n \"\"\"\n if objective is None:\n # perturb with the given input parameter deltas\n equil = perturb(\n self,\n dRb,\n dZb,\n dp,\n di,\n dPsi,\n order=order,\n tr_ratio=tr_ratio,\n cutoff=cutoff,\n weight=weight,\n Jx=Jx,\n verbose=verbose,\n copy=copy,\n )\n else:\n equil = optimal_perturb(\n # find the deltas that optimize the objective, then perturb\n self,\n objective,\n dRb,\n dZb,\n dp,\n di,\n dPsi,\n order=order,\n tr_ratio=tr_ratio,\n cutoff=cutoff,\n Jx=Jx,\n verbose=verbose,\n copy=copy,\n )\n\n equil.solved = False\n equil.optimizer_results = {}\n\n if copy:\n return equil\n else:\n return None\n\n def optimize(self):\n \"\"\"Optimize an equilibrium for a physics or engineering objective.\"\"\"\n raise NotImplementedError(\"Optimizing equilibria has not yet been implemented.\")\n\n\nclass EquilibriaFamily(IOAble, MutableSequence):\n \"\"\"EquilibriaFamily stores a list of Equilibria.\n\n Has methods for solving complex equilibria using a multi-grid continuation method.\n\n Parameters\n ----------\n inputs : dict or list\n either a dictionary of inputs or list of dictionaries. 
For more information\n see inputs required by ``'Equilibrium'``.\n If solving using continuation method, a list should be given.\n\n \"\"\"\n\n _io_attrs_ = [\"_equilibria\"]\n\n def __init__(self, inputs):\n # did we get 1 set of inputs or several?\n if isinstance(inputs, (list, tuple)):\n self.equilibria = [Equilibrium(**inputs[0])]\n else:\n self.equilibria = [Equilibrium(**inputs)]\n self.inputs = inputs\n\n @staticmethod\n def _format_deltas(inputs, equil):\n \"\"\"Format the changes in continuation parameters.\n\n Parameters\n ----------\n inputs : dict\n Dictionary of continuation parameters for next step.\n equil : Equilibrium\n Equilibrium being perturbed.\n\n Returns\n -------\n deltas : dict\n Dictionary of changes in parameter values.\n\n \"\"\"\n deltas = {}\n if equil.bdry_mode == \"lcfs\":\n s = FourierRZToroidalSurface(\n inputs[\"surface\"][:, 3],\n inputs[\"surface\"][:, 4],\n inputs[\"surface\"][:, 1:3].astype(int),\n inputs[\"surface\"][:, 1:3].astype(int),\n equil.NFP,\n equil.sym,\n )\n s.change_resolution(equil.M, equil.N)\n Rb_lmn, Zb_lmn = s.R_lmn, s.Z_lmn\n elif equil.bdry_mode == \"poincare\":\n s = ZernikeRZToroidalSection(\n inputs[\"surface\"][:, 3],\n inputs[\"surface\"][:, 4],\n inputs[\"surface\"][:, :2].astype(int),\n inputs[\"surface\"][:, :2].astype(int),\n equil.spectral_indexing,\n equil.sym,\n )\n s.change_resolution(equil.L, equil.M)\n Rb_lmn, Zb_lmn = s.R_lmn, s.Z_lmn\n\n p_l = np.zeros_like(equil.pressure.params)\n i_l = np.zeros_like(equil.iota.params)\n for l, p in inputs[\"pressure\"]:\n idx_p = np.where(equil.pressure.basis.modes[:, 0] == int(l))[0]\n p_l[idx_p] = p\n for l, i in inputs[\"iota\"]:\n idx_i = np.where(equil.iota.basis.modes[:, 0] == int(l))[0]\n i_l[idx_i] = i\n\n if not np.allclose(Rb_lmn, equil.Rb_lmn):\n deltas[\"dRb\"] = Rb_lmn - equil.Rb_lmn\n if not np.allclose(Zb_lmn, equil.Zb_lmn):\n deltas[\"dZb\"] = Zb_lmn - equil.Zb_lmn\n if not np.allclose(p_l, equil.p_l):\n deltas[\"dp\"] = p_l - equil.p_l\n if not np.allclose(i_l, equil.i_l):\n deltas[\"di\"] = i_l - equil.i_l\n if not np.allclose(inputs[\"Psi\"], equil.Psi):\n deltas[\"dPsi\"] = inputs[\"Psi\"] - equil.Psi\n return deltas\n\n def _print_iteration(self, ii, equil):\n print(\"================\")\n print(\"Step {}/{}\".format(ii + 1, len(self.inputs)))\n print(\"================\")\n equil.resolution_summary()\n print(\"Boundary ratio = {}\".format(self.inputs[ii][\"bdry_ratio\"]))\n print(\"Pressure ratio = {}\".format(self.inputs[ii][\"pres_ratio\"]))\n print(\"Perturbation Order = {}\".format(self.inputs[ii][\"pert_order\"]))\n print(\"Constraint: {}\".format(equil.objective.BC_constraint.name))\n print(\"Objective: {}\".format(equil.objective.name))\n print(\"Optimizer: {}\".format(equil.optimizer.method))\n print(\"Function tolerance = {}\".format(self.inputs[ii][\"ftol\"]))\n print(\"Gradient tolerance = {}\".format(self.inputs[ii][\"gtol\"]))\n print(\"State vector tolerance = {}\".format(self.inputs[ii][\"xtol\"]))\n print(\"Max function evaluations = {}\".format(self.inputs[ii][\"nfev\"]))\n print(\"================\")\n\n def solve_continuation(self, start_from=0, verbose=None, checkpoint_path=None):\n \"\"\"Solve for an equilibrium by continuation method.\n\n 1. Creates an initial guess from the given inputs\n 2. Find equilibrium flux surfaces by minimizing the objective function.\n 3. Step up to higher resolution and perturb the previous solution\n 4. 
Repeat 2 and 3 until at desired resolution\n\n Parameters\n ----------\n start_from : integer\n start solution from the given index\n verbose : integer\n * 0: no output\n * 1: summary of each iteration\n * 2: as above plus timing information\n * 3: as above plus detailed solver output\n checkpoint_path : str or path-like\n file to save checkpoint data (Default value = None)\n\n \"\"\"\n timer = Timer()\n if verbose is None:\n verbose = self.inputs[0][\"verbose\"]\n timer.start(\"Total time\")\n\n if (\n not (\n isalmostequal([inp[\"bdry_ratio\"] for inp in self.inputs])\n and isalmostequal([inp[\"pres_ratio\"] for inp in self.inputs])\n )\n and not use_jax\n ):\n warnings.warn(\n colored(\n \"Computing perturbations with finite differences can be \"\n + \"highly innacurate, consider using JAX or setting all \"\n + \"perturbation ratios to 1\",\n \"yellow\",\n )\n )\n\n for ii in range(start_from, len(self.inputs)):\n timer.start(\"Iteration {} total\".format(ii + 1))\n if ii == start_from:\n equil = self[ii]\n if verbose > 0:\n self._print_iteration(ii, equil)\n\n else:\n equil = self[ii - 1].copy()\n self.insert(ii, equil)\n # this is basically free if nothings actually changing, so we can call\n # it on each iteration\n equil.change_resolution(\n L=self.inputs[ii][\"L\"],\n M=self.inputs[ii][\"M\"],\n N=self.inputs[ii][\"N\"],\n L_grid=self.inputs[ii][\"L_grid\"],\n M_grid=self.inputs[ii][\"M_grid\"],\n N_grid=self.inputs[ii][\"N_grid\"],\n )\n if verbose > 0:\n self._print_iteration(ii, equil)\n\n # figure out if we we need perturbations\n deltas = self._format_deltas(self.inputs[ii], equil)\n\n if len(deltas) > 0:\n equil.build(verbose)\n if verbose > 0:\n print(\"Perturbing equilibrium\")\n\n equil.perturb(\n **deltas,\n order=self.inputs[ii][\"pert_order\"],\n verbose=verbose,\n copy=False,\n )\n\n if not equil.is_nested():\n warnings.warn(\n colored(\n \"WARNING: Flux surfaces are no longer nested, exiting early.\"\n + \"Consider taking smaller perturbation/resolution steps \"\n + \"or reducing trust radius\",\n \"yellow\",\n )\n )\n if checkpoint_path is not None:\n if verbose > 0:\n print(\"Saving latest state\")\n self.save(checkpoint_path)\n break\n\n # objective function\n objective = get_objective_function(\n self.inputs[ii][\"objective\"],\n R_transform=equil.transforms[\"R\"],\n Z_transform=equil.transforms[\"Z\"],\n L_transform=equil.transforms[\"L\"],\n p_profile=equil.pressure,\n i_profile=equil.iota,\n BC_constraint=equil.surface.get_constraint(\n R_basis=equil.R_basis, Z_basis=equil.Z_basis, L_basis=equil.L_basis\n ),\n use_jit=True,\n )\n # reuse old objective if possible to avoid recompiling\n if objective.eq(self[ii - 1].objective):\n equil.objective = self[ii - 1].objective\n else:\n equil.objective = objective\n\n # optimization algorithm\n optimizer = Optimizer(self.inputs[ii][\"optimizer\"])\n equil.optimizer = optimizer\n\n equil.solve(\n ftol=self.inputs[ii][\"ftol\"],\n xtol=self.inputs[ii][\"xtol\"],\n gtol=self.inputs[ii][\"gtol\"],\n verbose=verbose,\n maxiter=self.inputs[ii][\"nfev\"],\n )\n\n if checkpoint_path is not None:\n if verbose > 0:\n print(\"Saving latest iteration\")\n self.save(checkpoint_path)\n timer.stop(\"Iteration {} total\".format(ii + 1))\n if verbose > 1:\n timer.disp(\"Iteration {} total\".format(ii + 1))\n\n if not equil.is_nested():\n warnings.warn(\n colored(\n \"WARNING: Flux surfaces are no longer nested, exiting early.\"\n + \"Consider taking smaller perturbation/resolution steps \"\n + \"or reducing trust radius\",\n 
\"yellow\",\n )\n )\n break\n\n timer.stop(\"Total time\")\n print(\"====================\")\n print(\"Done\")\n if verbose > 1:\n timer.disp(\"Total time\")\n if checkpoint_path is not None:\n print(\"Output written to {}\".format(checkpoint_path))\n print(\"====================\")\n\n @property\n def equilibria(self):\n \"\"\"List of equilibria contained in the family (list).\"\"\"\n return self._equilibria\n\n @equilibria.setter\n def equilibria(self, equil):\n if isinstance(equil, tuple):\n equil = list(equil)\n elif isinstance(equil, np.ndarray):\n equil = equil.tolist()\n elif not isinstance(equil, list):\n equil = [equil]\n if not np.all([isinstance(eq, Equilibrium) for eq in equil]):\n raise ValueError(\n \"Members of EquilibriaFamily should be of type Equilibrium or subclass.\"\n )\n self._equilibria = list(equil)\n\n # dunder methods required by MutableSequence\n\n def __getitem__(self, i):\n return self._equilibria[i]\n\n def __setitem__(self, i, new_item):\n if not isinstance(new_item, Equilibrium):\n raise ValueError(\n \"Members of EquilibriaFamily should be of type Equilibrium or subclass.\"\n )\n self._equilibria[i] = new_item\n\n def __delitem__(self, i):\n del self._equilibria[i]\n\n def __len__(self):\n return len(self._equilibria)\n\n def insert(self, i, new_item):\n if not isinstance(new_item, Equilibrium):\n raise ValueError(\n \"Members of EquilibriaFamily should be of type Equilibrium or subclass.\"\n )\n self._equilibria.insert(i, new_item)\n"
] | [
[
"numpy.array",
"numpy.random.random",
"numpy.testing.assert_allclose"
],
[
"numpy.amax",
"numpy.sqrt",
"numpy.linspace",
"scipy.optimize.root",
"numpy.zeros_like",
"scipy.integrate.cumtrapz",
"numpy.where",
"numpy.swapaxes",
"numpy.pad",
"numpy.arange",
"numpy.sin",
"scipy.interpolate.CubicSpline",
"numpy.zeros",
"numpy.amin",
"numpy.floor",
"numpy.array",
"numpy.abs",
"matplotlib.pyplot.subplots",
"numpy.cos",
"numpy.ones"
],
[
"numpy.copy",
"numpy.zeros_like",
"numpy.allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.7",
"1.0",
"1.2",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
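A minimal sketch of how the Equilibrium / EquilibriaFamily API defined in the entry above fits together (constructor, build, solve, perturb). The import path, the objective and optimizer names, and all numeric values are assumptions chosen for illustration; only the method names and signatures come from the dumped source.

# Assumed module path; the file header is not included in this dump.
from desc.equilibrium import Equilibrium

eq = Equilibrium(
    Psi=1.0,                       # total toroidal flux (placeholder value)
    L=4, M=4, N=0,                 # spectral resolution
    L_grid=8, M_grid=8, N_grid=0,  # real-space (collocation) resolution
    node_pattern="jacobi",         # one of the patterns handled by _set_grid
    objective="force",             # assumed name, resolved via get_objective_function
    optimizer="lsq-exact",         # assumed name, must be in Optimizer._all_methods
)

eq.build(verbose=1)  # precompute transforms and factorize the boundary constraint
result = eq.solve(ftol=1e-6, xtol=1e-6, gtol=1e-6, maxiter=50, verbose=1)

# Perturbing while maintaining equilibrium; dp must match the pressure
# coefficient vector p_l, so it is left schematic here.
# eq_new = eq.perturb(dp=delta_p, order=2, copy=True)

For multi-step continuation, the same source provides EquilibriaFamily(inputs).solve_continuation(), where inputs is a list of per-step dictionaries carrying the resolutions, tolerances, pert_order, objective, and optimizer fields referenced in solve_continuation and _print_iteration.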
Sage-Bionetworks/Genie | [
"ce70861b0d3717cd5b57a393a16b4d6fea9500f3"
] | [
"genie/dashboard_table_updater.py"
] | [
"\"\"\"Updates dashboard tables\"\"\"\nimport argparse\nimport datetime\nimport logging\nimport os\n\nimport pandas as pd\nimport synapseclient\nfrom synapseclient.core.utils import to_unix_epoch_time\n\nfrom genie import process_functions\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_center_data_completion(center, df):\n \"\"\"\n Gets center data completion. Calulates the percentile of\n how complete a clinical data element is:\n Number of not blank/Unknown/NA divded by\n total number of patients or samples\n\n Args:\n center: GENIE center\n df: sample or patient dataframe\n\n Returns:\n Dataframe: Center data\n \"\"\"\n centerdf = df[df['CENTER'] == center]\n total = len(centerdf)\n center_data = pd.DataFrame()\n skip_cols = ['CENTER', 'PATIENT_ID', 'SAMPLE_ID', 'SAMPLE_TYPE_DETAILED',\n 'SECONDARY_RACE', 'TERTIARY_RACE']\n for col in centerdf:\n if col not in skip_cols:\n not_missing = [not pd.isnull(value) and value != 'Not Collected'\n for value in centerdf[col]]\n completeness = float(sum(not_missing)) / int(total)\n returned = pd.DataFrame([[col, center, total, completeness]])\n center_data = center_data.append(returned)\n return center_data\n\n\ndef update_samples_in_release_table(syn, file_mapping, release,\n samples_in_release_synid):\n '''\n Updates the sample in release table\n This tracks the samples of each release. 1 means it exists, and 0\n means it doesn't\n\n Args:\n syn: synapse object\n file_mapping: file mapping generated from file mapping function\n release: GENIE release number (ie. 5.3-consortium)\n samples_in_release_synid: Synapse Id of 'samples in release' Table\n '''\n clinical_ent = syn.get(file_mapping['clinical'], followLink=True)\n clinicaldf = pd.read_csv(clinical_ent.path, sep=\"\\t\", comment=\"#\")\n cols = [i['name'] for i in\n list(syn.getTableColumns(samples_in_release_synid))]\n\n if release not in cols:\n schema = syn.get(samples_in_release_synid)\n syn_col = synapseclient.Column(name=release, columnType='INTEGER',\n defaultValue=0)\n new_column = syn.store(syn_col)\n schema.addColumn(new_column)\n schema = syn.store(schema)\n # Columns of samples in release\n samples_per_release = syn.tableQuery(\n 'SELECT SAMPLE_ID, \"{}\" FROM {}'.format(release,\n samples_in_release_synid))\n\n samples_per_releasedf = samples_per_release.asDataFrame()\n new_samples = clinicaldf[['SAMPLE_ID']][\n ~clinicaldf.SAMPLE_ID.isin(samples_per_releasedf.SAMPLE_ID)]\n\n new_samples[release] = 1\n old_samples = clinicaldf[['SAMPLE_ID']][\n clinicaldf.SAMPLE_ID.isin(samples_per_releasedf.SAMPLE_ID)]\n\n old_samples[release] = 1\n samples_in_releasedf = new_samples.append(old_samples)\n process_functions.updateDatabase(syn, samples_per_releasedf,\n samples_in_releasedf,\n samples_in_release_synid, [\"SAMPLE_ID\"])\n\n\ndef update_cumulative_sample_table(syn, file_mapping, release,\n cumulative_sample_count_synid):\n '''\n Consortium release sample count table update function\n This gets the cumulative sample count of each file type in each release\n\n Args:\n syn: synapse object\n file_mapping: file mapping generated from file mapping function\n release: GENIE release number (ie. 
5.3-consortium)\n cumulative_sample_count_synid: Synapse Id of\n 'Cumulative sample count' Table\n '''\n\n sample_count_per_round = syn.tableQuery(\n \"SELECT * FROM {} where Release = '{}'\".format(cumulative_sample_count_synid,\n release))\n sample_count_per_rounddf = sample_count_per_round.asDataFrame()\n\n clinical_ent = syn.get(file_mapping['clinical'], followLink=True)\n clinicaldf = pd.read_csv(clinical_ent.path, sep=\"\\t\", comment=\"#\")\n clinicaldf.columns = [i.upper() for i in clinicaldf.columns]\n if clinicaldf.get(\"CENTER\") is None:\n clinicaldf['CENTER'] = \\\n [sample.split(\"-\")[1] for sample in clinicaldf.SAMPLE_ID]\n clinical_counts = clinicaldf['CENTER'].value_counts()\n clinical_counts['Total'] = sum(clinical_counts)\n clinical_counts.name = \"Clinical\"\n\n fusion_ent = syn.get(file_mapping['fusion'], followLink=True)\n fusiondf = pd.read_csv(fusion_ent.path, sep=\"\\t\", comment=\"#\")\n fusiondf.columns = [i.upper() for i in fusiondf.columns]\n fusion_counts = fusiondf['CENTER'][\n ~fusiondf['TUMOR_SAMPLE_BARCODE'].duplicated()].value_counts()\n fusion_counts['Total'] = sum(fusion_counts)\n\n cna_ent = syn.get(file_mapping['cna'], followLink=True)\n cnadf = pd.read_csv(cna_ent.path, sep=\"\\t\", comment=\"#\")\n cna_counts = pd.Series(\n [i.split(\"-\")[1] for i in cnadf.columns[1:]]).value_counts()\n cna_counts['Total'] = sum(cna_counts)\n\n seg_ent = syn.get(file_mapping['seg'], followLink=True)\n segdf = pd.read_csv(seg_ent.path, sep=\"\\t\", comment=\"#\")\n segdf.columns = [i.upper() for i in segdf.columns]\n\n segdf['CENTER'] = [i.split(\"-\")[1] for i in segdf['ID']]\n seg_counts = segdf['CENTER'][~segdf['ID'].duplicated()].value_counts()\n seg_counts['Total'] = sum(seg_counts)\n\n total_counts = pd.DataFrame(clinical_counts)\n total_counts['Fusions'] = fusion_counts\n total_counts['CNV'] = cna_counts\n total_counts['Mutation'] = clinical_counts\n total_counts['SEG'] = seg_counts\n total_counts = total_counts.fillna(0)\n total_counts = total_counts.applymap(int)\n total_counts['Center'] = total_counts.index\n total_counts['Release'] = release\n process_functions.updateDatabase(syn, sample_count_per_rounddf,\n total_counts,\n cumulative_sample_count_synid,\n [\"Center\", \"Release\"],\n to_delete=True)\n\n\ndef get_file_mapping(syn, release_folder_synid):\n \"\"\"\n Get file mapping between important files needed for dashboard and\n their synapse ids\n\n Args:\n syn: synapse object\n release_folder_synid: synapse id of release\n\n \"\"\"\n files = syn.getChildren(release_folder_synid)\n file_mapping = dict()\n for metadata in files:\n filename = metadata['name']\n synid = metadata['id']\n if not filename.startswith(\"meta\"):\n if filename.startswith(\"data_clinical_sample\"):\n file_mapping['clinical'] = synid\n elif filename.endswith(\"fusions.txt\"):\n file_mapping['fusion'] = synid\n elif filename.endswith(\"CNA.txt\"):\n file_mapping['cna'] = synid\n elif filename.endswith(\".seg\"):\n file_mapping['seg'] = synid\n return file_mapping\n\n\ndef update_release_numbers(syn, database_mappingdf, release=None):\n \"\"\"\n Updates all release dashboard numbers or specific release number\n\n Args:\n syn: synapse object\n database_mappingdf: mapping between synapse ids and database\n release: GENIE release (ie. 5.3-consortium). 
Defaults to None\n \"\"\"\n # Update release table with current release or all releases\n samples_in_release_synid = database_mappingdf['Id'][\n database_mappingdf['Database'] == 'samplesInRelease'].values[0]\n cumulative_sample_count_synid = database_mappingdf['Id'][\n database_mappingdf['Database'] == 'cumulativeSampleCount'].values[0]\n\n release_folder_fileview_synid = database_mappingdf['Id'][\n database_mappingdf['Database'] == 'releaseFolder'].values[0]\n release_folder = syn.tableQuery(\n \"select id,name from %s\" % release_folder_fileview_synid +\n \" where name not like 'Release%' and name <> 'case_lists' and \" +\n \"name not like '%.0.%'\")\n release_folderdf = release_folder.asDataFrame()\n\n for rel_synid, rel_name in zip(release_folderdf.id, release_folderdf.name):\n file_mapping = get_file_mapping(syn, rel_synid)\n # If release is specified, only process on that,\n # otherwise process for all\n if release is None or release == rel_name:\n update_samples_in_release_table(\n syn, file_mapping, rel_name, samples_in_release_synid)\n update_cumulative_sample_table(\n syn, file_mapping, rel_name, cumulative_sample_count_synid)\n else:\n pass\n\n\ndef update_database_numbers(syn, database_mappingdf):\n \"\"\"\n Updates database cumulative numbers (Only called when not staging)\n\n Args:\n syn: synapse object\n database_mappingdf: mapping between synapse ids and database\n \"\"\"\n cumulative_sample_count_synid = database_mappingdf['Id'][\n database_mappingdf['Database'] == 'cumulativeSampleCount'].values[0]\n # Database\n database_count = syn.tableQuery(\n \"SELECT * FROM %s where Release = 'Database'\" %\n cumulative_sample_count_synid)\n database_countdf = database_count.asDataFrame()\n clinical = syn.tableQuery('select CENTER from syn7517674')\n clinicaldf = clinical.asDataFrame()\n clinincal_counts = clinicaldf['CENTER'].value_counts()\n clinincal_counts['Total'] = sum(clinincal_counts)\n clinincal_counts.name = \"Clinical\"\n\n fusion = syn.tableQuery('select * from syn7893268')\n fusiondf = fusion.asDataFrame()\n fusion_counts = fusiondf['CENTER'][\n ~fusiondf['TUMOR_SAMPLE_BARCODE'].duplicated()].value_counts()\n fusion_counts['Total'] = sum(fusion_counts)\n\n center_flat_files = syn.getChildren(\"syn12278118\")\n cna_file_paths = [syn.get(file['id']).path for file in center_flat_files if\n file['name'].startswith(\"data_CNA\")]\n cna_numbers = {}\n for cna_file in cna_file_paths:\n center = os.path.basename(cna_file).replace(\".txt\", \"\").split(\"_\")[2]\n with open(cna_file, 'r') as cna:\n header = cna.readline()\n samples = header.split(\"\\t\")\n # Minus one because of Hugo_Symbol\n cna_numbers[center] = len(samples) - 1\n cna_counts = pd.Series(cna_numbers)\n cna_counts['Total'] = sum(cna_counts)\n\n seg = syn.tableQuery('select * from syn7893341')\n segdf = seg.asDataFrame()\n seg_counts = segdf['CENTER'][~segdf['ID'].duplicated()].value_counts()\n seg_counts['Total'] = sum(seg_counts)\n\n db_counts = pd.DataFrame(clinincal_counts)\n db_counts['Fusions'] = fusion_counts\n db_counts['CNV'] = cna_counts\n db_counts['Mutation'] = clinincal_counts\n db_counts['SEG'] = seg_counts\n db_counts = db_counts.fillna(0)\n db_counts = db_counts.applymap(int)\n db_counts['Center'] = db_counts.index\n db_counts['Release'] = \"Database\"\n process_functions.updateDatabase(syn, database_countdf, db_counts,\n cumulative_sample_count_synid,\n [\"Center\", \"Release\"])\n today = datetime.date.today()\n if today.month in [1, 4, 8, 12]:\n db_count_tracker = db_counts[['Clinical', 
'Center', 'Release']]\n db_count_tracker.rename(\n columns={'Clinical': 'sample_count',\n 'Center': 'center',\n 'Release': 'date'},\n inplace=True)\n db_count_tracker['date'] = today.strftime(\"%b-%Y\")\n # Hard coded syn id\n syn.store(synapseclient.Table(\"syn18404852\", db_count_tracker))\n\n\ndef update_oncotree_code_tables(syn, database_mappingdf):\n \"\"\"\n Updates database statistics of oncotree codes\n and primary onocotree codes\n\n Args:\n syn: synapse object\n database_mappingdf: mapping between synapse ids and database\n \"\"\"\n oncotree_distribution_synid = database_mappingdf['Id'][\n database_mappingdf['Database'] == 'oncotree'].values[0]\n\n clinical = syn.tableQuery('select * from syn7517674')\n clinicaldf = clinical.asDataFrame()\n\n # DISTRIBUTION OF ONCOTREE CODE TABLE UPDATE\n oncotree_code_distributiondf = pd.DataFrame(\n columns=set(clinicaldf['CENTER']),\n index=set(clinicaldf['ONCOTREE_CODE']))\n for center in oncotree_code_distributiondf.columns:\n onc_counts = clinicaldf['ONCOTREE_CODE'][\n clinicaldf['CENTER'] == center].value_counts()\n oncotree_code_distributiondf[center] = onc_counts\n oncotree_code_distributiondf = oncotree_code_distributiondf.fillna(0)\n oncotree_code_distributiondf = oncotree_code_distributiondf.applymap(int)\n oncotree_code_distributiondf['Total'] = \\\n oncotree_code_distributiondf.apply(sum, axis=1)\n oncotree_code_distributiondf['Oncotree_Code'] = \\\n oncotree_code_distributiondf.index\n\n oncotree_distribution_db = syn.tableQuery(\n 'SELECT %s FROM %s' %\n (\"Oncotree_Code,\" + \",\".join(clinicaldf['CENTER'].unique()) +\n \",Total\", oncotree_distribution_synid))\n\n oncotree_distribution_dbdf = oncotree_distribution_db.asDataFrame()\n process_functions.updateDatabase(syn, oncotree_distribution_dbdf,\n oncotree_code_distributiondf,\n oncotree_distribution_synid,\n [\"Oncotree_Code\"], to_delete=True)\n\n # DISTRIBUTION OF PRIMARY CODE TABLE UPDATE\n oncotree_link_synid = database_mappingdf['Id'][\n database_mappingdf['Database'] == 'oncotreeLink'].values[0]\n primary_code_synId = database_mappingdf['Id'][\n database_mappingdf['Database'] == 'primaryCode'].values[0]\n\n # Can also use most up to date oncotree code,\n # because these tables are updated from the database\n oncotree_link_ent = syn.get(oncotree_link_synid)\n oncotree_link = oncotree_link_ent.externalURL\n oncotree_mapping = \\\n process_functions.get_oncotree_code_mappings(oncotree_link)\n\n clinicaldf['PRIMARY_CODES'] = \\\n [oncotree_mapping[i.upper()]['ONCOTREE_PRIMARY_NODE']\n if i.upper() in oncotree_mapping.keys() else 'DEPRECATED_CODE'\n for i in clinicaldf.ONCOTREE_CODE]\n\n # ### DISTRIBUTION OF PRIMARY ONCOTREE CODE TABLE UPDATE\n primary_code_distributiondf = pd.DataFrame(\n columns=set(clinicaldf['CENTER']),\n index=set(clinicaldf['PRIMARY_CODES']))\n\n for center in primary_code_distributiondf.columns:\n onc_counts = clinicaldf['PRIMARY_CODES'][\n clinicaldf['CENTER'] == center].value_counts()\n primary_code_distributiondf[center] = onc_counts\n primary_code_distributiondf = primary_code_distributiondf.fillna(0)\n primary_code_distributiondf = primary_code_distributiondf.applymap(int)\n primary_code_distributiondf['Total'] = \\\n primary_code_distributiondf.apply(sum, axis=1)\n primary_code_distributiondf['Oncotree_Code'] = \\\n primary_code_distributiondf.index\n\n primary_code_dist_db = syn.tableQuery(\n 'SELECT %s FROM %s' %\n (\"Oncotree_Code,\" + \",\".join(clinicaldf['CENTER'].unique()) +\n \",Total\", primary_code_synId))\n\n primary_code_dist_dbdf = 
primary_code_dist_db.asDataFrame()\n process_functions.updateDatabase(syn, primary_code_dist_dbdf,\n primary_code_distributiondf,\n primary_code_synId, [\"Oncotree_Code\"],\n to_delete=True)\n\n\ndef update_sample_difference_table(syn, database_mappingdf):\n \"\"\"\n Updates sample difference table between\n consortium releases\n\n Args:\n syn: synapse object\n database_mappingdf: mapping between synapse ids and database\n \"\"\"\n cumulative_sample_count_synid = database_mappingdf['Id'][\n database_mappingdf['Database'] == 'cumulativeSampleCount'].values[0]\n\n sample_diff_count_synid = database_mappingdf['Id'][\n database_mappingdf['Database'] == 'sampleDiffCount'].values[0]\n\n # UPDATE DIFF TABLE\n sample_count_per_round = syn.tableQuery(\n \"SELECT * FROM %s where Center <> 'Total' and Release <> 'Database'\"\n % cumulative_sample_count_synid)\n\n sample_count_per_rounddf = sample_count_per_round.asDataFrame()\n releases = list(sample_count_per_rounddf['Release'].unique())\n # sort the releases and remove public releases\n releases.sort()\n consortium_releases = [\n release for release in releases if \"public\" not in release\n and \".0.\" not in release]\n\n diff_between_releasesdf = sample_count_per_rounddf[\n sample_count_per_rounddf['Release'] == consortium_releases[0]]\n\n for index, release_name in enumerate(consortium_releases[1:]):\n prior_release = sample_count_per_rounddf[\n sample_count_per_rounddf['Release'] == consortium_releases[index]]\n\n current_release = sample_count_per_rounddf[\n sample_count_per_rounddf['Release'] == release_name]\n\n prior_release.index = prior_release['Center']\n current_release.index = current_release['Center']\n\n del prior_release['Center']\n del prior_release['Release']\n del current_release['Center']\n del current_release['Release']\n # Append new rows of centers that are new and\n # just added to the releases\n new_centers = current_release.index[\n ~current_release.index.isin(prior_release.index)]\n\n if not new_centers.empty:\n prior_release = prior_release.append(\n pd.DataFrame(index=new_centers))\n prior_release = prior_release.fillna(0)\n difference = current_release - prior_release\n difference['Center'] = difference.index\n difference['Release'] = release_name\n diff_between_releasesdf = diff_between_releasesdf.append(difference)\n\n difftable_db = syn.tableQuery('SELECT * FROM %s' % sample_diff_count_synid)\n difftable_dbdf = difftable_db.asDataFrame()\n difftable_dbdf = difftable_dbdf.fillna(0)\n new_values = diff_between_releasesdf[[\n 'Clinical', 'Mutation',\n 'CNV', 'SEG', 'Fusions']].fillna(0).applymap(int)\n\n diff_between_releasesdf[\n ['Clinical', 'Mutation', 'CNV', 'SEG', 'Fusions']] = new_values\n\n process_functions.updateDatabase(syn, difftable_dbdf,\n diff_between_releasesdf,\n sample_diff_count_synid,\n [\"Center\", \"Release\"],\n to_delete=True)\n\n\ndef update_data_completeness_table(syn, database_mappingdf):\n \"\"\"\n Updates the data completeness of the database\n\n Args:\n syn: synapse object\n database_mappingdf: mapping between synapse ids and database\n \"\"\"\n data_completion_synid = database_mappingdf['Id'][\n database_mappingdf['Database'] == 'dataCompletion'].values[0]\n\n sample = syn.tableQuery('select * from syn7517674')\n sampledf = sample.asDataFrame()\n patient = syn.tableQuery('select * from syn7517669')\n patientdf = patient.asDataFrame()\n\n data_completenessdf = pd.DataFrame()\n center_infos = sampledf.CENTER.drop_duplicates().apply(\n lambda center: get_center_data_completion(center, 
sampledf))\n for center_info in center_infos:\n data_completenessdf = data_completenessdf.append(center_info)\n\n center_infos = patientdf.CENTER.drop_duplicates().apply(\n lambda center: get_center_data_completion(center, patientdf))\n for center_info in center_infos:\n data_completenessdf = data_completenessdf.append(center_info)\n\n data_completeness_db = syn.tableQuery(\n 'select * from %s' % data_completion_synid)\n data_completeness_dbdf = data_completeness_db.asDataFrame()\n data_completenessdf.columns = data_completeness_dbdf.columns\n process_functions.updateDatabase(syn, data_completeness_dbdf,\n data_completenessdf,\n data_completion_synid,\n [\"FIELD\", \"CENTER\"],\n to_delete=True)\n\n\ndef update_wiki(syn, database_mappingdf):\n \"\"\"\n Updates the GENIE project dashboard wiki timestamp\n\n Args:\n syn: synapse object\n database_mappingdf: mapping between synapse ids and database\n\n \"\"\"\n # Updates to query and date dashboard was updated\n cumulative_sample_count_synid = database_mappingdf['Id'][\n database_mappingdf['Database'] == 'cumulativeSampleCount'].values[0]\n\n primary_code_synId = database_mappingdf['Id'][\n database_mappingdf['Database'] == 'primaryCode'].values[0]\n\n centers = syn.tableQuery(\n 'select distinct(CENTER) as CENTER from syn7517674')\n\n centersdf = centers.asDataFrame()\n now = datetime.datetime.now()\n markdown = \\\n [\"_Updated {month}/{day}/{year}_\\n\\n\".format(\n month=now.month,\n day=now.day,\n year=now.year),\n \"##Count of Clinical Samples\\n\",\n \"${synapsetable?query=SELECT Center%2C Clinical%2C Release FROM \" +\n cumulative_sample_count_synid + \"}\\n\\n\",\n \"\\n\\n##Primary Oncotree Codes\\n\\n\",\n \"${synapsetable?query=SELECT Oncotree%5FCode%2C \" +\n \"%2C \".join(centersdf['CENTER'].unique()) +\n \"%2C Total FROM \" + primary_code_synId +\n \" ORDER BY Total DESC&limit=15}\\n\\n\"]\n\n wikipage = syn.getWiki(\"syn3380222\", 235803)\n wikipage.markdown = \"\".join(markdown)\n syn.store(wikipage)\n\n\ndef string_to_unix_epoch_time_milliseconds(string_time):\n \"\"\"\n Takes dates in this format: 2018-10-25T20:16:07.959Z\n and turns it into unix epoch time in milliseconds\n\n Args:\n string_time: string in this format: 2018-10-25T20:16:07.959Z\n\n Returns:\n unix epoch time in milliseconds\n \"\"\"\n datetime_obj = datetime.datetime.strptime(\n string_time.split(\".\")[0], \"%Y-%m-%dT%H:%M:%S\")\n return to_unix_epoch_time(datetime_obj)\n\n\ndef update_data_release_file_table(syn, database_mappingdf):\n \"\"\"\n Updates data release file table\n\n Args:\n syn: synapse object\n database_mappingdf: mapping between synapse ids and database\n \"\"\"\n release_folder_fileview_synid = database_mappingdf['Id'][\n database_mappingdf['Database'] == 'releaseFolder'].values[0]\n release_folder = syn.tableQuery(\n \"select id,name from %s\" % release_folder_fileview_synid +\n \" where name not like 'Release%' and name <> 'case_lists' \" +\n \"and name not like '0.%'\")\n release_folderdf = release_folder.asDataFrame()\n\n data_release_table_synid = \"syn16804261\"\n data_release_table = syn.tableQuery(\n \"select * from %s\" % data_release_table_synid)\n data_release_tabledf = data_release_table.asDataFrame()\n\n not_in_release_tabledf = release_folderdf[\n ~release_folderdf.name.isin(data_release_tabledf.release)]\n\n for synid, name in zip(not_in_release_tabledf.id,\n not_in_release_tabledf.name):\n release_files = syn.getChildren(synid)\n\n append_rows = [\n [release_file['name'],\n release_file['id'],\n name,\n 
string_to_unix_epoch_time_milliseconds(\n release_file['modifiedOn']), synid]\n for release_file in release_files\n if release_file['name'] != \"case_lists\"]\n\n syn.store(synapseclient.Table(data_release_table_synid, append_rows))\n\n\ndef check_column_decreases(currentdf, olderdf):\n \"\"\"\n Checks entity decreases\n\n Args:\n current_ent: Current entity dataframe\n old_ent: Older entity dataframe\n\n Returns:\n Differences in values\n \"\"\"\n diff_map = dict()\n for col in currentdf:\n new_counts = currentdf[col].value_counts()\n if olderdf.get(col) is not None:\n old_counts = olderdf[col].value_counts()\n # Make sure any values that exist in the new get added\n # to the old to show the decrease\n new_keys = pd.Series(index=new_counts.keys()[\n ~new_counts.keys().isin(old_counts.keys())])\n old_counts = old_counts.add(new_keys, fill_value=0)\n old_counts.fillna(0, inplace=True)\n # Make sure any values that don't exist in the old get added\n # to show the decrease\n new_keys = pd.Series(index=old_counts.keys()[\n ~old_counts.keys().isin(new_counts.keys())])\n new_counts = new_counts.add(new_keys, fill_value=0)\n new_counts.fillna(0, inplace=True)\n if any(new_counts - old_counts < 0):\n logger.info(\"\\tDECREASE IN COLUMN: %s\" % col)\n # diff = new_counts[new_counts - old_counts < 0]\n diffs = new_counts-old_counts\n diffstext = diffs[diffs < 0].to_csv().replace(\"\\n\", \"; \")\n logger.info(\"\\t\" + diffstext)\n diff_map[col] = True\n else:\n diff_map[col] = False\n return diff_map\n\n\ndef print_clinical_values_difference_table(syn, database_mappingdf):\n \"\"\"\n Checks for a decrease in values in the clinical file\n from last consortium release to most recent consortium release\n\n Args:\n syn: synapse object\n database_mappingdf: mapping between synapse ids and database\n \"\"\"\n release_folder_fileview_synid = database_mappingdf['Id'][\n database_mappingdf['Database'] == 'releaseFolder'].values[0]\n\n clinical_key_decrease_synid = database_mappingdf['Id'][\n database_mappingdf['Database'] == 'clinicalKeyDecrease'].values[0]\n\n release_folder = syn.tableQuery(\n f\"select id,name from {release_folder_fileview_synid} \"\n \"where name not like 'Release%' and name <> 'case_lists' \"\n \"and name not like '%.0.%' and name not like '%-public' \"\n \"and name <> 'potential_artifacts'\"\n )\n\n release_folderdf = release_folder.asDataFrame()\n # Set release number as a numerical value since string \"10\" < \"9\"\n # Also can't set by created on date, because sometimes\n # there are patch releases\n release_folderdf['num_release'] = [\n float(name.replace(\".0\", \"\").replace(\"-consortium\", \"\"))\n for name in release_folderdf['name']\n ]\n release_folderdf.sort_values(\"num_release\", ascending=False, inplace=True)\n current_release = release_folderdf['id'][0]\n older_release = release_folderdf['id'][1]\n\n current_release_files = syn.getChildren(current_release)\n current_clinical_synids = {\n file['name']: file['id']\n for file in current_release_files if file['name'] in\n ['data_clinical_sample.txt', 'data_clinical_patient.txt']}\n\n older_release_files = syn.getChildren(older_release)\n\n older_clinical_synids = {\n file['name']: file['id']\n for file in older_release_files if file['name'] in\n ['data_clinical_sample.txt', 'data_clinical_patient.txt']}\n\n current_sample_ent = syn.get(\n current_clinical_synids['data_clinical_sample.txt'], followLink=True)\n\n older_sample_ent = syn.get(\n older_clinical_synids['data_clinical_sample.txt'], followLink=True)\n 
current_sampledf = pd.read_csv(\n current_sample_ent.path, sep=\"\\t\", comment=\"#\")\n\n current_sampledf['CENTER'] = [\n patient.split(\"-\")[1] for patient in current_sampledf['PATIENT_ID']]\n\n older_sampledf = pd.read_csv(older_sample_ent.path, sep=\"\\t\", comment=\"#\")\n older_sampledf['CENTER'] = [\n patient.split(\"-\")[1] for patient in older_sampledf['PATIENT_ID']]\n # Rather than take the CENTER, must take the SAMPLE_ID to compare\n current_sampledf = current_sampledf[current_sampledf['SAMPLE_ID'].isin(\n older_sampledf['SAMPLE_ID'].unique())]\n\n logger.info(\"SAMPLE CLINICAL VALUE DECREASES\")\n center_decrease_mapping = dict()\n for center in older_sampledf['CENTER'].unique():\n current_center_sampledf = current_sampledf[\n current_sampledf['CENTER'] == center]\n\n older_center_sampledf = older_sampledf[\n older_sampledf['CENTER'] == center]\n\n logger.info(center)\n\n decrease_map = check_column_decreases(\n current_center_sampledf, older_center_sampledf)\n center_decrease_mapping[center] = decrease_map\n\n current_patient_ent = syn.get(\n current_clinical_synids['data_clinical_patient.txt'], followLink=True)\n\n older_patient_ent = syn.get(\n older_clinical_synids['data_clinical_patient.txt'], followLink=True)\n\n current_patientdf = pd.read_csv(\n current_patient_ent.path, sep=\"\\t\", comment=\"#\")\n\n older_patientdf = pd.read_csv(\n older_patient_ent.path, sep=\"\\t\", comment=\"#\")\n # Rather than take the CENTER, must take the PATIENT_ID to compare\n current_patientdf = current_patientdf[current_patientdf['PATIENT_ID'].isin(\n older_patientdf['PATIENT_ID'].unique())]\n\n logger.info(\"PATIENT CLINICAL VALUE DECREASES\")\n for center in older_patientdf['CENTER'].unique():\n current_center_patientdf = current_patientdf[\n current_patientdf['CENTER'] == center]\n\n older_center_patientdf = older_patientdf[\n older_patientdf['CENTER'] == center]\n\n logger.info(center)\n patient_decrease_map = check_column_decreases(\n current_center_patientdf, older_center_patientdf)\n\n center_decrease_mapping[center].update(patient_decrease_map)\n\n center_decrease_mapping = pd.DataFrame(center_decrease_mapping)\n center_decrease_mapping = center_decrease_mapping.transpose()\n center_decrease_mapping['CENTER'] = center_decrease_mapping.index\n\n clinical_key_decrease = syn.tableQuery(\"select * from {0}\".format(\n clinical_key_decrease_synid))\n clinical_key_decreasedbdf = clinical_key_decrease.asDataFrame()\n process_functions.updateDatabase(syn, clinical_key_decreasedbdf,\n center_decrease_mapping,\n clinical_key_decrease_synid, [\"CENTER\"],\n to_delete=True)\n\n\ndef run_dashboard(syn, database_mappingdf, release, staging=False,\n public=False):\n \"\"\"\n Runs the dashboard scripts\n\n Args:\n syn: synapse object\n database_mappingdf: mapping between synapse ids and database\n release: GENIE release (ie. 5.3-consortium)\n\n \"\"\"\n update_release_numbers(syn, database_mappingdf, release=release)\n\n if not staging:\n update_data_release_file_table(syn, database_mappingdf)\n if not public:\n print_clinical_values_difference_table(syn, database_mappingdf)\n update_sample_difference_table(syn, database_mappingdf)\n update_data_completeness_table(syn, database_mappingdf)\n update_database_numbers(syn, database_mappingdf)\n update_oncotree_code_tables(syn, database_mappingdf)\n update_wiki(syn, database_mappingdf)\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Update dashboard tables')\n\n parser.add_argument(\n '--release',\n help=\"GENIE release number (ie. 
5.3-consortium)\",\n default=None)\n\n parser.add_argument(\n \"--pem_file\",\n type=str,\n help=\"Path to PEM file (genie.pem)\")\n\n parser.add_argument(\n \"--staging\",\n action='store_true',\n help=\"Using staging directory files\")\n\n parser.add_argument(\n \"--debug\",\n action='store_true',\n help=\"Synapse debugging flag\")\n\n parser.add_argument(\n \"--public\",\n action='store_true',\n help=\"Set true if releasing public release\")\n\n args = parser.parse_args()\n syn = process_functions.synLogin(args)\n if args.staging:\n # Database to Synapse Id mapping Table\n database_mapping_synid = 'syn12094210'\n else:\n database_mapping_synid = 'syn10967259'\n\n database_mapping = syn.tableQuery(\n 'select * from %s' % database_mapping_synid)\n database_mappingdf = database_mapping.asDataFrame()\n\n run_dashboard(syn, database_mappingdf, args.release,\n staging=args.staging, public=args.public)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"pandas.isnull",
"pandas.read_csv",
"pandas.Series",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
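Most of genie/dashboard_table_updater.py above is glue around Synapse tables, but get_center_data_completion is a self-contained pandas helper. The sketch below is a toy illustration only: the dataframe contents are invented, and the import assumes the module is importable from an installed GENIE package; the function name, signature, skipped columns, and missing-value rules come from the dumped source. Note that the helper still relies on DataFrame.append, so a pandas version earlier than 2.0 is assumed.

import pandas as pd

# Assumes the module shown above is importable under the path listed in the dump.
from genie.dashboard_table_updater import get_center_data_completion

# Invented toy clinical-sample table with the CENTER column the helper expects.
sampledf = pd.DataFrame({
    "CENTER": ["JHU", "JHU", "JHU", "DFCI"],
    "SAMPLE_ID": ["GENIE-JHU-1", "GENIE-JHU-2", "GENIE-JHU-3", "GENIE-DFCI-1"],
    "ONCOTREE_CODE": ["LUAD", None, "Not Collected", "BRCA"],
})

center_data = get_center_data_completion("JHU", sampledf)
# One row per non-skipped column: [column, center, n_samples, completeness].
# SAMPLE_ID is in skip_cols; ONCOTREE_CODE has 1 usable value out of 3 JHU rows
# (None and "Not Collected" both count as missing), so its completeness is ~0.33.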
shihchengli/APE | [
"c2f529b9e20959824317dbc3c018ce41702d67f6",
"c2f529b9e20959824317dbc3c018ce41702d67f6"
] | [
"ape/OptimalVibrations.py",
"ape/intcoords/update.py"
] | [
"# -*- coding: utf-8 -*-\n\n\"\"\"\nA module to find the optimizing vibrational coordinates to reduce intermode coupling\n\"\"\"\n\nimport os\nimport time\nimport logging\nimport numpy as np\nfrom copy import deepcopy\nfrom scipy import optimize\nfrom numba import jit\n\nimport rmgpy.constants as constants\n\nfrom ape.job.job import Job\nfrom ape.qchem import QChemLog\nfrom ape.exceptions import InputError, ConvergeError\nfrom ape.common import diagonalize_projected_hessian\nfrom ape.intcoords.InternalCoordinates import getXYZ\nfrom ape.intcoords.constants import BOHR2ANG\n\nclass OptVib(object):\n def __init__(self, symbols, nmode, coordinate_system, cart_coords, internal_object, conformer, hessian, linearity, n_vib, rotors, label, path, ncpus, \n charge=None, multiplicity=None, rem_variables_dict=None, gen_basis=\"\", is_QM_MM_INTERFACE=None, QM_USER_CONNECT=None, QM_ATOMS=None,\n ISOTOPE=None, force_field_params=None, fixed_molecule_string=None, opt=None):\n self.symbols = symbols\n self.nmode = nmode\n self.coordinate_system = coordinate_system\n self.cart_coords = cart_coords\n self.internal_object = internal_object\n self.conformer = conformer\n self.hessian = hessian\n self.linearity = linearity\n self.n_vib = n_vib\n self.rotors = rotors\n self.label = label\n self.path = path\n self.ncpus = ncpus\n self.charge = charge\n self.multiplicity = multiplicity\n self.rem_variables_dict = rem_variables_dict\n self.gen_basis = gen_basis\n self.n_rotors = len(self.rotors)\n\n # QMMM parameters\n self.is_QM_MM_INTERFACE = is_QM_MM_INTERFACE\n self.QM_USER_CONNECT = QM_USER_CONNECT\n self.QM_ATOMS = QM_ATOMS\n self.ISOTOPE = ISOTOPE\n self.force_field_params = force_field_params\n self.fixed_molecule_string = fixed_molecule_string\n self.opt = opt\n \n def get_optvib(self):\n \"\"\"\n Algorithms for local and optimal vibrations.\n \"\"\"\n logging.info('{0} modes of {1} finding...'.format(self.coordinate_system, self.label))\n self.grid_of_hessians = self.get_grid_of_hessians()\n self.mwv = diagonalize_projected_hessian(self.conformer, self.hessian, self.linearity, self.n_vib, \n self.rotors, get_weighted_vectors=True, label=self.label)\n\n # random rotations by 1° over all pairs of normal modes to break symmetry\n num = int(self.n_vib * (self.n_vib - 1) / 2) # 2-combination of self.n_vib\n angles = np.zeros(num)\n ind = 0\n for i in range(self.n_vib):\n new_raw = True\n for j in range(self.n_vib):\n if i < j:\n if (i + 1) % 2 == 1 and new_raw:\n angles[ind] = 1 / 180 * np.pi\n new_raw = False\n else:\n angles[ind] = 0\n ind += 1\n Ui = self.U(angles)\n self.mwv = self.mwv.T.dot(Ui)\n\n # Do Jacobi sweeps over all pairs of modes\n logging.info('-------------------------------------------------------------------------------------------------------------------')\n logging.info(' Jacobi sweeps ')\n logging.info('-------------------------------------------------------------------------------------------------------------------')\n self.Jacobi_sweeps()\n\n # Calculate anharmonic frequencies\n mwv = self.mwv\n H = mwv.T.dot(self.grid_of_hessians[0]).dot(mwv)\n vib_freq = []\n for i in range(self.n_vib):\n freq = np.sqrt(H[i][i]) / (2 * np.pi * constants.c * 100)\n vib_freq.append(freq)\n \n # Calculate optimal coordinates in terms of not mass-weighted cartesian coordinate\n mass = self.conformer.mass.value_si\n mass_3N_array = np.array([i for i in mass for j in range(3)])\n mass_mat = np.diag(mass_3N_array)\n inv_sq_mass_mat = np.linalg.inv(mass_mat ** 0.5)\n unweighted_v = 
np.matmul(inv_sq_mass_mat, mwv).T\n\n # Sort anharmonic frequencies in ascending order\n unweighted_v = [v for _, v in sorted(zip(vib_freq, unweighted_v))]\n vib_freq = sorted(vib_freq)\n\n return vib_freq, unweighted_v\n\n def get_grid_of_hessians(self):\n \"\"\"\n Hessians are generated on a grid of one point per vibrational mode.\n \"\"\"\n logging.info('A grid of Hessians generating...\\n')\n vib_freq, unweighted_v = diagonalize_projected_hessian(self.conformer, self.hessian, self.linearity, self.n_vib, self.rotors, label=self.label)\n grid_of_hessians = {}\n fm = diagonalize_projected_hessian(self.conformer, self.hessian, self.linearity, self.n_vib, self.rotors, get_mass_weighted_hessian=True, label=self.label)\n grid_of_hessians[0] = deepcopy(fm)\n for i in range(self.nmode):\n if i in range(self.n_rotors): continue\n logging.info('Sampling Mode {mode}'.format(mode=i+1))\n mode = i + 1\n vector = unweighted_v[i - self.n_rotors]\n freq = vib_freq[i - self.n_rotors]\n magnitude = np.linalg.norm(vector)\n reduced_mass = magnitude ** -2 / constants.amu # in amu\n step_size = np.sqrt(constants.hbar / (reduced_mass * constants.amu) / (freq * 2 * np.pi * constants.c * 100)) * 10 ** 10 # in angstrom\n normalizes_vector = vector / magnitude\n qj = np.matmul(self.internal_object.B, normalizes_vector/BOHR2ANG)\n qj = qj.reshape(-1,)\n \n initial_geometry = self.cart_coords.copy()\n cart_coords = initial_geometry.copy()\n internal = deepcopy(self.internal_object)\n\n cart_coords += internal.transform_int_step((qj * step_size).reshape(-1,)) * BOHR2ANG\n xyz = getXYZ(self.symbols, cart_coords)\n file_name = mode\n if self.is_QM_MM_INTERFACE:\n QMMM_xyz_string = ''\n for i, xyz in enumerate(xyz.split('\\n')):\n QMMM_xyz_string += \" \".join([xyz, self.QM_USER_CONNECT[i]]) + '\\n'\n if i == len(self.QM_ATOMS)-1:\n break\n QMMM_xyz_string += self.fixed_molecule_string\n job = Job(QMMM_xyz_string, self.path, file_name, jobtype='freq', ncpus=self.ncpus, charge=self.charge, multiplicity=self.multiplicity,\n rem_variables_dict=self.rem_variables_dict, gen_basis=self.gen_basis, QM_atoms=self.QM_ATOMS, ISOTOPE=self.ISOTOPE,\n force_field_params=self.force_field_params, opt=self.opt)\n else:\n job = Job(xyz, self.path, file_name, jobtype='freq', ncpus=self.ncpus, charge=self.charge, multiplicity=self.multiplicity,\n rem_variables_dict=self.rem_variables_dict, gen_basis=self.gen_basis)\n\n # Write Q-Chem input file\n job.write_input_file()\n\n # Job submission\n job.submit()\n\n # Parse output file to get the hessian matrix\n output_file_path = os.path.join(self.path, '{}.q.out'.format(file_name))\n hessian = QChemLog(output_file_path).load_force_constant_matrix()\n fm = diagonalize_projected_hessian(self.conformer, hessian, self.linearity, self.n_vib, self.rotors, get_mass_weighted_hessian=True, label=self.label)\n grid_of_hessians[mode] = fm\n\n return grid_of_hessians\n\n def objectiveFunction(self, angle):\n \"\"\"\n To produce optimal coordinates, metrics which quantify off-diagonal couplings\n over a grid of Hessian matrices are minimized through unitary rotations of\n the vibrational basis.\n \n 1. coordinate_system == \"E-Optimized\"\n Return the sum of squared off-diagonal coupling\n 2. coordinate_system == \"E'-Optimized\"\n Return the sum squared change in off-diagonal coupling\n 3. 
coordinate_system == \"Pipek_Mezey\"\n Return the sum of squares of the atomic contribution to the modes\n \"\"\"\n angles = self.angles.copy()\n angles[self.n] = angle\n\n # Rotate the paires of eigenvectors\n U = self.U(angles)\n V = self.mwv.dot(U)\n E = 0\n if self.coordinate_system == \"E-Optimized\":\n for key in self.grid_of_hessians.keys():\n dim = self.n_vib\n hessian = self.grid_of_hessians[key]\n E += E_Optimized_batch_run(hessian, V ,dim)\n elif self.coordinate_system == \"E'-Optimized\":\n H0 = V.T.dot(self.grid_of_hessians[0]).dot(V) / ((2 * np.pi * constants.c * 100) ** 2)\n for key in self.grid_of_hessians.keys():\n if key == 0: continue\n dim = self.n_vib\n hessian = self.grid_of_hessians[key]\n E += dE_Optimized_batch_run(hessian, V ,dim, H0)\n elif self.coordinate_system == \"Pipek_Mezey\":\n modes = V.T\n squared_modes = modes ** 2\n c = squared_modes[:, 0::3] + squared_modes[:, 1::3] + squared_modes[:, 2::3]\n E = -np.linalg.norm(c) ** 2 \n else:\n raise InputError(\"The value of coordinate_system should be E-Optimized or E'-Optimized to produce optimal coordinates.\")\n\n return E\n\n def Jacobi_sweeps(self, thresh=1e-6, thresh2=1e-4, printing=True):\n \"\"\"\n Jacobi sweeps are performed over the angles until minimization was reached.\n \"\"\"\n start = time.time()\n num = int(self.n_vib * (self.n_vib - 1) / 2) # 2-combination of a set self.n_vib\n err = 1e10\n err2 = 1e10\n isweep = 0\n while (err > thresh) or (err2 > thresh2):\n isweep += 1\n self.angles = np.zeros(num)\n if isweep == 1:\n self.n = 0\n old_E = self.objectiveFunction(0)\n for n in range(num):\n self.n = n\n bounds = [[0, 2 * np.pi]]\n result = optimize.minimize_scalar(self.objectiveFunction, bounds=bounds)\n if not result.success:\n raise ConvergeError('Optimization to find optimal vibrational coordinates fails.')\n else:\n self.angles[n] = result.x\n \n # Update vectors\n U = self.U(self.angles)\n self.mwv = self.mwv.dot(U)\n\n # Check convergence\n E = result.fun\n err = abs(E - old_E)\n err2 = abs(self.angles.sum())\n old_E = E\n \n if printing:\n logging.info('Normal mode localization: Cycle {:3d} E: {:>25.7f} change: {:>25.7f} {:>10.5f}'\\\n .format(isweep, E, err, err2))\n end = time.time()\n logging.info('-------------------------------------------------------------------------------------------------------------------')\n logging.info('\\nThe Jacobi sweeps have converged in {:.2f} s(wall)'.format(end - start))\n \n def Ui(self, angle, i, j):\n \"\"\"\n Ui is a Jacobi rotation matrix of two by two rotation (among mode i and mode j).\n \"\"\"\n Ui = np.identity(self.n_vib)\n if i > j:\n tmp = i\n i = j\n j = tmp\n c = np.cos(angle)\n s = np.sin(angle) \n Ui[i][i] = c\n Ui[j][j] = c\n Ui[i][j] = -s\n Ui[j][i] = s\n return Ui\n\n def U(self, angles):\n \"\"\"\n Matrix U is expressed as consecutive Jacobi rotations.\n \"\"\"\n U = np.identity(self.n_vib)\n ind = 0\n for i in range(self.n_vib):\n for j in range(self.n_vib):\n if i < j:\n angle = angles[ind]\n Ui = self.Ui(angle, i, j)\n U = np.matmul(U, Ui)\n ind += 1\n return U\n\n#@jit(nopython=True)\ndef E_Optimized_batch_run(hessian, V, dim):\n E = 0\n H = V.T.dot(hessian).dot(V) / ((2 * np.pi * constants.c * 100) ** 2)\n for i in range(dim):\n for j in range(dim):\n if i < j:\n E += (H[i][j]) ** 2\n return E\n\n#@jit(nopython=True)\ndef dE_Optimized_batch_run(hessian, V, dim, H0):\n E = 0\n H = V.T.dot(hessian).dot(V) / ((2 * np.pi * constants.c * 100) ** 2)\n for i in range(dim):\n for j in range(dim):\n if i < j:\n E += (H[i][j] - H0[i][j]) 
** 2\n return E",
"import numpy as np\n\nfrom ape.intcoords.helpers_pure import log\nfrom ape.intcoords.eval import eval_primitives\nfrom ape.intcoords.slots import Torsion\nfrom ape.intcoords.exceptions import NeedNewInternalsException\nfrom ape.intcoords.valid import dihedral_valid\n\n\ndef correct_dihedrals(new_dihedrals, old_dihedrals):\n \"\"\"Dihedrals are periodic. Going from -179° to 179° is not a step of 358°,\n but a step of 2°. By considering the actual distance of the dihedrals from\n π the correct step can be calculated.\n dihedral step length = abs(abs(new_dihedral) - π) + abs(abs(old_dihedral)- π)\n or put differently\n dihedral step length = abs(abs(new_dihedral - old_dihedral) - 2*π)\n The sign is left to be determined. Going from -179° to 179° (roughly π - -π = 2π)\n is a counter clockwise rotation and the dihedral has to decrease below -π. Going\n from 179° to -179° (roughly -π - π = -2π) is a clockwise rotation and the dihedral\n increases abvove π. So the correct sign corresponds to the negative sign of the\n original difference.\n original difference 2π -> dihedral must decrease -> sign = -1\n original difference -2π -> dihedral must increase -> sign = +1\n Overall the old dihedral is then modified by the actual step length with the correct\n sign.\"\"\"\n dihedrals_step = new_dihedrals - old_dihedrals\n shifted_by_2pi = np.abs(np.abs(dihedrals_step) - 2 * np.pi) < np.pi / 2\n corrected_dihedrals = new_dihedrals.copy()\n corrected_dihedrals[shifted_by_2pi] -= (\n 2 * np.pi * np.sign(dihedrals_step[shifted_by_2pi])\n )\n return corrected_dihedrals\n\n\ndef update_internals(\n new_coords3d,\n old_internals,\n primitives,\n dihedral_inds,\n check_dihedrals=False,\n logger=None,\n):\n prim_internals = eval_primitives(new_coords3d, primitives)\n new_internals = [prim_int.val for prim_int in prim_internals]\n internal_diffs = np.array(new_internals) - old_internals\n\n dihedrals = [prim_internals[i] for i in dihedral_inds]\n dihedral_num = len(dihedrals)\n dihedral_diffs = internal_diffs[-dihedral_num:]\n\n # Find differences that are shifted by 2*pi\n shifted_by_2pi = np.abs(np.abs(dihedral_diffs) - 2 * np.pi) < np.pi / 2\n new_dihedrals = np.array([dihed.val for dihed in dihedrals])\n if any(shifted_by_2pi):\n new_dihedrals[shifted_by_2pi] -= (\n 2 * np.pi * np.sign(dihedral_diffs[shifted_by_2pi])\n )\n # Update values\n for dihed, new_val in zip(dihedrals, new_dihedrals):\n dihed.val = new_val\n # See if dihedrals became invalid (collinear atoms)\n if check_dihedrals:\n are_valid = [dihedral_valid(new_coords3d, prim.inds) for prim in dihedrals]\n try:\n first_dihedral = dihedral_inds[0]\n except IndexError:\n first_dihedral = 0\n invalid_inds = [\n i + first_dihedral for i, is_valid in enumerate(are_valid) if not is_valid\n ]\n if len(invalid_inds) > 0:\n invalid_prims = [primitives[i] for i in invalid_inds]\n log(logger, \"Dihedral(s) became invalid! 
Need new internal coordinates!\")\n raise NeedNewInternalsException(\n new_coords3d, invalid_inds=invalid_inds, invalid_prims=invalid_prims\n )\n\n return prim_internals\n\n\ndef transform_int_step(\n int_step,\n old_cart_coords,\n cur_internals,\n Bt_inv_prim,\n primitives,\n check_dihedrals=False,\n cart_rms_thresh=1e-6,\n logger=None,\n):\n \"\"\"Transformation is done in primitive internals, so int_step must be given\n in primitive internals and not in DLC!\"\"\"\n\n new_cart_coords = old_cart_coords.copy()\n remaining_int_step = int_step\n target_internals = cur_internals + int_step\n\n dihedral_inds = np.array(\n [i for i, primitive in enumerate(primitives) if isinstance(primitive, Torsion)]\n )\n\n last_rms = 9999\n old_internals = cur_internals\n backtransform_failed = True\n for i in range(25):\n cart_step = Bt_inv_prim.T.dot(remaining_int_step)\n cart_rms = np.sqrt(np.mean(cart_step ** 2))\n # Update cartesian coordinates\n new_cart_coords += cart_step\n # Determine new internal coordinates\n new_prim_ints = update_internals(\n new_cart_coords.reshape(-1, 3),\n old_internals,\n primitives,\n dihedral_inds,\n check_dihedrals=check_dihedrals,\n logger=logger,\n )\n new_internals = [prim.val for prim in new_prim_ints]\n remaining_int_step = target_internals - new_internals\n internal_rms = np.sqrt(np.mean(remaining_int_step ** 2))\n log(\n logger,\n f\"Cycle {i}: rms(Δcart)={cart_rms:1.4e}, rms(Δint.) = {internal_rms:1.5e}\",\n )\n\n # This assumes the first cart_rms won't be > 9999 ;)\n if cart_rms < last_rms:\n # Store results of the conversion cycle for laster use, if\n # the internal-cartesian-transformation goes bad.\n best_cycle = (new_cart_coords.copy(), new_internals.copy())\n best_cycle_ind = i\n elif i != 0:\n # If the conversion somehow fails we fallback to the best previous step.\n log(logger, f\"Backconversion failed! Falling back to step {best_cycle_ind}\")\n new_cart_coords, new_internals = best_cycle\n break\n else:\n raise Exception(\n \"Internal-cartesian back-transformation already \"\n \"failed in the first step. Aborting!\"\n )\n old_internals = new_internals\n\n last_rms = cart_rms\n if cart_rms < cart_rms_thresh:\n log(\n logger,\n f\"Internal->Cartesian transformation converged in {i+1} cycle(s)!\",\n )\n backtransform_failed = False\n break\n\n # if check_dihedrals and (\n # not dihedrals_are_valid(new_cart_coords.reshape(-1, 3), dihedral_inds)\n # ):\n # raise NeedNewInternalsException(new_cart_coords)\n\n log(logger, \"\")\n\n # Return the difference between the new cartesian coordinates that yield\n # the desired internal coordinates and the old cartesian coordinates.\n cart_step = new_cart_coords - old_cart_coords\n return new_prim_ints, cart_step, backtransform_failed\n"
] | [
[
"numpy.diag",
"numpy.sqrt",
"numpy.linalg.inv",
"numpy.matmul",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"scipy.optimize.minimize_scalar",
"numpy.identity",
"numpy.zeros"
],
[
"numpy.sign",
"numpy.array",
"numpy.mean",
"numpy.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
njcuk9999/apero-utils | [
"f77de4c9123874e5bb6ed6bd03a7de3b27057402",
"f77de4c9123874e5bb6ed6bd03a7de3b27057402",
"f77de4c9123874e5bb6ed6bd03a7de3b27057402"
] | [
"general/paper_plots/general.py",
"updates_to_drs/database_tests/db_test_run_connections.py",
"general/object_id/astro_object.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nPlots for the apero drs paper\n\nCreated on 2021-08-01\n\n@author: cook\n\"\"\"\nimport matplotlib\nmatplotlib.use('Qt5Agg')\nfrom astropy.io import fits\nfrom astropy.visualization import imshow_norm, ZScaleInterval, LinearStretch\nimport glob\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport numpy as np\nimport os\n\nfrom apero import lang\nfrom apero.core import constants\nfrom apero.core.core import drs_log\nfrom apero.core.core import drs_database\nfrom apero.science import preprocessing as prep\nfrom apero.io import drs_image\nfrom apero.core.instruments.spirou import file_definitions\n\n\n# =============================================================================\n# Define variables\n# =============================================================================\n# Get Logging function\nWLOG = drs_log.wlog\n# Get the text types\ntextentry = lang.textentry\n# Raw prefix\nRAW_PREFIX = file_definitions.raw_prefix\n# get the object database\nObjectDatabase = drs_database.ObjectDatabase\n# define the night of data we want to use\nNIGHT = '2020-08-31'\n# define where we want to save plots\n# PLOT_PATH = '/data/spirou/drs-data/misc/paper_plots'\nPLOT_PATH = '/scratch2/drs-data/misc/paper_plots'\n# define plots and append those we want\nPLOTS = []\n# PLOTS.append('SIZE_GRID')\nPLOTS.append('RAW_FEATURES')\n# PLOTS.append('BADMAP')\n\n\n# =============================================================================\n# PLOT functions\n# =============================================================================\n# SIZE_GRID\ndef plot_size_grid(params):\n\n odocode = '2510288a'\n hashcode = '3444961B5D'\n # get file paths\n raw_file = os.path.join(params['DRS_DATA_RAW'], NIGHT,\n '{0}.fits'.format(odocode))\n pp_file = os.path.join(params['DRS_DATA_REDUC'], NIGHT,\n '{0}_pp.fits'.format(hashcode))\n e2ds_file = os.path.join(params['DRS_DATA_REDUC'], NIGHT,\n '{0}_pp_e2dsff_AB.fits'.format(hashcode))\n # get\n print('Loading raw image')\n raw_image = fits.getdata(raw_file)\n print('Loading pp image')\n pp_image = fits.getdata(pp_file)\n print('Loading E2DS image')\n e2ds_image = fits.getdata(e2ds_file)\n\n # rotation to match HARPS orientation (expected by DRS)\n # image1 = drs_image.rotate_image(raw_image, params['RAW_TO_PP_ROTATION'])\n # flip image\n image2 = drs_image.flip_image(params, pp_image)\n # get resize size\n sargs = dict(xlow=params['IMAGE_X_LOW'], xhigh=params['IMAGE_X_HIGH'],\n ylow=params['IMAGE_Y_LOW'], yhigh=params['IMAGE_Y_HIGH'])\n # resize flat\n image3 = drs_image.resize(params, image2, **sargs)\n\n print('Plotting size_grid plot')\n fig = plt.figure(figsize=(12, 12))\n size = (2, 2)\n frame1 = plt.subplot2grid(size, (0, 0))\n frame2 = plt.subplot2grid(size, (0, 1))\n frame3 = plt.subplot2grid(size, (1, 0), colspan=2)\n\n cmap = matplotlib.cm.get_cmap('inferno').copy()\n cmap.set_bad(color='green')\n # -------------------------------------------------------------------------\n # top left raw image\n im, norm = _norm_image(raw_image, frame1, cmap)\n # add labels\n frame1.set(xlim=(0, 4096), ylim=(0, 4096))\n frame1.tick_params(axis='both', which='both', bottom=False, top=False,\n left=False, right=False, labelleft=False,\n labelbottom=False)\n frame1.set_title('raw (4096x4096)', loc='left',\n x=0.05, y=0.95, pad=-14,\n color='black', backgroundcolor='white')\n # 
-------------------------------------------------------------------------\n # middle right: flipped + resized image\n im, norm = _norm_image(image3, frame2, cmap)\n # add labels\n frame2.set(xlim=(-4, 4092), ylim=(0-250, 4096-250))\n frame2.tick_params(axis='both', which='both', bottom=False, top=False,\n left=False, right=False, labelleft=False,\n labelbottom=False)\n frame2.set_title('pre-processed, flipped, resized (3100x4088)', loc='left',\n x=0.05, y=0.95, pad=-14,\n color='black', backgroundcolor='white')\n # -------------------------------------------------------------------------\n # bottom: e2ds\n im, norm = _norm_image(e2ds_image, frame3, cmap)\n # add labels\n frame3.set(xlim=(0, 4088), ylim=(0, 49))\n frame3.tick_params(axis='both', which='both', bottom=False, top=False,\n left=False, right=False, labelleft=False,\n labelbottom=False)\n frame3.set_title('Extracted (E2DS) 49x4088', loc='left',\n x=0.025, y=0.95, pad=-14,\n color='black', backgroundcolor='white')\n\n plt.subplots_adjust(wspace=0.05, hspace=0.05,\n left=0.01, right=0.99, top=0.975, bottom=0.025)\n # -------------------------------------------------------------------------\n outfile = os.path.join(PLOT_PATH, 'size_grid.pdf')\n print('Saving to file: ' + outfile)\n plt.savefig(outfile, dpi=500)\n print('Showing graph')\n plt.show()\n plt.close()\n\n\n# RAW_FEATURES\ndef plot_raw_features(params):\n # set file to use\n odocode = '2510288a'\n # get file paths\n raw_file = os.path.join(params['DRS_DATA_RAW'], NIGHT,\n '{0}.fits'.format(odocode))\n # get raw image\n print('Loading raw image')\n raw_image = fits.getdata(raw_file)\n # plot feature grid\n plot_feature_grid(raw_image)\n\n\ndef plot_pp_features(params):\n # get pseudo constants\n pconst = constants.pload()\n # set file to use\n odocode = '2510288a'\n # get file paths\n raw_file = os.path.join(params['DRS_DATA_RAW'], NIGHT,\n '{0}.fits'.format(odocode))\n # get raw image\n print('Loading raw images')\n datalist = []\n for ext in [1, 2, 3, 4]:\n datalist.append(fits.getdata(raw_file, ext=ext))\n # get header\n header = fits.getheader(raw_file)\n # get flux image from the data list\n image = datalist[0]\n # get intercept from the data list\n intercept = datalist[1]\n # get error on slope from the data list\n errslope = datalist[2]\n # get frame time\n frame_time = pconst.FRAME_TIME(params, header)\n # get the pixel exposure time from the data list\n inttime = datalist[3] * frame_time\n # Get hot pixels for corruption check\n hotpixels = prep.get_hot_pixels(params)\n # ----------------------------------------------------------------------\n # Check for pixel shift and/or corrupted files\n # ----------------------------------------------------------------------\n # storage\n snr_hotpix, rms_list = [], []\n shiftdx, shiftdy = 0, 0\n # do this iteratively as if there is a shift need to re-workout QC\n for iteration in range(2):\n # get pass condition\n cout = prep.test_for_corrupt_files(params, image, hotpixels)\n snr_hotpix, rms_list = cout[0], cout[1]\n shiftdx, shiftdy = int(cout[2]), int(cout[3])\n # use dx/dy to shift the image back to where the engineering flat\n # is located\n if shiftdx != 0 and shiftdy != 0:\n # log process\n wmsg = textentry('40-010-00013', args=[shiftdx, shiftdy])\n WLOG(params, '', wmsg)\n # roll on the y axis\n image = np.roll(image, [shiftdy], axis=0)\n intercept = np.roll(intercept, [shiftdy], axis=0)\n errslope = np.roll(errslope, [shiftdy], axis=0)\n inttime = np.roll(inttime, [shiftdy], axis=0)\n # roll on the x axis\n image = 
np.roll(image, [shiftdx], axis=1)\n intercept = np.roll(intercept, [shiftdx], axis=1)\n errslope = np.roll(errslope, [shiftdx], axis=1)\n inttime = np.roll(inttime, [shiftdx], axis=1)\n elif shiftdx != 0:\n # log process\n wmsg = textentry('40-010-00013', args=[shiftdx, shiftdy])\n WLOG(params, '', wmsg)\n # roll on the x axis\n image = np.roll(image, [shiftdx], axis=1)\n intercept = np.roll(intercept, [shiftdx], axis=1)\n errslope = np.roll(errslope, [shiftdx], axis=1)\n inttime = np.roll(inttime, [shiftdx], axis=1)\n elif shiftdy != 0:\n # log process\n wmsg = textentry('40-010-00013', args=[shiftdx, shiftdy])\n WLOG(params, '', wmsg)\n # roll on the y axis\n image = np.roll(image, [shiftdy], axis=0)\n intercept = np.roll(intercept, [shiftdy], axis=0)\n errslope = np.roll(errslope, [shiftdy], axis=0)\n inttime = np.roll(inttime, [shiftdy], axis=0)\n # ------------------------------------------------------------------\n # correct image\n # ------------------------------------------------------------------\n # correct for the top and bottom reference pixels\n WLOG(params, '', textentry('40-010-00003'))\n image = prep.correct_top_bottom(params, image)\n # correct by a median filter from the dark amplifiers\n WLOG(params, '', textentry('40-010-00004'))\n image = prep.median_filter_dark_amps(params, image)\n # correct for the 1/f noise\n WLOG(params, '', textentry('40-010-00005'))\n image = prep.median_one_over_f_noise(params, image)\n # ---------------------------------------------------------------------\n # Correct for cosmic rays before the possible pixel shift\n # ---------------------------------------------------------------------\n # correct the intercept\n WLOG(params, '', textentry('40-010-00021'))\n intercept = prep.intercept_correct(intercept)\n # correct error slope\n WLOG(params, '', textentry('40-010-00022'))\n errslope1 = prep.errslope_correct(errslope)\n # correct cosmic rays\n WLOG(params, '', textentry('40-010-00018'))\n image, cprops = prep.correct_cosmics(params, image, intercept,\n errslope1, inttime)\n\n # ---------------------------------------------------------------------\n # plot\n # ---------------------------------------------------------------------\n # plot feature grid\n plot_feature_grid(image, 'pp_features.pdf')\n\n\n# plot for raw features and pp features\ndef plot_feature_grid(image, outname='raw_features.pdf'):\n # define cuts / zooms\n cut1area = [100, 700, 1200, 1800]\n cut2area = [2375, 2575, 525, 725]\n zoom0area = [800, 1200, 3396, 3796]\n zoom1area = [3596, 3996, 800, 1200]\n zoom2area = [1240, 1290, 1590, 1640]\n # text height above image [in raw image pixel units]\n textheight = 100\n rcolor = 'r'\n # -------------------------------------------------------------------------\n print('Plotting raw_features plot')\n # set up grid\n plt.close()\n fig = plt.figure(figsize=(12, 14))\n size = (3, 3)\n topright = plt.subplot2grid(size, (0, 0))\n topmid = plt.subplot2grid(size, (0, 1))\n topleft = plt.subplot2grid(size, (0, 2))\n rightmid = plt.subplot2grid(size, (1, 0))\n rightbot = plt.subplot2grid(size, (2, 0))\n panel = plt.subplot2grid(size, (1, 1), colspan=2, rowspan=2)\n\n # get colour map\n cmap = matplotlib.cm.get_cmap('inferno').copy()\n cmap.set_bad(color='green')\n\n cmap0 = matplotlib.cm.get_cmap('Greys_r').copy()\n cmap0.set_bad(color='green')\n # -------------------------------------------------------------------------\n # dark region\n cut1 = image[cut1area[2]:cut1area[3], cut1area[0]:cut1area[1]]\n c1im, _ = _norm_image(cut1, rightmid, 
cmap)\n _add_colorbar(fig, c1im, rightmid, side='bottom')\n rightmid.set_title('D. dark region', loc='left',\n x=0.05, y=0.95, pad=-14,\n color='black', backgroundcolor='white')\n rightmid.tick_params(axis='both', which='both', bottom=False, top=False,\n left=False, right=False, labelleft=False,\n labelbottom=False)\n # -------------------------------------------------------------------------\n # holes\n cut2 = image[cut2area[2]:cut2area[3], cut2area[0]:cut2area[1]]\n c2im, _ = _norm_image(cut2, rightbot, cmap)\n _add_colorbar(fig, c2im, rightbot, side='bottom')\n rightbot.set_title('E. detector holes', loc='left',\n x=0.05, y=0.95, pad=-14,\n color='black', backgroundcolor='white')\n rightbot.tick_params(axis='both', which='both', bottom=False, top=False,\n left=False, right=False, labelleft=False,\n labelbottom=False)\n # -------------------------------------------------------------------------\n # zoom 0\n zoom0 = image[zoom0area[2]:zoom0area[3], zoom0area[0]:zoom0area[1]]\n z0im, _ = _norm_image(zoom0, topright, cmap)\n _add_colorbar(fig, z0im, topright, side='bottom')\n topright.set_title('A. Zoom reddest orders', loc='left',\n x=0.05, y=0.95, pad=-14,\n color='black', backgroundcolor='white')\n topright.tick_params(axis='both', which='both', bottom=False, top=False,\n left=False, right=False, labelleft=False,\n labelbottom=False)\n # -------------------------------------------------------------------------\n # zoom 1\n zoom1 = image[zoom1area[2]:zoom1area[3], zoom1area[0]:zoom1area[1]]\n z1im, norm = _norm_image(zoom1, topmid, cmap)\n _add_colorbar(fig, z1im, topmid, side='bottom')\n topmid.set_title('B. Zoom bluest orders', loc='left',\n x=0.05, y=0.95, pad=-14,\n color='black', backgroundcolor='white')\n topmid.tick_params(axis='both', which='both', bottom=False, top=False,\n left=False, right=False, labelleft=False,\n labelbottom=False)\n # -------------------------------------------------------------------------\n # zoom 2\n zoom2 = image[zoom2area[2]:zoom2area[3], zoom2area[0]:zoom2area[1]]\n z2im, norm = _norm_image(zoom2, topleft, cmap)\n _add_colorbar(fig, z2im, topleft, side='bottom')\n topleft.set_title('C. 
Zoom on slices', loc='left',\n x=0.05, y=0.95, pad=-14,\n color='black', backgroundcolor='white')\n topleft.tick_params(axis='both', which='both', bottom=False, top=False,\n left=False, right=False, labelleft=False,\n labelbottom=False)\n # -------------------------------------------------------------------------\n # panel\n pim, norm = _norm_image(image, panel, cmap0)\n _add_colorbar(fig, pim, panel, side='bottom')\n panel.tick_params(axis='both', which='both', bottom=False, top=False,\n left=False, right=False, labelleft=False,\n labelbottom=False)\n panel.set_title('raw (4096x4096)', loc='left',\n x=0.5, y=0.95, pad=-14,\n color='black', backgroundcolor='white')\n # -------------------------------------------------------------------------\n # add rectangle dark cut\n rcut1 = patches.Rectangle((cut1area[0], cut1area[2]),\n cut1area[1] - cut1area[0],\n cut1area[3] - cut1area[2],\n linewidth=1, edgecolor=rcolor, facecolor='none')\n panel.add_patch(rcut1)\n # add text\n panel.text(0.5*(cut1area[1] + cut1area[0]), cut1area[3] + textheight,\n 'D', color=rcolor, backgroundcolor='white')\n # -------------------------------------------------------------------------\n # add rectangle holes\n rcut2 = patches.Rectangle((cut2area[0], cut2area[2]),\n cut2area[1] - cut2area[0],\n cut2area[3] - cut2area[2],\n linewidth=1, edgecolor=rcolor, facecolor='none')\n panel.add_patch(rcut2)\n # add text\n panel.text(0.5*(cut2area[1] + cut2area[0]), cut2area[3] + textheight,\n 'E', color=rcolor, backgroundcolor='white')\n # -------------------------------------------------------------------------\n # add rectangle zoom 1\n rzoom0 = patches.Rectangle((zoom0area[0], zoom0area[2]),\n zoom0area[1] - zoom0area[0],\n zoom0area[3] - zoom0area[2],\n linewidth=1, edgecolor=rcolor, facecolor='none')\n panel.add_patch(rzoom0)\n # add text\n panel.text(0.5*(zoom0area[1] + zoom0area[0]), zoom0area[3] + textheight,\n 'A', color=rcolor, backgroundcolor='white')\n # -------------------------------------------------------------------------\n # add rectangle zoom 1\n rzoom1 = patches.Rectangle((zoom1area[0], zoom1area[2]),\n zoom1area[1] - zoom1area[0],\n zoom1area[3] - zoom1area[2],\n linewidth=1, edgecolor=rcolor, facecolor='none')\n panel.add_patch(rzoom1)\n # add text\n panel.text(0.5*(zoom1area[1] + zoom1area[0]), zoom1area[3] + textheight,\n 'B', color=rcolor, backgroundcolor='white')\n # -------------------------------------------------------------------------\n # add rectangle zoom 2\n rzoom2 = patches.Rectangle((zoom2area[0], zoom2area[2]),\n zoom2area[1] - zoom2area[0],\n zoom2area[3] - zoom2area[2],\n linewidth=1, edgecolor=rcolor, facecolor='none')\n panel.add_patch(rzoom2)\n # add text\n panel.text(0.5*(zoom2area[1] + zoom2area[0]), zoom2area[3] + textheight,\n 'C', color=rcolor, backgroundcolor='white')\n # -------------------------------------------------------------------------\n plt.subplots_adjust(wspace=0.05, hspace=0.075,\n left=0.01, right=0.99, top=0.975, bottom=0.025)\n # -------------------------------------------------------------------------\n outfile = os.path.join(PLOT_PATH, outname)\n print('Saving to file: ' + outfile)\n plt.savefig(outfile, dpi=300)\n print('Showing graph')\n plt.show()\n plt.close()\n\n\n# BADMAP\ndef plot_badpix_plot(params):\n\n hashcode = 'DB67D5C4F5'\n xlow = params['IMAGE_X_LOW']\n xhigh = params['IMAGE_X_HIGH']\n ylow = params['IMAGE_Y_LOW']\n yhigh = params['IMAGE_Y_HIGH']\n\n dark_file = glob.glob(os.path.join(params['DRS_DATA_REDUC'], 'other',\n 
'*d_pp_dark_master.fits'))[0]\n bad_file = os.path.join(params['DRS_DATA_REDUC'], NIGHT,\n '{0}_pp_badpixel.fits'.format(hashcode))\n\n dark_image = fits.getdata(dark_file)\n bad_image = fits.getdata(bad_file).astype(float)\n\n # fill bad image\n bad_image_full = np.zeros_like(dark_image)\n bad_image_full[ylow:yhigh, xlow:xhigh] = bad_image\n dark_image = drs_image.flip_image(params, dark_image)\n\n cmap1 = matplotlib.cm.get_cmap('Greys_r').copy()\n cmap2 = matplotlib.cm.get_cmap('Greys').copy()\n\n plt.close()\n fig, frames = plt.subplots(figsize=(20, 10), ncols=2, nrows=1)\n frame0 = frames[0]\n frame1 = frames[1]\n\n im, norm = imshow_norm(dark_image, frame0, origin='lower', aspect='auto',\n interval=ZScaleInterval(), stretch=LinearStretch(),\n cmap=cmap1, interpolation='None', rasterized=True)\n im, norm = imshow_norm(bad_image_full, frame1, origin='lower', aspect='auto',\n cmap=cmap2, interpolation='None', rasterized=True)\n\n\n\n frame0.tick_params(axis='both', which='both', bottom=False, top=False,\n left=False, right=False, labelleft=False,\n labelbottom=False)\n frame1.tick_params(axis='both', which='both', bottom=False, top=False,\n left=False, right=False, labelleft=False,\n labelbottom=False)\n\n frame0.hlines(y=ylow, xmin=xlow, xmax=xhigh, color='r', lw=2)\n frame0.hlines(y=yhigh, xmin=xlow, xmax=xhigh, color='r', lw=2)\n frame0.vlines(x=xlow, ymin=ylow, ymax=yhigh, color='r', lw=2)\n frame0.vlines(x=xhigh, ymin=ylow, ymax=yhigh, color='r', lw=2)\n frame1.hlines(y=ylow, xmin=xlow, xmax=xhigh, color='r', lw=2)\n frame1.hlines(y=yhigh, xmin=xlow, xmax=xhigh, color='r', lw=2)\n frame1.vlines(x=xlow, ymin=ylow, ymax=yhigh, color='r', lw=2)\n frame1.vlines(x=xhigh, ymin=ylow, ymax=yhigh, color='r', lw=2)\n\n frame0.set(xlim=[0, 4096], ylim=[0, 4096])\n frame1.set(xlim=[0, 4096], ylim=[0, 4096])\n\n plt.subplots_adjust(wspace=0, hspace=0, left=0.01, right=0.99,\n bottom=0.01, top=0.99)\n\n outfile = os.path.join(PLOT_PATH, 'badmap.pdf')\n print('Saving to file: ' + outfile)\n plt.savefig(outfile)\n print('Showing graph')\n plt.show()\n plt.close()\n\n\n# =============================================================================\n# worker functions (private)\n# =============================================================================\ndef _norm_image(image, frame, cmap):\n im, norm = imshow_norm(image, frame, origin='lower', aspect='auto',\n interval=ZScaleInterval(), stretch=LinearStretch(),\n cmap=cmap, interpolation='None', rasterized=True)\n return im, norm\n\n\ndef _add_colorbar(fig, im, frame, side='bottom'):\n\n if side in ['right', 'left']:\n orientation = 'vertical'\n else:\n orientation = 'horizontal'\n\n divider = make_axes_locatable(frame)\n cax = divider.append_axes(side, '5%', pad=0)\n cbar = fig.colorbar(im, cax=cax, orientation=orientation)\n cbar.ax.tick_params(labelsize=8)\n\n\n# =============================================================================\n# Start of code\n# =============================================================================\nif __name__ == '__main__':\n\n params = constants.load()\n\n if 'SIZE_GRID' in PLOTS:\n plot_size_grid(params)\n if 'RAW_FEATURES' in PLOTS:\n plot_raw_features(params)\n if 'BADMAP' in PLOTS:\n plot_badpix_plot(params)\n\n# =============================================================================\n# End of code\n# =============================================================================\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n# CODE NAME HERE\n\n# CODE DESCRIPTION HERE\n\nCreated on 2021-03-31\n\n@author: cook\n\"\"\"\nfrom apero.base import drs_db\nimport numpy as np\nimport time\n\nimport mysql.connector as mysql\n\n# =============================================================================\n# Define variables\n# =============================================================================\n# add you login here\ncase = 2\nif case == 1:\n host = 'localhost'\n username = 'cook'\n password = 'neilcook'\n tablename = 'ea_test'\n dbname = 'spirou'\nelse:\n host = 'rali'\n username = 'spirou'\n password = 'Covid19!'\n tablename = 'ea_test'\n dbname = 'spirou'\n\n# number of add rows to try\nN_ADDS = 20000\n\n# add some test switch\n# TEST = 'add_row'\nTEST = 'connect'\n\n# =============================================================================\n# Start of code\n# =============================================================================\nif __name__ == \"__main__\":\n\n # wait to start so we can run on multiple windows at once\n print('Waiting to start')\n time.sleep(2)\n # make mysql database\n database = drs_db.MySQLDatabase(host, username, password, dbname,\n tablename)\n # make sure database exists\n database.add_database()\n # add our test table\n database.add_table(tablename, ['X', 'Y', 'Z'], [str, str, str])\n # loop around N_ADDS times\n for it in range(N_ADDS):\n print('\\nProcessing row {0}'.format(it + 1))\n # set up some values\n values = np.array([it, it * 2, it * 3])\n values = values.astype(str)\n # add rows\n if TEST == 'add_row':\n database.add_row(values, tablename)\n\n elif TEST == 'connect':\n conn = mysql.connect(host=host, user=username, passwd=password,\n database=dbname,\n connection_timeout=3600)\n cursor = conn.cursor()\n cursor.close()\n conn.close()\n\n\n\n# =============================================================================\n# End of code\n# =============================================================================\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCODE DESCRIPTION HERE\n\nCreated on 2020-10-2020-10-22 11:49\n\n@author: cook\n\"\"\"\nimport warnings\nfrom typing import List, Union\nfrom astropy.time import Time\nfrom astropy.table import Table\nfrom astropy import units as uu\nfrom astropy.coordinates import SkyCoord, Distance\nimport numpy as np\nimport requests\n\n\n# =============================================================================\n# Define variables\n# =============================================================================\n__NAME__ = 'astro_obj.py'\n\n# Fake Logging function\ndef WLOG(params, level, message, *args, **kwargs):\n print(message)\n if level == 'error':\n raise Exception(message)\n\n\n# Fake TextEntry\ndef TextEntry(code, args, **kwargs):\n return code\n\n\n# Fake drs_text\nclass drstext:\n def null_text(self, value, nulls):\n if value is None:\n return True\n if value in nulls:\n return True\n return False\n\n\ndrs_text = drstext()\n\n\nclass Pconst:\n def DRS_OBJ_NAME(self, objname: str) -> str:\n \"\"\"\n Clean and standardize an object name\n\n Default action: make upper case and remove white spaces\n\n :param objname: str, input object name\n :return:\n \"\"\"\n # set function name\n _ = __NAME__ + '.DRS_OBJ_NAME()'\n # clean object name\n rawobjname = str(objname)\n objectname = rawobjname.strip()\n objectname = objectname.replace(' ', '_')\n objectname = objectname.upper()\n # return object name\n return objectname\n\n\npconst = Pconst()\n\n# Fake APERO params\nFAKE_PARAMS = dict()\nFAKE_PARAMS['OBJ_LIST_GAIA_URL'] = 'https://gea.esac.esa.int/tap-server/tap'\n# FAKE_PARAMS['OBJ_LIST_GAIA_URL'] = 'https://gaia.obspm.fr/tap-server/tap'\nFAKE_PARAMS['OBJ_LIST_CROSS_MATCH_RADIUS'] = 180\nFAKE_PARAMS['OBJ_LIST_GAIA_MAG_CUT'] = 15.0\nFAKE_PARAMS['OBJ_LIST_GAIA_PLX_LIM'] = 0.5\nFAKE_PARAMS['OBJ_LIST_GOOGLE_SHEET_URL'] = ('1jwlux8AJjBMMVrbg6LszJIpFJ'\n 'rk6alhbT5HA7BiAHD8')\nFAKE_PARAMS['OBJ_LIST_GOOGLE_SHEET_WNUM'] = 0\n\nUSE_DATABASE = False\n\nOBJECT_FILE = 'unique_objs.csv'\nCMRADIUS = 3 * 60\n\nQUERY_GAIA = 'SELECT {0} FROM {1} WHERE {2}'\nQCOLS = ('ra as ra, dec as dec, source_id as gaiaid, parallax as plx, '\n 'pmdec as pmde, pmra as pmra, radial_velocity as rv, '\n 'phot_g_mean_mag as gmag, phot_bp_mean_mag as bpmag, '\n 'phot_rp_mean_mag as rpmag')\nQSOURCE = 'gaiadr2.gaia_source'\n\nQCIRCLE = ('(1=CONTAINS(POINT(\\'ICRS\\', ra, dec), CIRCLE(\\'ICRS\\', {ra}, '\n '{dec}, {radius})))')\n\n# cache for google sheet\nGOOGLE_TABLES = dict()\n# define standard google base url\nGOOGLE_BASE_URL = ('https://docs.google.com/spreadsheets/d/{}/gviz/'\n 'tq?tqx=out:csv&sheet={}')\n\n\n# =============================================================================\n# Define functions\n# =============================================================================\nclass AstroObject(object):\n aliases: List[str]\n used: int\n\n def __init__(self, params, pconst, gaia_id: Union[str, None],\n ra: Union[str, float], dec: Union[str, float],\n database, objname: Union[str, None], pmra: float = np.nan,\n pmde: float = np.nan, plx: float = np.nan,\n rv: float = np.nan, teff: float = np.nan):\n \"\"\"\n Construct an astrophysical object instance\n\n :param gaia_id: str or None, input gaia id - if None require ra and dec\n :param ra: float, right ascension, only required if gaia_id is None\n :param dec: float, declination, only required if gaia_id is None\n :param database:\n \"\"\"\n # properties from input\n self.input_gaiaid = gaia_id\n self.input_ra = 
ra\n self.input_dec = dec\n self.database = database\n if objname is None:\n self.input_objname = None\n else:\n self.input_objname = pconst.DRS_OBJ_NAME(objname)\n self.input_pmra = pmra\n self.input_pmde = pmde\n self.input_plx = plx\n self.input_rv = rv\n self.input_teff = teff\n # other information\n self.pconst = pconst\n self.gaia_url = params['OBJ_LIST_GAIA_URL']\n self.radius = params['OBJ_LIST_CROSS_MATCH_RADIUS']\n self.maglimit = params['OBJ_LIST_GAIA_MAG_CUT']\n self.plxlimit = params['OBJ_LIST_GAIA_PLX_LIM']\n self.gsheet_url = params['OBJ_LIST_GOOGLE_SHEET_URL']\n self.gsheet_wnum = params['OBJ_LIST_GOOGLE_SHEET_WNUM']\n # properties we need to find\n self.objname = None\n self.gaia_id = None\n self.ra = np.nan\n self.dec = np.nan\n self.pmra = np.nan\n self.pmde = np.nan\n self.plx = np.nan\n self.rv = np.nan\n self.gmag = np.nan\n self.bpmag = np.nan\n self.rpmag = np.nan\n self.epoch = np.nan\n self.teff = np.nan\n self.aliases = []\n self.source = None\n self.used = 0\n\n def __str__(self):\n return self.__repr__()\n\n def __repr__(self):\n if self.input_objname is not None:\n return 'AstroObject[{0}]'.format(self.input_objname)\n elif self.input_gaiaid is not None:\n return 'AstroObject[Gaia DR2 {0}]'.format(self.input_gaiaid)\n else:\n return 'AstroObject[ra={0},dec={1}]'.format(self.input_ra,\n self.input_dec)\n\n def resolve_target(self, mjd=None):\n # deal with database not loaded\n if USE_DATABASE:\n self.database.load_db()\n # ---------------------------------------------------------------------\n # 1. try gaia id and objname against database\n # ---------------------------------------------------------------------\n # self._resolve_from_database()\n # ---------------------------------------------------------------------\n # 2. try gaia id against gaia query (only if gaia_id is still None)\n # ---------------------------------------------------------------------\n # if self.gaia_id is None:\n # self._resolve_from_gaia_id()\n # ---------------------------------------------------------------------\n # 3. try to get gaia id from google sheet of gaia id (with object name)\n # ---------------------------------------------------------------------\n # if self.gaia_id is None:\n # self._resolve_from_glist()\n # ---------------------------------------------------------------------\n # 4. use ra + dec to get gaia id (only if gaia_id is still None)\n # ---------------------------------------------------------------------\n if self.gaia_id is None:\n self._resolve_from_coords(mjd)\n # ---------------------------------------------------------------------\n # 5. 
if we still cannot find gaia id use input + default parameters\n # ---------------------------------------------------------------------\n if self.gaia_id is None:\n self._use_inputs()\n\n def get_simbad_aliases(self):\n # deal with aliases already set\n if len(self.aliases) > 0:\n return\n # storage aliases\n self.aliases = []\n # set aliases to objname and input objname if different\n if self.objname is not None:\n self.aliases += [self.objname]\n if self.input_objname is not None:\n if self.objname != self.input_objname:\n self.aliases += [self.input_objname]\n # only do this is we have a gaia-id\n if self.gaia_id is None:\n return\n obj_id = 'Gaia DR2 {0}'.format(self.gaia_id)\n # get entries for this gaia id\n entries = query_simbad_id(obj_id)\n # deal with no entries\n if entries is None:\n return\n if len(entries) == 0:\n return\n # get the aliases\n raw_aliases = entries['IDS'][0].decode('utf-8')\n # slit via the pipe (|)\n self.aliases += raw_aliases.split('|')\n\n def _resolve_from_database(self):\n \"\"\"\n Use gaia id (from input) to check if this object is already in database\n - if it is then update all parameters from here\n - if it isn't (or isn't set) check against known names in the database\n - if names aren't found and gaia id not found do not update parameters\n \"\"\"\n if not USE_DATABASE:\n return\n # ---------------------------------------------------------------------\n # deal with no input_gaiaid\n if self.input_gaiaid is not None:\n # condition condition\n condition = 'GAIADR2ID==\"{0}\"'.format(self.input_gaiaid)\n # get the entries for this condition\n entries = self.database.get_entries('*', condition=condition)\n # set source\n self.source = 'database-gaia-id'\n else:\n entries = None\n # ---------------------------------------------------------------------\n # deal with no entries (try resolving from name in known aliases)\n if entries is None or len(entries) == 0:\n entries = self._resolve_from_names()\n if entries is None or len(entries) == 0:\n return\n # ---------------------------------------------------------------------\n # fill out required information if available\n self.objname = entries['OBJECT']\n self.gaia_id = entries['GAIADR2ID']\n self.ra = entries['RA']\n self.dec = entries['DEC']\n # assign pmra\n if not drs_text.null_text(entries['PMRA'], ['None']):\n self.pmra = entries['PMRA']\n # assign pmde\n if not drs_text.null_text(entries['PMDE'], ['None']):\n self.pmde = entries['PMDE']\n # assign pmde\n if not drs_text.null_text(entries['PLX'], ['None']):\n self.plx = entries['PLX']\n # assign rv\n if not drs_text.null_text(entries['RV'], ['None']):\n self.rv = entries['RV']\n # assign gmag\n if not drs_text.null_text(entries['GMAG'], ['None']):\n self.gmag = entries['GMAG']\n # assign bpmag\n if not drs_text.null_text(entries['BPMAG'], ['None']):\n self.bpmag = entries['BPMAG']\n # assign rpmag\n if not drs_text.null_text(entries['RPMAG'], ['None']):\n self.rpmag = entries['RPMAG']\n # assign epoch\n if not drs_text.null_text(entries['EPOCH'], ['None']):\n self.epoch = entries['EPOCH']\n # assign teff\n if not drs_text.null_text(entries['TEFF'], ['None']):\n self.teff = entries['TEFF']\n # assign aliases\n if not drs_text.null_text(entries['ALIASES'], ['None']):\n self.aliases = entries['ALIASES'].split('|')\n # set used\n self.used = 1\n\n def _resolve_from_names(self):\n \"\"\"\n Search the database for a named column\n\n :return:\n \"\"\"\n if not USE_DATABASE:\n return\n # deal with no object name (shouldn't be possible)\n if 
self.input_objname is None:\n return None\n # get aliases from database\n gaia_table = self.database.get_entries('GAIADR2ID, OBJECT, ALIASES')\n # extract required columns\n gaia_id = gaia_table['GAIADR2ID']\n objnames = gaia_table['OBJECT']\n alias_sets = gaia_table['ALIASES']\n\n # ---------------------------------------------------------------------\n # 1. check direct object name\n # ---------------------------------------------------------------------\n for row, objname in enumerate(objnames):\n # get cleaned alias\n cobjname = pconst.DRS_OBJ_NAME(objname)\n # compare to input_objname\n if cobjname == self.input_objname:\n # condition condition\n condition = 'GAIADR2ID==\"{0}\"'.format(gaia_id[row])\n # set source\n self.source = 'database-objname'\n # return the entries for this gaia id\n return self.database.get_entries('*', condition=condition)\n\n # ---------------------------------------------------------------------\n # 2. check aliases\n # ---------------------------------------------------------------------\n # loop around each set of aliases and see if\n for row, alias_set in enumerate(alias_sets):\n # split the names by a comma\n aliases = alias_set.split('|')\n # loop around aliases - if alias found return gaia id for this\n for alias in aliases:\n # ignore None\n if drs_text.null_text(alias, ['None']):\n continue\n # get cleaned alias\n calias = pconst.DRS_OBJ_NAME(alias)\n # compare to input_objname\n if calias == self.input_objname:\n # condition condition\n condition = 'GAIADR2ID==\"{0}\"'.format(gaia_id[row])\n # set source\n self.source = 'database-aliases'\n # return the entries for this gaia id\n return self.database.get_entries('*', condition=condition)\n # if we have reached this point we cannot match to name\n # therefore return None\n return None\n\n def _resolve_from_gaia_id(self):\n \"\"\"\n Use input_gaiaid to query gaia and them update all parameters based\n on this id\n\n :return:\n \"\"\"\n # deal with no input_gaiaid\n if self.input_gaiaid is None:\n print\n return\n # set up ID query\n condition = 'source_id = {0}'.format(self.input_gaiaid)\n # construct sql query\n query = QUERY_GAIA.format(QCOLS, QSOURCE, condition)\n # return results\n entries = query_gaia(self.gaia_url, query)\n # deal with no entries\n if entries is None:\n return\n if len(entries) == 0:\n return\n # fill out required information if available\n self.objname = str(self.input_objname)\n self.gaia_id = str(entries['gaiaid'][0])\n self.ra = float(entries['ra'][0])\n self.dec = float(entries['dec'][0])\n # assign pmra\n if not entries['pmra'].mask[0]:\n self.pmra = float(entries['pmra'][0])\n # assign pmde\n if not entries['pmde'].mask[0]:\n self.pmde = float(entries['pmde'][0])\n # assign plx\n if not entries['plx'].mask[0]:\n self.plx = float(entries['plx'][0])\n # assign rv\n if not entries['rv'].mask[0]:\n self.rv = float(entries['rv'][0])\n # assign gmag\n if not entries['gmag'].mask[0]:\n self.gmag = float(entries['gmag'][0])\n # assign bpmag\n if not entries['bpmag'].mask[0]:\n self.bpmag = float(entries['bpmag'][0])\n # assign rpmag\n if not entries['rpmag'].mask[0]:\n self.rpmag = float(entries['rpmag'][0])\n # assign epoch\n self.epoch = 2015.5\n # set used\n self.used = 1\n # set source\n self.source = 'gaia-query-id-input'\n\n def _resolve_from_glist(self):\n \"\"\"\n Resolve gaia id from google sheets (using object name) and then\n retry the gaia id against the gaia database\n\n :return:\n \"\"\"\n # try to get gaia id from google sheets\n self.input_gaiaid = 
query_glist(self.input_objname, self.gsheet_url,\n self.gsheet_wnum)\n # try (again) gaia id against gaia query (only if gaia_id is still\n # None)\n if self.input_gaiaid is not None:\n self._resolve_from_gaia_id()\n # set source\n self.source = 'gaia-query-id-gsheet'\n\n def _resolve_from_coords(self, mjd=None):\n \"\"\"\n Resolve from Gaia using coordinates (and the current date)\n\n :param mjd: observation modified julien date\n\n :return:\n \"\"\"\n # deal with ra and dec (want it in degrees)\n ra, dec = parse_coords(self.input_ra, self.input_dec)\n # get radius in degrees\n radius = (self.radius * uu.arcsec).to(uu.deg).value\n # set up ra / dec crossmatch\n condition = QCIRCLE.format(ra=ra, dec=dec, radius=radius)\n # add additional criteria (to narrow search)\n condition += r' AND (phot_rp_mean_mag < {0})'.format(self.maglimit)\n # add a parallax condition\n condition += r' AND (parallax > {0})'.format(self.plxlimit)\n # construct sql query\n query = QUERY_GAIA.format(QCOLS, QSOURCE, condition)\n # return results of query\n entries = query_gaia(self.gaia_url, query)\n # deal with no entries\n # print(entries)\n if entries is None:\n return\n if len(entries) == 0:\n return\n # get closest to ra and dec (propagated)\n position = best_gaia_entry(ra, dec, mjd, entries)\n # fill out required information if available\n self.objname = str(self.input_objname)\n self.gaia_id = str(entries['gaiaid'][position])\n self.ra = float(entries['ra'][position])\n self.dec = float(entries['dec'][position])\n # assign pmra\n if not entries['pmra'].mask[position]:\n self.pmra = float(entries['pmra'][position])\n # assign pmde\n if not entries['pmde'].mask[position]:\n self.pmde = float(entries['pmde'][position])\n # assign plx\n if not entries['plx'].mask[position]:\n self.plx = float(entries['plx'][position])\n # assign rv\n if not entries['rv'].mask[position]:\n self.rv = float(entries['rv'][position])\n # assign gmag\n if not entries['gmag'].mask[position]:\n self.gmag = float(entries['gmag'][position])\n # assign bpmag\n if not entries['bpmag'].mask[position]:\n self.bpmag = float(entries['bpmag'][position])\n # assign rpmag\n if not entries['rpmag'].mask[position]:\n self.rpmag = float(entries['rpmag'][position])\n # assign epoch\n self.epoch = 2015.5\n # set used\n self.used = 1\n # set source\n self.source = 'gaia-query-coords'\n\n def _use_inputs(self):\n \"\"\"\n If all else fails use the input values\n\n :return:\n \"\"\"\n # fill out required information if available\n self.gaia_id = None\n self.objname = self.input_objname\n self.ra = self.input_ra\n self.dec = self.input_dec\n self.pmra = self.input_pmra\n self.pmde = self.input_pmde\n self.plx = self.input_plx\n self.rv = self.input_rv\n self.gmag = np.nan\n self.bpmag = np.nan\n self.rpmag = np.nan\n self.epoch = np.nan\n self.teff = np.nan\n self.aliases = []\n self.source = None\n self.used = 1\n # set source\n self.source = 'input'\n\n def write_obj(self, database, commit: bool = True):\n # write to database\n database.add_entry(objname=self.objname, gaia_id=self.gaia_id,\n ra=self.ra, dec=self.dec,\n pmra=self.pmra, pmde=self.pmde,\n plx=self.plx, rv=self.rv,\n gmag=self.gmag, bpmag=self.bpmag,\n rpmag=self.rpmag, epoch=self.epoch,\n teff=self.input_teff, aliases=self.aliases,\n commit=commit)\n\n def write_table(self, outdict):\n \"\"\"\n Proxy write function (used to write to dictionary --> Table)\n :param outdict:\n :return:\n \"\"\"\n\n columns = ['OBJECT', 'GAIADR2ID', 'RA', 'DEC', 'PMRA', 'PMDE', 'PLX',\n 'RV', 'GMAG', 'BPMAG', 
'RPMAG', 'EPOCH', 'TEFF', 'ALIASES',\n 'USED', 'SOURCE']\n values = [self.objname, self.gaia_id, self.ra, self.dec, self.pmra,\n self.pmde, self.plx, self.rv, self.gmag,\n self.bpmag, self.rpmag, self.epoch, self.teff]\n # deal with aliases\n if isinstance(self.aliases, str):\n values.append(self.aliases)\n elif isinstance(self.aliases, list):\n values.append('|'.join(self.aliases))\n else:\n values.append('None')\n # add used\n values.append(self.used)\n # add source\n values.append(self.source)\n # loop around and add to outdict\n for row in range(len(values)):\n if columns[row] in outdict:\n outdict[columns[row]].append(values[row])\n else:\n outdict[columns[row]] = [values[row]]\n # return outdict\n return outdict\n\n\ndef query_gaia(url, query) -> Union[Table, None]:\n \"\"\"\n Query Gaia via a TapPlus query\n\n :param url: str, the URL to the SQL database\n :param query: str, the SQL query to run\n\n :return: astropy.table.Table or None - the results of the gaia TAP query\n \"\"\"\n # set fucntion name\n func_name = __NAME__ + '.query_gaia()'\n # check for astroquery and return a fail and warning if not installed\n try:\n from astroquery.utils.tap.core import TapPlus\n\n except Exception as e:\n eargs = [type(e), str(e), func_name]\n WLOG(FAKE_PARAMS, 'warning', TextEntry('10-016-00009', args=eargs))\n return None\n # ------------------------------------------------------------------\n # try running gaia query\n try:\n with warnings.catch_warnings(record=True) as _:\n # construct gaia TapPlus instance\n gaia = TapPlus(url=url)\n # launch gaia job\n job = gaia.launch_job(query=query)\n # get gaia table\n table = job.get_results()\n except Exception as e:\n wargs = [url, query, type(e), e, func_name]\n WLOG(FAKE_PARAMS, 'warning', TextEntry('10-016-00008', args=wargs))\n # return No row and True to fail\n return None\n # ------------------------------------------------------------------\n # if we have no entries we did not find object\n if len(table) == 0:\n # return None\n return None\n # else we return result\n return table\n\n\ndef query_simbad_id(obj_id: str) -> Union[Table, None]:\n # set fucntion name\n _ = __NAME__ + '.query_simbad()'\n # check for astroquery and return a fail and warning if not installed\n try:\n # import astroquery\n from astroquery.simbad import Simbad\n # get results\n with warnings.catch_warnings(record=True) as _:\n # add ids column\n Simbad.add_votable_fields('ids')\n # query simbad\n return Simbad.query_object(obj_id)\n # deal with all exceptions here\n except Exception as e:\n # log that there was an error with astroquery\n wargs = [obj_id, type(e), str(e)]\n WLOG(FAKE_PARAMS, 'warning', TextEntry('10-016-00020', args=wargs))\n # return unset ra/dec\n return None\n\n\ndef query_glist(objname: str, sheet_id: str, worksheet: int = 0):\n\n # get the google sheet\n gtable = get_google_sheet(sheet_id, worksheet)\n\n # deal with empty table\n if gtable is None:\n return None\n if len(gtable) == 0:\n return None\n # set initial position to None\n position = None\n row = np.nan\n # loop around rows and look for aliases\n for row in range(len(gtable)):\n # set aliases as the objname\n aliases = [gtable['OBJECT'][row]]\n # get the aliases for this row\n aliases += gtable['ALIASES'][row].split('|')\n # search for object name\n position = crossmatch_name(objname, aliases)\n # break if we have found a match\n if position is not None:\n break\n # if position is still None return None\n if position is None:\n return None\n # else we have our Gaia id so return it\n 
return gtable['GAIADR2ID'][row]\n\n\ndef get_google_sheet(sheet_id: str, worksheet: int = 0,\n cached: bool = True) -> Table:\n \"\"\"\n Load a google sheet from url using a sheet id (if cached = True and\n previous loaded - just loads from memory)\n\n :param sheet_id: str, the google sheet id\n :param worksheet: int, the worksheet id (defaults to 0)\n :param cached: bool, if True and previous loaded, loads from memory\n\n :return: Table, astropy table representation of google sheet\n \"\"\"\n # set google cache table as global\n global GOOGLE_TABLES\n # construct url for worksheet\n url = GOOGLE_BASE_URL.format(sheet_id, worksheet)\n # deal with table existing\n if url in GOOGLE_TABLES and cached:\n return GOOGLE_TABLES[url]\n # get data using a request\n rawdata = requests.get(url)\n # convert rawdata input table\n table = Table.read(rawdata.text, format='ascii')\n # add to cached storage\n GOOGLE_TABLES[url] = table\n # return table\n return table\n\n\ndef crossmatch_name(name: str, namelist: List[str]) -> Union[int, None]:\n \"\"\"\n Crossmatch a name with a list of names (returning position in the list of\n names)\n\n :param name: str, the name to search for\n :param namelist: list of strings, the list of names to search\n :return: int, the position of the name in the namelist (if found) else\n None\n \"\"\"\n\n # clean name\n name = name.strip().upper()\n # strip namelist as char array\n namelist = np.char.array(namelist).strip().upper()\n # search for name in list\n if name in namelist:\n position = np.where(name == namelist)[0][0]\n # return position\n return position\n # if not found return None\n return None\n\n\ndef parse_coords(ra: float, dec: float, ra_unit='deg', dec_unit='deg'):\n \"\"\"\n Convert coordinates into degrees via SkyCoord\n\n :param ra: right ascension (with units \"ra_units\")\n :param dec: declination (with units \"dec_units\"\n :param ra_unit: units for right ascension\n :param dec_unit: units for declination\n :return:\n \"\"\"\n # get Sky Coord instances\n coord = SkyCoord(ra, dec, unit=[ra_unit, dec_unit])\n\n return coord.ra.value, coord.dec.value\n\n\ndef best_gaia_entry(ra: float, dec: float, mjd: float, entries: Table):\n \"\"\"\n Using the originally supplied ra and dec choose the closest\n entries (propagating all entries in time to match current ra and dec\n at time = 'mjd')\n\n :param ra: float, the right ascension in degrees\n :param dec: float, the declination in degrees\n :param mjd: float, the modified julien date for observation\n :param entries:\n :return:\n \"\"\"\n # get the original coords in SkyCoord\n ocoord = SkyCoord(ra, dec, unit='deg')\n # get gaia time and observation time\n gaia_time = Time('2015.5', format='decimalyear')\n obs_time = Time(mjd, format='mjd')\n # get entries as numpy arrays (with units)\n ra_arr = np.array(entries['ra']) * uu.deg\n dec_arr = np.array(entries['dec']) * uu.deg\n pmra_arr = np.array(entries['pmra']) * uu.mas/uu.yr\n pmde_arr = np.array(entries['pmde']) * uu.mas/uu.yr\n plx_arr = np.array(entries['plx']) * uu.mas\n # propagate all entries ra and dec to mjd\n coords0 = SkyCoord(ra_arr, dec_arr,\n pm_ra_cosdec=pmra_arr, pm_dec=pmde_arr,\n distance=Distance(parallax=plx_arr),\n obstime=gaia_time)\n # apply space motion\n coords1 = coords0.apply_space_motion(obs_time)\n # crossmatch with ra and dec and keep closest\n separation = coords1.separation(ocoord)\n # find the position of the minimum separated value\n position = np.argmin(separation.value)\n # return the position\n return position\n\n\n# 
=============================================================================\n# Start of code\n# =============================================================================\nif __name__ == \"__main__\":\n # load table\n table = Table.read(OBJECT_FILE, format='csv')\n\n if USE_DATABASE:\n\n from apero.core import constants\n from apero.core.core import drs_database\n\n # get object database\n ObjectDatabase = drs_database.ObjectDatabase\n\n params = constants.load('SPIROU')\n pconst = constants.pload('SPIROU')\n\n params.set('OBJ_LIST_CROSS_MATCH_RADIUS', value=CMRADIUS)\n params.set('OBJ_LIST_GAIA_PLX_LIM', value=0.5)\n params.set('OBJ_LIST_GOOGLE_SHEET_URL',\n value='1jwlux8AJjBMMVrbg6LszJIpFJrk6alhbT5HA7BiAHD8')\n params.set('OBJ_LIST_GOOGLE_SHEET_WNUM', value=0)\n\n # load database\n objdbm = ObjectDatabase(params)\n objdbm.load_db()\n\n # for now delete database\n columns, ctypes = pconst.OBJECT_DB_COLUMNS()\n objdbm.database.delete_table('MAIN')\n objdbm.database.add_table('MAIN', columns, ctypes)\n objdbm.load_db()\n outdict = dict()\n else:\n params = FAKE_PARAMS\n objdbm = None\n outdict = dict()\n\n # loop around objects (later)\n for row in range(len(table)):\n print('='*50)\n print('Accessing row {0} / {1}'.format(row + 1, len(table)))\n print('='*50)\n # get properties\n gaia_id = None\n ra = table['RA_DEG'][row]\n dec = table['DEC_DEG'][row]\n objname = table['OBJECT'][row]\n # mjdmid\n exptime_days = (table['EXPTIME'][row] / 2.0) / (3600 * 24.0)\n mjdmid = table['MJDEND'][row] - exptime_days\n # set up an object instance for this target\n astro_obj = AstroObject(params, pconst, gaia_id, ra, dec, objdbm,\n objname, 0.0, 0.0, 0.0, 0.0, None)\n # resolve target (from gaia id or ra/dec)\n astro_obj.resolve_target(mjdmid)\n\n # get simbad aliases for this object\n astro_obj.get_simbad_aliases()\n\n # write to database\n if USE_DATABASE:\n astro_obj.write_obj(objdbm)\n else:\n astro_obj.write_table(outdict)\n\n print(astro_obj)\n\n # need to write table\n if not USE_DATABASE:\n # make table\n outtable = Table()\n # add columns\n for key in outdict:\n outtable[key] = outdict[key]\n # sort by parallax\n with warnings.catch_warnings(record=True) as _:\n sortmask = np.argsort(outtable['PLX'])\n outtable = outtable[sortmask]\n # write to file\n outtable.write('test_obj_database.csv', format='csv', overwrite=True)\n\n\n# =============================================================================\n# End of code\n# =============================================================================\n"
] | [
[
"matplotlib.use",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.zeros_like",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots_adjust",
"numpy.roll",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplot2grid",
"matplotlib.cm.get_cmap",
"matplotlib.pyplot.figure"
],
[
"numpy.array"
],
[
"numpy.char.array",
"numpy.argmin",
"numpy.argsort",
"numpy.array",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
viantirreau/cupy | [
"cafe9af0e974ff88fc6aa43bf106e343a60fb983"
] | [
"tests/cupyx_tests/scipy_tests/sparse_tests/test_csr.py"
] | [
"import contextlib\nimport pickle\nimport unittest\nimport warnings\n\nimport numpy\nimport pytest\ntry:\n import scipy.sparse\n scipy_available = True\nexcept ImportError:\n scipy_available = False\n\nimport cupy\nfrom cupy.core import _accelerator\nfrom cupy import testing\nfrom cupyx.scipy import sparse\n\n\ndef _make(xp, sp, dtype):\n data = xp.array([0, 1, 2, 3], dtype)\n indices = xp.array([0, 1, 3, 2], 'i')\n indptr = xp.array([0, 2, 3, 4], 'i')\n # 0, 1, 0, 0\n # 0, 0, 0, 2\n # 0, 0, 3, 0\n return sp.csr_matrix((data, indices, indptr), shape=(3, 4))\n\n\ndef _make_complex(xp, sp, dtype):\n data = xp.array([0, 1, 2, 3], dtype)\n if dtype in [numpy.complex64, numpy.complex128]:\n data = data - 1j\n indices = xp.array([0, 1, 3, 2], 'i')\n indptr = xp.array([0, 2, 3, 4], 'i')\n # 0, 1 - 1j, 0, 0\n # 0, 0, 0, 2 - 1j\n # 0, 0, 3 - 1j, 0\n return sp.csr_matrix((data, indices, indptr), shape=(3, 4))\n\n\ndef _make2(xp, sp, dtype):\n data = xp.array([1, 2, 3, 4], dtype)\n indices = xp.array([2, 1, 2, 2], 'i')\n indptr = xp.array([0, 1, 3, 4], 'i')\n # 0, 0, 1, 0\n # 0, 2, 3, 0\n # 0, 0, 4, 0\n return sp.csr_matrix((data, indices, indptr), shape=(3, 4))\n\n\ndef _make3(xp, sp, dtype):\n data = xp.array([1, 2, 3, 4, 5], dtype)\n indices = xp.array([0, 2, 1, 0, 2], 'i')\n indptr = xp.array([0, 1, 3, 3, 5], 'i')\n # 1, 0, 0\n # 0, 3, 2\n # 0, 0, 0\n # 4, 0, 5\n return sp.csr_matrix((data, indices, indptr), shape=(4, 3))\n\n\ndef _make4(xp, sp, dtype):\n data = xp.array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype)\n indices = xp.array([0, 2, 3, 0, 1, 3, 0, 1, 2], 'i')\n indptr = xp.array([0, 3, 6, 9], 'i')\n # 1, 0, 2, 3\n # 4, 5, 0, 6\n # 7, 8, 9, 0\n return sp.csr_matrix((data, indices, indptr), shape=(3, 4))\n\n\ndef _make_unordered(xp, sp, dtype):\n data = xp.array([1, 2, 3, 4], dtype)\n indices = xp.array([1, 0, 1, 2], 'i')\n indptr = xp.array([0, 2, 3, 4], 'i')\n # 2, 1, 0, 0\n # 0, 3, 0, 0\n # 0, 0, 4, 0\n return sp.csr_matrix((data, indices, indptr), shape=(3, 4))\n\n\ndef _make_duplicate(xp, sp, dtype):\n data = xp.array([0, 1, 3, 2, 4, 5], dtype)\n indices = xp.array([0, 0, 0, 2, 0, 2], 'i')\n indptr = xp.array([0, 3, 6, 6], 'i')\n # 4, 0, 0, 0\n # 4, 0, 7, 0\n # 0, 0, 0, 0\n return sp.csr_matrix((data, indices, indptr), shape=(3, 4))\n\n\ndef _make_empty(xp, sp, dtype):\n data = xp.array([], dtype)\n indices = xp.array([], 'i')\n indptr = xp.array([0, 0, 0, 0], 'i')\n return sp.csr_matrix((data, indices, indptr), shape=(3, 4))\n\n\ndef _make_square(xp, sp, dtype):\n data = xp.array([0, 1, 2, 3], dtype)\n indices = xp.array([0, 1, 0, 2], 'i')\n indptr = xp.array([0, 2, 3, 4], 'i')\n # 0, 1, 0\n # 2, 0, 0\n # 0, 0, 3\n return sp.csr_matrix((data, indices, indptr), shape=(3, 3))\n\n\ndef _make_row(xp, sp, dtype):\n data = xp.array([1, 2, 3], dtype)\n indices = xp.array([0, 2, 3], 'i')\n indptr = xp.array([0, 3], 'i')\n # 1, 0, 2, 3\n return sp.csr_matrix((data, indices, indptr), shape=(1, 4))\n\n\ndef _make_col(xp, sp, dtype):\n data = xp.array([1, 2], dtype)\n indices = xp.array([0, 0], 'i')\n indptr = xp.array([0, 1, 1, 2], 'i')\n # 1\n # 0\n # 2\n return sp.csr_matrix((data, indices, indptr), shape=(3, 1))\n\n\ndef _make_shape(xp, sp, dtype):\n return sp.csr_matrix((3, 4), dtype=dtype)\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],\n}))\nclass TestCsrMatrix(unittest.TestCase):\n\n def setUp(self):\n self.m = _make(cupy, sparse, self.dtype)\n\n def test_dtype(self):\n assert self.m.dtype == self.dtype\n\n def 
test_data(self):\n assert self.m.data.dtype == self.dtype\n testing.assert_array_equal(\n self.m.data, cupy.array([0, 1, 2, 3], self.dtype))\n\n def test_indices(self):\n assert self.m.indices.dtype == numpy.int32\n testing.assert_array_equal(\n self.m.indices, cupy.array([0, 1, 3, 2], self.dtype))\n\n def test_indptr(self):\n assert self.m.indptr.dtype == numpy.int32\n testing.assert_array_equal(\n self.m.indptr, cupy.array([0, 2, 3, 4], self.dtype))\n\n def test_init_copy(self):\n n = sparse.csr_matrix(self.m)\n assert n is not self.m\n cupy.testing.assert_array_equal(n.data, self.m.data)\n cupy.testing.assert_array_equal(n.indices, self.m.indices)\n cupy.testing.assert_array_equal(n.indptr, self.m.indptr)\n assert n.shape == self.m.shape\n\n def test_init_copy_other_sparse(self):\n n = sparse.csr_matrix(self.m.tocsc())\n cupy.testing.assert_array_equal(n.data, self.m.data)\n cupy.testing.assert_array_equal(n.indices, self.m.indices)\n cupy.testing.assert_array_equal(n.indptr, self.m.indptr)\n assert n.shape == self.m.shape\n\n @testing.with_requires('scipy')\n def test_init_copy_scipy_sparse(self):\n m = _make(numpy, scipy.sparse, self.dtype)\n n = sparse.csr_matrix(m)\n assert isinstance(n.data, cupy.ndarray)\n assert isinstance(n.indices, cupy.ndarray)\n assert isinstance(n.indptr, cupy.ndarray)\n cupy.testing.assert_array_equal(n.data, m.data)\n cupy.testing.assert_array_equal(n.indices, m.indices)\n cupy.testing.assert_array_equal(n.indptr, m.indptr)\n assert n.shape == m.shape\n\n @testing.with_requires('scipy')\n def test_init_copy_other_scipy_sparse(self):\n m = _make(numpy, scipy.sparse, self.dtype)\n n = sparse.csr_matrix(m.tocsc())\n assert isinstance(n.data, cupy.ndarray)\n assert isinstance(n.indices, cupy.ndarray)\n assert isinstance(n.indptr, cupy.ndarray)\n cupy.testing.assert_array_equal(n.data, m.data)\n cupy.testing.assert_array_equal(n.indices, m.indices)\n cupy.testing.assert_array_equal(n.indptr, m.indptr)\n assert n.shape == m.shape\n\n def test_init_dense(self):\n m = cupy.array([[0, 1, 0, 2],\n [0, 0, 0, 0],\n [0, 0, 3, 0]], dtype=self.dtype)\n n = sparse.csr_matrix(m)\n assert n.nnz == 3\n assert n.shape == (3, 4)\n cupy.testing.assert_array_equal(n.data, [1, 2, 3])\n cupy.testing.assert_array_equal(n.indices, [1, 3, 2])\n cupy.testing.assert_array_equal(n.indptr, [0, 2, 2, 3])\n\n def test_init_dense_empty(self):\n m = cupy.array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]], dtype=self.dtype)\n n = sparse.csr_matrix(m)\n assert n.nnz == 0\n assert n.shape == (3, 4)\n cupy.testing.assert_array_equal(n.data, [])\n cupy.testing.assert_array_equal(n.indices, [])\n cupy.testing.assert_array_equal(n.indptr, [0, 0, 0, 0])\n\n def test_init_dense_one_dim(self):\n m = cupy.array([0, 1, 0, 2], dtype=self.dtype)\n n = sparse.csr_matrix(m)\n assert n.nnz == 2\n assert n.shape == (1, 4)\n cupy.testing.assert_array_equal(n.data, [1, 2])\n cupy.testing.assert_array_equal(n.indices, [1, 3])\n cupy.testing.assert_array_equal(n.indptr, [0, 2])\n\n def test_init_dense_zero_dim(self):\n m = cupy.array(1, dtype=self.dtype)\n n = sparse.csr_matrix(m)\n assert n.nnz == 1\n assert n.shape == (1, 1)\n cupy.testing.assert_array_equal(n.data, [1])\n cupy.testing.assert_array_equal(n.indices, [0])\n cupy.testing.assert_array_equal(n.indptr, [0, 1])\n\n def test_init_data_row_col(self):\n o = self.m.tocoo()\n n = sparse.csr_matrix((o.data, (o.row, o.col)))\n cupy.testing.assert_array_equal(n.data, self.m.data)\n cupy.testing.assert_array_equal(n.indices, self.m.indices)\n 
cupy.testing.assert_array_equal(n.indptr, self.m.indptr)\n assert n.shape == self.m.shape\n\n @testing.with_requires('scipy')\n def test_init_dense_invalid_ndim(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = xp.zeros((1, 1, 1), dtype=self.dtype)\n with pytest.raises(TypeError):\n sp.csr_matrix(m)\n\n def test_copy(self):\n n = self.m.copy()\n assert isinstance(n, sparse.csr_matrix)\n assert n is not self.m\n assert n.data is not self.m.data\n assert n.indices is not self.m.indices\n assert n.indptr is not self.m.indptr\n cupy.testing.assert_array_equal(n.data, self.m.data)\n cupy.testing.assert_array_equal(n.indices, self.m.indices)\n cupy.testing.assert_array_equal(n.indptr, self.m.indptr)\n assert n.shape == self.m.shape\n\n def test_shape(self):\n assert self.m.shape == (3, 4)\n\n def test_ndim(self):\n assert self.m.ndim == 2\n\n def test_nnz(self):\n assert self.m.nnz == 4\n\n def test_conj(self):\n n = _make_complex(cupy, sparse, self.dtype)\n cupy.testing.assert_array_equal(n.conj().data, n.data.conj())\n\n @testing.with_requires('scipy')\n def test_get(self):\n m = self.m.get()\n assert isinstance(m, scipy.sparse.csr_matrix)\n expect = [\n [0, 1, 0, 0],\n [0, 0, 0, 2],\n [0, 0, 3, 0]\n ]\n numpy.testing.assert_allclose(m.toarray(), expect)\n\n @testing.with_requires('scipy')\n def test_str(self):\n if numpy.dtype(self.dtype).kind == 'f':\n expect = ''' (0, 0)\\t0.0\n (0, 1)\\t1.0\n (1, 3)\\t2.0\n (2, 2)\\t3.0'''\n elif numpy.dtype(self.dtype).kind == 'c':\n expect = ''' (0, 0)\\t0j\n (0, 1)\\t(1+0j)\n (1, 3)\\t(2+0j)\n (2, 2)\\t(3+0j)'''\n\n assert str(self.m) == expect\n\n def test_toarray(self):\n m = self.m.toarray()\n expect = [\n [0, 1, 0, 0],\n [0, 0, 0, 2],\n [0, 0, 3, 0]\n ]\n assert m.flags.c_contiguous\n cupy.testing.assert_allclose(m, expect)\n\n def test_pickle_roundtrip(self):\n s = _make(cupy, sparse, self.dtype)\n\n s2 = pickle.loads(pickle.dumps(s))\n assert s._descr.descriptor != s2._descr.descriptor\n assert s.shape == s2.shape\n assert s.dtype == s2.dtype\n if scipy_available:\n assert (s.get() != s2.get()).count_nonzero() == 0\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],\n}))\[email protected]_requires('scipy')\nclass TestCsrMatrixInit(unittest.TestCase):\n\n def setUp(self):\n self.shape = (3, 4)\n\n def data(self, xp):\n return xp.array([1, 2, 3, 4], self.dtype)\n\n def indices(self, xp):\n return xp.array([0, 1, 3, 2], 'i')\n\n def indptr(self, xp):\n return xp.array([0, 2, 3, 4], 'i')\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_shape_none(self, xp, sp):\n x = sp.csr_matrix(\n (self.data(xp), self.indices(xp), self.indptr(xp)), shape=None)\n assert x.shape == (3, 4)\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_dtype(self, xp, sp):\n data = self.data(xp).real.astype('i')\n x = sp.csr_matrix(\n (data, self.indices(xp), self.indptr(xp)), dtype=self.dtype)\n assert x.dtype == self.dtype\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_copy_true(self, xp, sp):\n data = self.data(xp)\n indices = self.indices(xp)\n indptr = self.indptr(xp)\n x = sp.csr_matrix((data, indices, indptr), copy=True)\n\n assert data is not x.data\n assert indices is not x.indices\n assert indptr is not x.indptr\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_init_with_shape(self, xp, sp):\n s = sp.csr_matrix(self.shape)\n assert s.shape == self.shape\n assert s.dtype == 'd'\n assert s.size == 0\n return s\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def 
test_init_with_shape_and_dtype(self, xp, sp):\n s = sp.csr_matrix(self.shape, dtype=self.dtype)\n assert s.shape == self.shape\n assert s.dtype == self.dtype\n assert s.size == 0\n return s\n\n @testing.numpy_cupy_allclose(sp_name='sp', atol=1e-5)\n def test_intlike_shape(self, xp, sp):\n s = sp.csr_matrix((self.data(xp), self.indices(xp), self.indptr(xp)),\n shape=(xp.array(self.shape[0]),\n xp.int32(self.shape[1])))\n assert isinstance(s.shape[0], int)\n assert isinstance(s.shape[1], int)\n return s\n\n def test_shape_invalid(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n with pytest.raises(ValueError):\n sp.csr_matrix(\n (self.data(xp), self.indices(xp), self.indptr(xp)),\n shape=(2,))\n\n def test_data_invalid(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n with pytest.raises(ValueError):\n sp.csr_matrix(\n ('invalid', self.indices(xp), self.indptr(xp)),\n shape=self.shape)\n\n def test_data_invalid_ndim(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n with pytest.raises(ValueError):\n sp.csr_matrix(\n (self.data(xp)[None], self.indices(xp), self.indptr(xp)),\n shape=self.shape)\n\n def test_indices_invalid(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n with pytest.raises(ValueError):\n sp.csr_matrix(\n (self.data(xp), 'invalid', self.indptr(xp)),\n shape=self.shape)\n\n def test_indices_invalid_ndim(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n with pytest.raises(ValueError):\n sp.csr_matrix(\n (self.data(xp), self.indices(xp)[None], self.indptr(xp)),\n shape=self.shape)\n\n def test_indptr_invalid(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n with pytest.raises(ValueError):\n sp.csr_matrix(\n (self.data(xp), self.indices(xp), 'invalid'),\n shape=self.shape)\n\n def test_indptr_invalid_ndim(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n with pytest.raises(ValueError):\n sp.csr_matrix(\n (self.data(xp), self.indices(xp), self.indptr(xp)[None]),\n shape=self.shape)\n\n def test_data_indices_different_length(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n data = xp.arange(5, dtype=self.dtype)\n with pytest.raises(ValueError):\n sp.csr_matrix(\n (data, self.indices(xp), self.indptr(xp)),\n shape=self.shape)\n\n def test_indptr_invalid_length(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n indptr = xp.array([0, 1], 'i')\n with pytest.raises(ValueError):\n sp.csr_matrix(\n (self.data(xp), self.indices(xp), indptr),\n shape=self.shape)\n\n def test_unsupported_dtype(self):\n with self.assertRaises(ValueError):\n sparse.csr_matrix(\n (self.data(cupy), self.indices(cupy), self.indptr(cupy)),\n shape=self.shape, dtype='i')\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_conj(self, xp, sp):\n n = _make_complex(xp, sp, self.dtype)\n cupy.testing.assert_array_equal(n.conj().data, n.data.conj())\n\n\[email protected](*testing.product({\n 'make_method': [\n '_make', '_make_unordered', '_make_empty', '_make_duplicate',\n '_make_shape'],\n 'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],\n}))\[email protected]_requires('scipy')\nclass TestCsrMatrixScipyComparison(unittest.TestCase):\n\n @property\n def make(self):\n return globals()[self.make_method]\n\n def test_len(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n with pytest.raises(TypeError):\n len(m)\n\n @testing.with_requires('scipy>=1.4.0')\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def 
test_iter(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n rows = []\n for r in m:\n rows.append(r)\n assert isinstance(r, sp.spmatrix)\n assert len(rows) == 3\n return xp.concatenate([r.toarray() for r in rows])\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_asfptype(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m.asfptype().toarray()\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_toarray(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n a = m.toarray()\n assert a.flags.c_contiguous\n return a\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_toarray_c_order(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n a = m.toarray(order='C')\n assert a.flags.c_contiguous\n return a\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_toarray_f_order(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n a = m.toarray(order='F')\n assert a.flags.f_contiguous\n return a\n\n @testing.with_requires('numpy>=1.19')\n def test_toarray_unknown_order(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n with pytest.raises(ValueError):\n m.toarray(order='#')\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_A(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m.A\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_tocoo(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m.tocoo()\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_tocoo_copy(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n n = m.tocoo(copy=True)\n assert m.data is not n.data\n return n\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_tocsc(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m.tocsc()\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_tocsc_copy(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n n = m.tocsc(copy=True)\n assert m.data is not n.data\n assert m.indices is not n.indices\n assert m.indptr is not n.indptr\n return n\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_tocsr(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m.tocsr()\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_tocsr_copy(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n n = m.tocsr(copy=True)\n assert m.data is not n.data\n assert m.indices is not n.indices\n assert m.indptr is not n.indptr\n return n\n\n # dot\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_dot_scalar(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m.dot(2.0)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_dot_numpy_scalar(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m.dot(numpy.dtype(self.dtype).type(2.0)).toarray()\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_dot_csr(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype)\n return m.dot(x)\n\n def test_dot_csr_invalid_shape(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n x = sp.csr_matrix((5, 3), dtype=self.dtype)\n with pytest.raises(ValueError):\n m.dot(x)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_dot_csc(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype).tocsc()\n return m.dot(x)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_dot_sparse(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype).tocoo()\n return m.dot(x)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_dot_zero_dim(self, xp, 
sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.array(2, dtype=self.dtype)\n return m.dot(x)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_dot_dense_vector(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(4).astype(self.dtype)\n return m.dot(x)\n\n def test_dot_dense_vector_invalid_shape(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(5).astype(self.dtype)\n with pytest.raises(ValueError):\n m.dot(x)\n\n @testing.numpy_cupy_allclose(sp_name='sp', contiguous_check=False)\n def test_dot_dense_matrix(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(8).reshape(4, 2).astype(self.dtype)\n return m.dot(x)\n\n def test_dot_dense_matrix_invalid_shape(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(10).reshape(5, 2).astype(self.dtype)\n with pytest.raises(ValueError):\n m.dot(x)\n\n def test_dot_dense_ndim3(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(24).reshape(4, 2, 3).astype(self.dtype)\n with pytest.raises(ValueError):\n m.dot(x)\n\n def test_dot_unsupported(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n with pytest.raises(TypeError):\n m.dot(None)\n\n # __add__\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_add_zero(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m + 0\n\n def test_add_scalar(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n with pytest.raises(NotImplementedError):\n m + 1\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_add_csr(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n n = _make2(xp, sp, self.dtype)\n return m + n\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_add_coo(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n n = _make2(xp, sp, self.dtype).tocoo()\n return m + n\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_add_dense(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n n = xp.arange(12).reshape(3, 4)\n return m + n\n\n # __radd__\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_radd_zero(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return (0 + m).toarray()\n\n def test_radd_scalar(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n with pytest.raises(NotImplementedError):\n 1 + m\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_radd_dense(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n n = xp.arange(12).reshape(3, 4)\n return n + m\n\n # __sub__\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sub_zero(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return (m - 0).toarray()\n\n def test_sub_scalar(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n with pytest.raises(NotImplementedError):\n m - 1\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sub_csr(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n n = _make2(xp, sp, self.dtype)\n return (m - n).toarray()\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sub_coo(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n n = _make2(xp, sp, self.dtype).tocoo()\n return m - n\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sub_dense(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n n = xp.arange(12).reshape(3, 4)\n return m - 
n\n\n # __rsub__\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_rsub_zero(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return 0 - m\n\n def test_rsub_scalar(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n with pytest.raises(NotImplementedError):\n 1 - m\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_rsub_dense(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n n = xp.arange(12).reshape(3, 4)\n return n - m\n\n # __mul__\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mul_scalar(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m * 2.0\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mul_numpy_scalar(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m * numpy.dtype(self.dtype).type(2.0)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mul_csr(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype)\n return m * x\n\n def test_mul_csr_invalid_shape(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n x = sp.csr_matrix((5, 3), dtype=self.dtype)\n with pytest.raises(ValueError):\n m * x\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mul_csc(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype).tocsc()\n return m * x\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mul_sparse(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype).tocoo()\n return m * x\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mul_zero_dim(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.array(2, dtype=self.dtype)\n return m * x\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mul_dense_vector(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(4).astype(self.dtype)\n return m * x\n\n def test_mul_dense_vector_invalid_shape(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(5).astype(self.dtype)\n with pytest.raises(ValueError):\n m * x\n\n @testing.numpy_cupy_allclose(sp_name='sp', contiguous_check=False)\n def test_mul_dense_matrix(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(8).reshape(4, 2).astype(self.dtype)\n return m * x\n\n def test_mul_dense_matrix_invalid_shape(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(10).reshape(5, 2).astype(self.dtype)\n with pytest.raises(ValueError):\n m * x\n\n def test_mul_dense_ndim3(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(24).reshape(4, 2, 3).astype(self.dtype)\n with pytest.raises(ValueError):\n m * x\n\n def test_mul_unsupported(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n with pytest.raises(TypeError):\n m * None\n\n # __rmul__\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_rmul_scalar(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return 2.0 * m\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_rmul_numpy_scalar(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return numpy.dtype(self.dtype).type(2.0) * m\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_rmul_csr(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype)\n return x * m\n\n @testing.numpy_cupy_allclose(sp_name='sp', _check_sparse_format=False)\n 
def test_rmul_csc(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype).tocsc()\n return x * m\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_rmul_sparse(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = _make3(xp, sp, self.dtype).tocoo()\n return x * m\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_rmul_zero_dim(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.array(2, dtype=self.dtype)\n return x * m\n\n @testing.numpy_cupy_allclose(sp_name='sp', contiguous_check=False)\n def test_rmul_dense_matrix(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(12).reshape(4, 3).astype(self.dtype)\n return x * m\n\n def test_rmul_dense_ndim3(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(24).reshape(4, 2, 3).astype(self.dtype)\n with pytest.raises(ValueError):\n x * m\n\n def test_rmul_unsupported(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n if m.nnz == 0:\n # When there is no element, a SciPy's sparse matrix does\n # not raise an error when it is multiplied with None.\n continue\n with pytest.raises(TypeError):\n None * m\n\n # Note: '@' operator is almost equivalent to '*' operator. Only test the\n # cases where '@' raises an exception and '*' does not.\n def test_matmul_scalar(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n x = 2.0\n with pytest.raises(ValueError):\n m @ x\n with pytest.raises(ValueError):\n x @ m\n\n def test_matmul_numpy_scalar(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n x = numpy.dtype(self.dtype).type(2.0)\n with pytest.raises(ValueError):\n m @ x\n with pytest.raises(ValueError):\n x @ m\n\n def test_matmul_scalar_like_array(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n x = xp.array(2.0, self.dtype)\n with pytest.raises(ValueError):\n m @ x\n with pytest.raises(ValueError):\n x @ m\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_has_canonical_format(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m.has_canonical_format\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_has_canonical_format2(self, xp, sp):\n # this test is adopted from SciPy's\n M = sp.csr_matrix((xp.array([2], dtype=self.dtype),\n xp.array([0]), xp.array([0, 1])))\n assert M.has_canonical_format is True\n return M\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_has_canonical_format3(self, xp, sp):\n # this test is adopted from SciPy's\n indices = xp.array([0, 0]) # contains duplicate\n data = xp.array([1, 1], dtype=self.dtype)\n indptr = xp.array([0, 2])\n\n M = sp.csr_matrix((data, indices, indptr))\n assert M.has_canonical_format is False\n\n # set by deduplicating\n M.sum_duplicates()\n assert M.has_canonical_format is True\n assert 1 == len(M.indices)\n return M\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_has_canonical_format4(self, xp, sp):\n # this test is adopted from SciPy's\n indices = xp.array([0, 0]) # contains duplicate\n data = xp.array([1, 1], dtype=self.dtype)\n indptr = xp.array([0, 2])\n\n M = sp.csr_matrix((data, indices, indptr))\n # set manually (although underlyingly duplicated)\n M.has_canonical_format = True\n assert M.has_canonical_format\n assert 2 == len(M.indices) # unaffected content\n\n # ensure deduplication bypassed when has_canonical_format == True\n 
M.sum_duplicates()\n assert 2 == len(M.indices) # unaffected content\n return M\n\n @testing.with_requires('scipy>1.6.0')\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_has_sorted_indices(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m.has_sorted_indices\n\n # TODO(asi1024): Remove test after the fixed version is released.\n # https://github.com/scipy/scipy/pull/13426\n @testing.with_requires('scipy<=1.6.0')\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_has_sorted_indices_for_old_scipy(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return bool(m.has_sorted_indices)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_has_sorted_indices2(self, xp, sp):\n # this test is adopted from SciPy's\n sorted_inds = xp.array([0, 1])\n data = xp.array([1, 1], dtype=self.dtype)\n indptr = xp.array([0, 2])\n M = sp.csr_matrix((data, sorted_inds, indptr))\n assert M.has_sorted_indices\n return M\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_has_sorted_indices3(self, xp, sp):\n # this test is adopted from SciPy's\n sorted_inds = xp.array([0, 1])\n unsorted_inds = xp.array([1, 0])\n data = xp.array([1, 1], dtype=self.dtype)\n indptr = xp.array([0, 2])\n M = sp.csr_matrix((data, unsorted_inds, indptr))\n assert not M.has_sorted_indices\n\n # set by sorting\n M.sort_indices()\n assert M.has_sorted_indices\n assert (M.indices == sorted_inds).all()\n return M\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_has_sorted_indices4(self, xp, sp):\n # this test is adopted from SciPy's\n unsorted_inds = xp.array([1, 0])\n data = xp.array([1, 1], dtype=self.dtype)\n indptr = xp.array([0, 2])\n M = sp.csr_matrix((data, unsorted_inds, indptr))\n\n # set manually (although underlyingly unsorted)\n M.has_sorted_indices = True\n assert M.has_sorted_indices\n assert (M.indices == unsorted_inds).all()\n\n # ensure sort bypassed when has_sorted_indices == True\n M.sort_indices()\n assert (M.indices == unsorted_inds).all()\n return M\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sort_indices(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n m.sort_indices()\n assert m.has_sorted_indices\n return m\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sort_indices2(self, xp, sp):\n # this test is adopted from SciPy's\n data = xp.arange(5).astype(xp.float32)\n indices = xp.array([7, 2, 1, 5, 4])\n indptr = xp.array([0, 3, 5])\n asp = sp.csr_matrix((data, indices, indptr), shape=(2, 10))\n asp.sort_indices()\n assert (asp.indices == xp.array([1, 2, 7, 4, 5])).all()\n return asp.todense()\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sorted_indices(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n m = m.sorted_indices()\n assert m.has_sorted_indices\n return m\n\n def test_sum_tuple_axis(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n with pytest.raises(TypeError):\n m.sum(axis=(0, 1))\n\n def test_sum_str_axis(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n with pytest.raises(TypeError):\n m.sum(axis='test')\n\n def test_sum_too_large_axis(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n with pytest.raises(ValueError):\n m.sum(axis=3)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sum_duplicates(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n m.sum_duplicates()\n assert m.has_canonical_format\n return m\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def 
test_transpose(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m.transpose()\n\n def test_transpose_axes_int(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = self.make(xp, sp, self.dtype)\n with pytest.raises(ValueError):\n m.transpose(axes=0)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_eliminate_zeros(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n m.eliminate_zeros()\n return m\n\n @testing.numpy_cupy_equal(sp_name='sp')\n @unittest.skipIf(\n cupy.cuda.runtime.runtimeGetVersion() < 8000,\n 'CUDA <8 cannot keep number of non-zero entries ')\n def test_eliminate_zeros_nnz(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n m.eliminate_zeros()\n return m.nnz\n\n # multiply\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_multiply_scalar(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n return m.multiply(2).toarray()\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_multiply_dense_row(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(4, dtype=self.dtype)\n return m.multiply(x).toarray()\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_multiply_dense_col(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(3, dtype=self.dtype).reshape(3, 1)\n return m.multiply(x).toarray()\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_multiply_dense_matrix(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(12, dtype=self.dtype).reshape(3, 4)\n return m.multiply(x).toarray()\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_multiply_csr_matrix(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = _make4(xp, sp, self.dtype)\n return m.multiply(x).toarray()\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_multiply_csr_row(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = _make_row(xp, sp, self.dtype)\n return m.multiply(x).toarray()\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_multiply_csr_col(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = _make_col(xp, sp, self.dtype)\n return m.multiply(x).toarray()\n\n def _make_scalar(self, dtype):\n if numpy.issubdtype(dtype, numpy.integer):\n return dtype(2)\n elif numpy.issubdtype(dtype, numpy.floating):\n return dtype(2.5)\n else:\n return dtype(2.5 - 1.5j)\n\n # divide\n @testing.for_dtypes('ifdFD')\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_divide_scalar(self, xp, sp, dtype):\n m = self.make(xp, sp, self.dtype)\n y = m / self._make_scalar(dtype)\n return y.toarray()\n\n @testing.for_dtypes('ifdFD')\n # type promotion rules are different for ()-shaped arrays\n @testing.numpy_cupy_allclose(sp_name='sp', type_check=False)\n def test_divide_scalarlike(self, xp, sp, dtype):\n m = self.make(xp, sp, self.dtype)\n y = m / xp.array(self._make_scalar(dtype))\n return y.toarray()\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_divide_dense_row(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(4, dtype=self.dtype)\n return m / x\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_divide_dense_col(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(3, dtype=self.dtype).reshape(3, 1)\n return m / x\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_divide_dense_matrix(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(12, dtype=self.dtype).reshape(3, 4)\n return m / x\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_divide_csr_matrix(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = _make4(xp, sp, 
self.dtype)\n return m / x\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],\n}))\[email protected]_requires('scipy')\nclass TestCsrMatrixPowScipyComparison(unittest.TestCase):\n\n @testing.numpy_cupy_allclose(sp_name='sp', _check_sparse_format=False)\n def test_pow_0(self, xp, sp):\n m = _make_square(xp, sp, self.dtype)\n return m ** 0\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_pow_1(self, xp, sp):\n m = _make_square(xp, sp, self.dtype)\n return m ** 1\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_pow_2(self, xp, sp):\n m = _make_square(xp, sp, self.dtype)\n return m ** 2\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_pow_3(self, xp, sp):\n m = _make_square(xp, sp, self.dtype)\n return m ** 3\n return m ** 3\n\n def test_pow_neg(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make_square(xp, sp, self.dtype)\n with pytest.raises(ValueError):\n m ** -1\n\n def test_pow_not_square(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make(xp, sp, self.dtype)\n with pytest.raises(TypeError):\n m ** 2\n\n def test_pow_float(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make_square(xp, sp, self.dtype)\n with pytest.raises(ValueError):\n m ** 1.5\n\n def test_pow_list(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n m = _make_square(xp, sp, self.dtype)\n with pytest.raises(TypeError):\n m ** []\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float32, numpy.float64],\n 'ret_dtype': [None, numpy.float32, numpy.float64],\n 'axis': [None, 0, 1, -1, -2],\n}))\[email protected]_requires('scipy')\nclass TestCsrMatrixSum(unittest.TestCase):\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sum(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return m.sum(axis=self.axis, dtype=self.ret_dtype)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_sum_with_out(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n if self.axis is None:\n shape = ()\n else:\n shape = list(m.shape)\n shape[self.axis] = 1\n shape = tuple(shape)\n out = xp.empty(shape, dtype=self.ret_dtype)\n if xp is numpy:\n # TODO(unno): numpy.matrix is used for scipy.sparse though\n # cupy.ndarray is used for cupyx.scipy.sparse.\n out = xp.asmatrix(out)\n return m.sum(axis=self.axis, dtype=self.ret_dtype, out=out)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mean(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return m.mean(axis=self.axis, dtype=self.ret_dtype)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mean_with_out(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n if self.axis is None:\n shape = ()\n else:\n shape = list(m.shape)\n shape[self.axis] = 1\n shape = tuple(shape)\n out = xp.empty(shape, dtype=self.ret_dtype)\n if xp is numpy:\n # TODO(unno): numpy.matrix is used for scipy.sparse though\n # cupy.ndarray is used for cupyx.scipy.sparse.\n out = xp.asmatrix(out)\n return m.mean(axis=self.axis, dtype=self.ret_dtype, out=out)\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],\n}))\[email protected]_requires('scipy')\nclass TestCsrMatrixScipyCompressed(unittest.TestCase):\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_get_shape(self, xp, sp):\n return _make(xp, sp, self.dtype).get_shape()\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_getnnz(self, xp, sp):\n return _make(xp, sp, self.dtype).getnnz()\n\n\[email 
protected](*testing.product({\n # TODO(takagi): Test dtypes\n 'axis': [None, -2, -1, 0, 1],\n 'dense': [False, True], # means a sparse matrix but all elements filled\n}))\[email protected]_requires('scipy>=0.19.0')\nclass TestCsrMatrixScipyCompressedMinMax(unittest.TestCase):\n def _make_data_min(self, xp, sp, dense=False):\n dm_data = testing.shaped_random((10, 20), xp=xp, scale=1.0)\n if not dense:\n dm_data[abs(dm_data) < 0.95] = 0\n return sp.csr_matrix(xp.array(dm_data))\n\n def _make_data_max(self, xp, sp, dense=False):\n return -self._make_data_min(xp, sp, dense=dense)\n\n def _make_data_min_explicit(self, xp, sp, axis):\n dm_data = testing.shaped_random((10, 20), xp=xp, scale=1.0)\n if xp is cupy:\n dm_data[dm_data < 0.95] = 0\n else:\n # As SciPy sparse matrix does not have `explicit` parameter, we\n # make SciPy inputs such that SciPy's spmatrix.min(axis=axis)\n # returns the same value as CuPy's spmatrix.min(axis=axis,\n # explicit=True).\n\n # Put infinity instead of zeros so spmatrix.min(axis=axis) returns\n # the smallest numbers except for zero.\n dm_data[dm_data < 0.95] = numpy.inf\n\n if axis is None:\n # If all elements in the array are set to infinity, we make it\n # have at least a zero so SciPy's spmatrix.min(axis=None)\n # returns zero.\n if numpy.isinf(dm_data).all():\n dm_data[0, 0] = 0\n else:\n if axis < 0:\n axis += 2\n\n # If all elements in a row/column are set to infinity, we make\n # it have at least a zero so spmatrix.min(axis=axis) returns\n # zero for the row/column.\n mask = numpy.zeros_like(dm_data, dtype=numpy.bool_)\n if axis == 0:\n rows = dm_data.argmin(axis=0)\n cols = numpy.arange(20)\n else:\n rows = numpy.arange(10)\n cols = dm_data.argmin(axis=1)\n mask[rows, cols] = numpy.isinf(dm_data[rows, cols])\n dm_data[mask] = 0\n\n return sp.csr_matrix(xp.array(dm_data))\n\n def _make_data_max_explicit(self, xp, sp, axis):\n return -self._make_data_min_explicit(xp, sp, axis=axis)\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_min(self, xp, sp):\n data = self._make_data_min(xp, sp, dense=self.dense)\n return data.min(axis=self.axis)\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_min_explicit(self, xp, sp):\n data = self._make_data_min_explicit(xp, sp, axis=self.axis)\n if xp is cupy:\n return data.min(axis=self.axis, explicit=True)\n else:\n return data.min(axis=self.axis)\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_max(self, xp, sp):\n data = self._make_data_max(xp, sp, dense=self.dense)\n return data.max(axis=self.axis)\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_max_explicit(self, xp, sp):\n data = self._make_data_max_explicit(xp, sp, axis=self.axis)\n if xp is cupy:\n return data.max(axis=self.axis, explicit=True)\n else:\n return data.max(axis=self.axis)\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_argmin(self, xp, sp):\n # TODO(takagi) Fix axis=None\n if self.axis is None:\n pytest.skip()\n data = self._make_data_min(xp, sp, dense=self.dense)\n return data.argmin(axis=self.axis)\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_argmax(self, xp, sp):\n # TODO(takagi) Fix axis=None\n if self.axis is None:\n pytest.skip()\n data = self._make_data_max(xp, sp, dense=self.dense)\n return data.argmax(axis=self.axis)\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],\n}))\[email protected]_requires('scipy')\nclass TestCsrMatrixData(unittest.TestCase):\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def 
test_dtype(self, xp, sp):\n return _make(xp, sp, self.dtype).dtype\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_abs(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return abs(m)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_neg(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return -m\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_astype(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n if numpy.dtype(self.dtype).kind == 'c':\n t = 'D'\n else:\n t = 'd'\n return m.astype(t)\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_count_nonzero(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return m.count_nonzero()\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_power(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return m.power(2)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_power_with_dtype(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n if numpy.dtype(self.dtype).kind == 'c':\n t = 'D'\n else:\n t = 'd'\n return m.power(2, t)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mean_axis_None(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return m.mean(axis=None)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mean_axis_0(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return m.mean(axis=0)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mean_axis_1(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return m.mean(axis=1)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mean_axis_negative_1(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return m.mean(axis=-1)\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mean_axis_negative_2(self, xp, sp):\n m = _make(xp, sp, self.dtype)\n return m.mean(axis=-2)\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float32, numpy.float64],\n 'ufunc': [\n 'arcsin', 'arcsinh', 'arctan', 'arctanh', 'ceil', 'deg2rad', 'expm1',\n 'floor', 'log1p', 'rad2deg', 'rint', 'sign', 'sin', 'sinh', 'sqrt',\n 'tan', 'tanh', 'trunc',\n ],\n}))\[email protected]_requires('scipy')\nclass TestUfunc(unittest.TestCase):\n\n @testing.numpy_cupy_allclose(sp_name='sp', atol=1e-5)\n def test_ufun(self, xp, sp):\n x = _make(xp, sp, self.dtype)\n x.data *= 0.1\n func = getattr(x, self.ufunc)\n complex_unsupported = {'ceil', 'deg2rad', 'floor', 'rad2deg', 'trunc'}\n if (numpy.dtype(self.dtype).kind == 'c' and\n self.ufunc in complex_unsupported):\n with self.assertRaises(TypeError):\n func()\n return numpy.array(0)\n else:\n return func()\n\n\nclass TestIsspmatrixCsr(unittest.TestCase):\n\n def test_csr(self):\n x = sparse.csr_matrix(\n (cupy.array([], 'f'),\n cupy.array([], 'i'),\n cupy.array([0], 'i')),\n shape=(0, 0), dtype='f')\n assert sparse.isspmatrix_csr(x) is True\n\n def test_csc(self):\n x = sparse.csr_matrix(\n (cupy.array([], 'f'),\n cupy.array([], 'i'),\n cupy.array([0], 'i')),\n shape=(0, 0), dtype='f')\n assert sparse.isspmatrix_csc(x) is False\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float32, numpy.float64, cupy.complex64, cupy.complex128],\n}))\[email protected]_requires('scipy>=1.4.0')\nclass TestCsrMatrixGetitem(unittest.TestCase):\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_getitem_int_int(self, xp, sp):\n assert _make(xp, sp, self.dtype)[0, 1] == 1\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_getitem_int_int_not_found(self, xp, sp):\n assert _make(xp, sp, self.dtype)[1, 1] == 0\n\n @testing.numpy_cupy_equal(sp_name='sp')\n def test_getitem_int_int_negative(self, xp, sp):\n assert _make(xp, sp, self.dtype)[-1, 
-2] == 3\n\n def test_getitem_int_int_too_small_row(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n with pytest.raises(IndexError):\n _make(xp, sp, self.dtype)[-4, 0]\n\n def test_getitem_int_int_too_large_row(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n with pytest.raises(IndexError):\n _make(xp, sp, self.dtype)[3, 0]\n\n def test_getitem_int_int_too_small_col(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n with pytest.raises(IndexError):\n _make(xp, sp, self.dtype)[0, -5]\n\n def test_getitem_int_int_too_large_col(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n with pytest.raises(IndexError):\n _make(xp, sp, self.dtype)[0, 4]\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_getitem_int(self, xp, sp):\n return _make(xp, sp, self.dtype)[1]\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_getitem_int_negative(self, xp, sp):\n return _make(xp, sp, self.dtype)[-1]\n\n def test_getitem_int_to_small(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n with pytest.raises(IndexError):\n _make(xp, sp, self.dtype)[-4]\n\n def test_getitem_int_to_large(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n with pytest.raises(IndexError):\n _make(xp, sp, self.dtype)[3]\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_getitem_int_none_slice(self, xp, sp):\n return _make(xp, sp, self.dtype)[1, :]\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_getitem_negative_int_none_slice(self, xp, sp):\n return _make(xp, sp, self.dtype)[-1, :]\n\n def test_getitem_int_too_small_none_slice(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n with pytest.raises(IndexError):\n _make(xp, sp, self.dtype)[-4, :]\n\n def test_getitem_int_too_large_none_slice(self):\n for xp, sp in ((numpy, scipy.sparse), (cupy, sparse)):\n with pytest.raises(IndexError):\n _make(xp, sp, self.dtype)[3, :]\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_getitem_slice(self, xp, sp):\n return _make(xp, sp, self.dtype)[1:3]\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_getitem_slice_negative(self, xp, sp):\n return _make(xp, sp, self.dtype)[-2:-1]\n\n # SciPy prior to 1.4 has bugs where either an IndexError is raised or a\n # segfault occurs instead of returning an empty slice.\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_getitem_slice_start_larger_than_stop(self, xp, sp):\n return _make(xp, sp, self.dtype)[3:2]\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_getitem_ellipsis(self, xp, sp):\n return _make(xp, sp, self.dtype)[...]\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_getitem_int_ellipsis(self, xp, sp):\n return _make(xp, sp, self.dtype)[1, ...]\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_getitem_rowslice_all(self, xp, sp):\n # This test is adapted from Scipy\n return _make(xp, sp, self.dtype)[slice(None, None, None)]\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_getitem_rowslice_negative_stop(self, xp, sp):\n # This test is adapted from Scipy\n return _make(xp, sp, self.dtype)[slice(1, -2, 2)]\n\n def test_getrow(self):\n\n # This test is adapted from Scipy's CSR tests\n N = 10\n X = testing.shaped_random((N, N), cupy, seed=0)\n X[X > 0.7] = 0\n Xcsr = sparse.csr_matrix(X)\n\n for i in range(N):\n arr_row = X[i:i + 1, :]\n csr_row = Xcsr.getrow(i)\n assert sparse.isspmatrix_csr(csr_row)\n assert (arr_row == csr_row.toarray()).all()\n\n def test_getcol(self):\n # This test is adapted from Scipy's CSR tests\n N 
= 10\n X = testing.shaped_random((N, N), cupy, seed=0)\n X[X > 0.7] = 0\n Xcsr = sparse.csr_matrix(X)\n\n for i in range(N):\n arr_col = X[:, i:i + 1]\n csr_col = Xcsr.getcol(i)\n\n assert sparse.isspmatrix_csr(csr_col)\n assert (arr_col == csr_col.toarray()).all()\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float32, numpy.float64, cupy.complex64, cupy.complex128],\n}))\[email protected]_requires('scipy>=1.4.0')\nclass TestCsrMatrixGetitem2(unittest.TestCase):\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_getitem_slice_start_too_small(self, xp, sp):\n return _make(xp, sp, self.dtype)[-4:None]\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_getitem_slice_start_too_large(self, xp, sp):\n return _make(xp, sp, self.dtype)[4:None]\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_getitem_slice_stop_too_small(self, xp, sp):\n return _make(xp, sp, self.dtype)[None:-4]\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_getitem_slice_stop_too_large(self, xp, sp):\n return _make(xp, sp, self.dtype)[None:4]\n\n\n# CUB SpMV works only when the matrix size is nonzero\[email protected](*testing.product({\n 'make_method': ['_make', '_make_unordered', '_make_duplicate'],\n 'dtype': [numpy.float32, numpy.float64, cupy.complex64, cupy.complex128],\n}))\[email protected]_requires('scipy')\[email protected]\[email protected](cupy.cuda.cub.available, 'The CUB routine is not enabled')\nclass TestCubSpmv(unittest.TestCase):\n\n def setUp(self):\n self.old_accelerators = _accelerator.get_routine_accelerators()\n _accelerator.set_routine_accelerators(['cub'])\n\n def tearDown(self):\n _accelerator.set_routine_accelerators(self.old_accelerators)\n\n @property\n def make(self):\n return globals()[self.make_method]\n\n @testing.numpy_cupy_allclose(sp_name='sp')\n def test_mul_dense_vector(self, xp, sp):\n m = self.make(xp, sp, self.dtype)\n x = xp.arange(4).astype(self.dtype)\n if xp is numpy:\n return m * x\n\n # xp is cupy, first ensure we really use CUB\n func = 'cupyx.scipy.sparse.csr.cub.device_csrmv'\n with testing.AssertFunctionIsCalled(func):\n m * x\n # ...then perform the actual computation\n return m * x\n\n\[email protected](*testing.product({\n 'a_dtype': ['float32', 'float64', 'complex64', 'complex128'],\n 'b_dtype': ['float32', 'float64', 'complex64', 'complex128'],\n 'shape': [(4, 25), (10, 10), (25, 4)],\n 'nz_rate': [0.1, 0.5],\n 'opt': ['maximum', 'minimum'],\n}))\[email protected]_requires('scipy')\[email protected]\nclass TestCsrMatrixMaximumMinimum(unittest.TestCase):\n\n def _make_array(self, shape, dtype, xp):\n dtype = numpy.dtype(dtype)\n if dtype.char in 'fF':\n real_dtype = 'float32'\n elif dtype.char in 'dD':\n real_dtype = 'float64'\n a = testing.shaped_random(shape, xp, dtype=real_dtype, scale=2)\n a = (a - 1) / self.nz_rate\n a[a > 1] = 0\n a[a < -1] = 0\n return a\n\n def _make_matrix(self, shape, dtype, xp):\n dtype = numpy.dtype(dtype)\n a = self._make_array(shape, dtype, xp)\n if dtype.char in 'FD':\n a = a + 1j * self._make_array(shape, dtype, xp)\n return a\n\n def _make_sp_matrix(self, dtype, xp, sp):\n return sp.csr_matrix(self._make_matrix(self.shape, dtype, xp))\n\n def _make_sp_matrix_row(self, dtype, xp, sp):\n shape = 1, self.shape[1]\n return sp.csr_matrix(self._make_matrix(shape, dtype, xp))\n\n def _make_sp_matrix_col(self, dtype, xp, sp):\n shape = self.shape[0], 1\n return sp.csr_matrix(self._make_matrix(shape, dtype, xp))\n\n def _make_sp_matrix_shape(self, shape, dtype, xp, sp):\n return 
sp.csr_matrix(self._make_matrix(shape, dtype, xp))\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_sparse(self, xp, sp):\n a = self._make_sp_matrix(self.a_dtype, xp, sp)\n b = self._make_sp_matrix(self.b_dtype, xp, sp)\n return getattr(a, self.opt)(b)\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_sparse_row(self, xp, sp):\n a = self._make_sp_matrix(self.a_dtype, xp, sp)\n b = self._make_sp_matrix_row(self.b_dtype, xp, sp)\n if xp == numpy:\n # SciPy does not support sparse broadcasting\n return getattr(a, self.opt)(b.toarray())\n else:\n return getattr(a, self.opt)(b).toarray()\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_sparse_col(self, xp, sp):\n a = self._make_sp_matrix(self.a_dtype, xp, sp)\n b = self._make_sp_matrix_col(self.b_dtype, xp, sp)\n if xp == numpy:\n # SciPy does not support sparse broadcasting\n return getattr(a, self.opt)(b.toarray())\n else:\n return getattr(a, self.opt)(b).toarray()\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_dense(self, xp, sp):\n a = self._make_sp_matrix(self.a_dtype, xp, sp)\n b = self._make_sp_matrix(self.b_dtype, xp, sp).toarray()\n return getattr(a, self.opt)(b)\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_dense_row(self, xp, sp):\n a = self._make_sp_matrix(self.a_dtype, xp, sp)\n b = self._make_sp_matrix_row(self.b_dtype, xp, sp).toarray()\n return getattr(a, self.opt)(b)\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_dense_col(self, xp, sp):\n a = self._make_sp_matrix(self.a_dtype, xp, sp)\n b = self._make_sp_matrix_col(self.b_dtype, xp, sp).toarray()\n return getattr(a, self.opt)(b)\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_scalar_plus(self, xp, sp):\n a = self._make_sp_matrix(self.a_dtype, xp, sp)\n return getattr(a, self.opt)(0.5)\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_scalar_minus(self, xp, sp):\n a = self._make_sp_matrix(self.a_dtype, xp, sp)\n return getattr(a, self.opt)(-0.5)\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_scalar_zero(self, xp, sp):\n a = self._make_sp_matrix(self.a_dtype, xp, sp)\n return getattr(a, self.opt)(0)\n\n def test_ng_shape(self):\n xp, sp = cupy, sparse\n a = self._make_sp_matrix(self.a_dtype, xp, sp)\n for i, j in ((-1, 0), (1, 0), (0, -1), (0, 1)):\n shape = self.shape[0] + i, self.shape[1] + j\n b = self._make_sp_matrix_shape(shape, self.b_dtype, xp, sp)\n with self.assertRaises(ValueError):\n getattr(a, self.opt)(b)\n\n\[email protected](*testing.product({\n 'a_dtype': ['float32', 'float64', 'complex64', 'complex128'],\n 'b_dtype': ['float32', 'float64', 'complex64', 'complex128'],\n 'shape': [(6, 15), (15, 6)],\n 'opt': ['_eq_', '_ne_', '_lt_', '_gt_', '_le_', '_ge_'],\n}))\[email protected]_requires('scipy>=1.2')\[email protected]\nclass TestCsrMatrixComparison(unittest.TestCase):\n nz_rate = 0.3\n\n def _make_array(self, shape, dtype, xp):\n dtype = numpy.dtype(dtype)\n if dtype.char in 'fF':\n real_dtype = 'float32'\n elif dtype.char in 'dD':\n real_dtype = 'float64'\n a = testing.shaped_random(shape, xp, dtype=real_dtype, scale=2)\n a = (a - 1) / self.nz_rate\n a[a > 1] = 0\n a[a < -1] = 0\n return a\n\n def _make_matrix(self, shape, dtype, xp):\n dtype = numpy.dtype(dtype)\n a = self._make_array(shape, dtype, xp)\n if dtype.char in 'FD':\n a = a + 1j * self._make_array(shape, dtype, xp)\n return a\n\n def _make_sp_matrix(self, dtype, xp, sp):\n return sp.csr_matrix(self._make_matrix(self.shape, dtype, xp))\n\n def _make_sp_matrix_row(self, dtype, xp, sp):\n 
shape = 1, self.shape[1]\n return sp.csr_matrix(self._make_matrix(shape, dtype, xp))\n\n def _make_sp_matrix_col(self, dtype, xp, sp):\n shape = self.shape[0], 1\n return sp.csr_matrix(self._make_matrix(shape, dtype, xp))\n\n def _make_sp_matrix_shape(self, shape, dtype, xp, sp):\n return sp.csr_matrix(self._make_matrix(shape, dtype, xp))\n\n def _compare(self, a, b):\n if self.opt == '_eq_':\n return a == b\n elif self.opt == '_ne_':\n return a != b\n elif self.opt == '_lt_':\n return a < b\n elif self.opt == '_gt_':\n return a > b\n elif self.opt == '_le_':\n return a <= b\n elif self.opt == '_ge_':\n return a >= b\n\n @contextlib.contextmanager\n def _ignore_efficiency_warning(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', sparse.SparseEfficiencyWarning)\n yield\n\n @contextlib.contextmanager\n def _assert_warns_efficiency(self, sp, scalar_rhs=None):\n if scalar_rhs is None and self._compare(0, 0):\n with testing.assert_warns(sp.SparseEfficiencyWarning):\n yield\n elif scalar_rhs is not None and self._compare(0, scalar_rhs):\n if sp is sparse: # cupy\n # TODO(kataoka): Test it, too. But, it seems the current\n # implementation does not depend on the scalar value.\n with self._ignore_efficiency_warning():\n yield\n else: # scipy\n with testing.assert_warns(sp.SparseEfficiencyWarning):\n yield\n else:\n yield\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_sparse(self, xp, sp):\n a = self._make_sp_matrix(self.a_dtype, xp, sp)\n b = self._make_sp_matrix(self.b_dtype, xp, sp)\n with self._assert_warns_efficiency(sp):\n return self._compare(a, b)\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_sparse_row(self, xp, sp):\n a = self._make_sp_matrix(self.a_dtype, xp, sp)\n b = self._make_sp_matrix_row(self.b_dtype, xp, sp)\n if xp == numpy:\n # SciPy does not support sparse broadcasting\n return self._compare(a, b.toarray())\n else:\n with self._assert_warns_efficiency(sp):\n return self._compare(a, b).toarray()\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_sparse_col(self, xp, sp):\n a = self._make_sp_matrix(self.a_dtype, xp, sp)\n b = self._make_sp_matrix_col(self.b_dtype, xp, sp)\n if xp == numpy:\n # SciPy does not support sparse broadcasting\n return self._compare(a, b.toarray())\n else:\n with self._assert_warns_efficiency(sp):\n return self._compare(a, b).toarray()\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_dense(self, xp, sp):\n a = self._make_sp_matrix(self.a_dtype, xp, sp)\n b = self._make_sp_matrix(self.b_dtype, xp, sp).toarray()\n return self._compare(a, b)\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_dense_row(self, xp, sp):\n a = self._make_sp_matrix(self.a_dtype, xp, sp)\n b = self._make_sp_matrix_row(self.b_dtype, xp, sp).toarray()\n return self._compare(a, b)\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_dense_col(self, xp, sp):\n a = self._make_sp_matrix(self.a_dtype, xp, sp)\n b = self._make_sp_matrix_col(self.b_dtype, xp, sp).toarray()\n return self._compare(a, b)\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_scalar_plus(self, xp, sp):\n a = self._make_sp_matrix(self.a_dtype, xp, sp)\n with self._assert_warns_efficiency(sp, 0.5):\n return self._compare(a, 0.5)\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_scalar_minus(self, xp, sp):\n a = self._make_sp_matrix(self.a_dtype, xp, sp)\n with self._assert_warns_efficiency(sp, -0.5):\n return self._compare(a, -0.5)\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def 
test_scalar_zero(self, xp, sp):\n if self.opt in ('_le_', '_ge_'):\n # <= and >= with 0 are not supported by SciPy\n pytest.skip()\n a = self._make_sp_matrix(self.a_dtype, xp, sp)\n with self._assert_warns_efficiency(sp, 0):\n return self._compare(a, 0)\n\n @testing.numpy_cupy_array_equal(sp_name='sp')\n def test_scalar_nan(self, xp, sp):\n a = self._make_sp_matrix(self.a_dtype, xp, sp)\n with self._assert_warns_efficiency(sp, numpy.nan):\n return self._compare(a, numpy.nan)\n\n def test_ng_shape(self):\n xp, sp = cupy, sparse\n a = self._make_sp_matrix(self.a_dtype, xp, sp)\n for i, j in ((-1, 0), (1, 0), (0, -1), (0, 1)):\n shape = self.shape[0] + i, self.shape[1] + j\n b = self._make_sp_matrix_shape(shape, self.b_dtype, xp, sp)\n with self.assertRaises(ValueError):\n with self._ignore_efficiency_warning():\n self._compare(a, b)\n\n\[email protected](*testing.product({\n 'shape': [(8, 5), (5, 5), (5, 8)],\n}))\[email protected]_requires('scipy>=1.5.0')\[email protected]\nclass TestCsrMatrixDiagonal(unittest.TestCase):\n density = 0.5\n\n def _make_matrix(self, dtype):\n a = testing.shaped_random(self.shape, numpy, dtype=dtype)\n mask = testing.shaped_random(self.shape, numpy, dtype='f', scale=1.0)\n a[mask > self.density] = 0\n scipy_a = scipy.sparse.csr_matrix(a)\n cupyx_a = sparse.csr_matrix(cupy.array(a))\n return scipy_a, cupyx_a\n\n @testing.for_dtypes('fdFD')\n def test_diagonal(self, dtype):\n scipy_a, cupyx_a = self._make_matrix(dtype)\n m, n = self.shape\n for k in range(-m, n+1):\n scipy_diag = scipy_a.diagonal(k=k)\n cupyx_diag = cupyx_a.diagonal(k=k)\n testing.assert_allclose(scipy_diag, cupyx_diag)\n\n def _test_setdiag(self, scipy_a, cupyx_a, x, k):\n scipy_a = scipy_a.copy()\n cupyx_a = cupyx_a.copy()\n scipy_a.setdiag(x, k=k)\n cupyx_a.setdiag(cupy.array(x), k=k)\n testing.assert_allclose(scipy_a.data, cupyx_a.data)\n testing.assert_array_equal(scipy_a.indices, cupyx_a.indices)\n testing.assert_array_equal(scipy_a.indptr, cupyx_a.indptr)\n\n @testing.for_dtypes('fdFD')\n def test_setdiag(self, dtype):\n scipy_a, cupyx_a = self._make_matrix(dtype)\n m, n = self.shape\n for k in range(-m+1, n):\n m_st, n_st = max(0, -k), max(0, k)\n for d in (-1, 0, 1):\n x_len = min(m - m_st, n - n_st) + d\n if x_len <= 0:\n continue\n x = numpy.ones((x_len,), dtype=dtype)\n self._test_setdiag(scipy_a, cupyx_a, x, k)\n\n @testing.for_dtypes('fdFD')\n def test_setdiag_scalar(self, dtype):\n scipy_a, cupyx_a = self._make_matrix(dtype)\n x = numpy.array(1.0, dtype=dtype)\n m, n = self.shape\n for k in range(-m+1, n):\n self._test_setdiag(scipy_a, cupyx_a, x, k)\n\n def test_setdiag_invalid(self):\n dtype = 'f'\n scipy_a, cupyx_a = self._make_matrix(dtype)\n x = numpy.array(1.0, dtype=dtype)\n m, n = self.shape\n for k in (-m, n):\n with self.assertRaises(ValueError):\n scipy_a.setdiag(x, k=k)\n with self.assertRaises(ValueError):\n cupyx_a.setdiag(x, k=k)\n"
] | [
[
"numpy.arange",
"numpy.issubdtype",
"numpy.dtype",
"numpy.ones",
"numpy.zeros_like",
"numpy.array",
"numpy.isinf"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
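The cupyx sparse tests in the row above exercise elementwise comparisons of CSR matrices against sparse, dense, and scalar operands, and check that a `SparseEfficiencyWarning` fires whenever the comparison is true at implicit zeros. A minimal CPU-only sketch of that behaviour using SciPy alone (the GPU half would mirror it with `cupyx.scipy.sparse`) might look like the following; the shapes and densities are arbitrary choices for illustration:

```python
# Hypothetical illustration only -- not part of the dataset row above.
import warnings

import scipy.sparse as sp

a = sp.random(4, 5, density=0.3, format="csr", random_state=0)
b = sp.random(4, 5, density=0.3, format="csr", random_state=1)

with warnings.catch_warnings():
    # "a == b" is True at every position where both matrices store an
    # implicit zero, so the result is mostly dense and SciPy warns about it.
    warnings.simplefilter("ignore", sp.SparseEfficiencyWarning)
    eq = a == b            # sparse boolean result, expensive
ne = a != b                # nonzero only where the values differ, cheap

print(eq.toarray())
print(ne.toarray())
```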
FelixAbrahamsson/zounds | [
"197c358acf3bea4252cfc2561da70cbe799e2c75",
"197c358acf3bea4252cfc2561da70cbe799e2c75",
"197c358acf3bea4252cfc2561da70cbe799e2c75"
] | [
"zounds/spectral/frequencyscale.py",
"examples/pytorch_autoencoder_raw_audio.py",
"zounds/learn/test_multiply.py"
] | [
"\nimport numpy as np\nimport bisect\n\n\nclass Hertz(float):\n def __init__(self, hz):\n try:\n self.hz = hz.hz\n except AttributeError:\n self.hz = hz\n\n def __neg__(self):\n return Hertz(-self.hz)\n\n def __add__(self, other):\n try:\n other = other.hz\n except AttributeError:\n pass\n return Hertz(self.hz + other)\n\n def __float__(self):\n return self.hz\n\n\nHz = Hertz\n\n\n# TODO: What commonalities can be factored out of this class and TimeSlice?\nclass FrequencyBand(object):\n \"\"\"\n Represents an interval, or band of frequencies in hertz (cycles per second)\n\n Args:\n start_hz (float): The lower bound of the frequency band in hertz\n stop_hz (float): The upper bound of the frequency band in hertz\n\n Examples::\n >>> import zounds\n >>> band = zounds.FrequencyBand(500, 1000)\n >>> band.center_frequency\n 750.0\n >>> band.bandwidth\n 500\n \"\"\"\n\n def __init__(self, start_hz, stop_hz):\n super(FrequencyBand, self).__init__()\n if stop_hz <= start_hz:\n raise ValueError('stop_hz must be greater than start_hz')\n self.stop_hz = stop_hz\n self.start_hz = start_hz\n\n def __eq__(self, other):\n try:\n return \\\n self.start_hz == other.start_hz \\\n and self.stop_hz == other.stop_hz\n except AttributeError:\n return super(FrequencyBand, self).__eq__(other)\n\n def __hash__(self):\n return (self.__class__.__name__, self.start_hz, self.stop_hz).__hash__()\n\n def intersect(self, other):\n \"\"\"\n Return the intersection between this frequency band and another.\n\n Args:\n other (FrequencyBand): the instance to intersect with\n\n Examples::\n >>> import zounds\n >>> b1 = zounds.FrequencyBand(500, 1000)\n >>> b2 = zounds.FrequencyBand(900, 2000)\n >>> intersection = b1.intersect(b2)\n >>> intersection.start_hz, intersection.stop_hz\n (900, 1000)\n \"\"\"\n lowest_stop = min(self.stop_hz, other.stop_hz)\n highest_start = max(self.start_hz, other.start_hz)\n return FrequencyBand(highest_start, lowest_stop)\n\n @classmethod\n def audible_range(cls, samplerate):\n return FrequencyBand(Hz(20), Hz(samplerate.nyquist))\n\n def bandwidth_ratio(self, other):\n return other.bandwidth / self.bandwidth\n\n def intersection_ratio(self, other):\n intersection = self.intersect(other)\n return self.bandwidth_ratio(intersection)\n\n @staticmethod\n def from_start(start_hz, bandwidth_hz):\n \"\"\"\n Produce a :class:`FrequencyBand` instance from a lower bound and\n bandwidth\n\n Args:\n start_hz (float): the lower bound of the desired FrequencyBand\n bandwidth_hz (float): the bandwidth of the desired FrequencyBand\n\n \"\"\"\n return FrequencyBand(start_hz, start_hz + bandwidth_hz)\n\n @staticmethod\n def from_center(center_hz, bandwidth_hz):\n half_bandwidth = bandwidth_hz / 2\n return FrequencyBand(\n center_hz - half_bandwidth, center_hz + half_bandwidth)\n\n @property\n def bandwidth(self):\n \"\"\"\n The span of this frequency band, in hertz\n \"\"\"\n return self.stop_hz - self.start_hz\n\n @property\n def center_frequency(self):\n return self.start_hz + (self.bandwidth / 2)\n\n def __repr__(self):\n return '''FrequencyBand(\nstart_hz={start_hz},\nstop_hz={stop_hz},\ncenter={center},\nbandwidth={bandwidth})'''.format(\n start_hz=self.start_hz,\n stop_hz=self.stop_hz,\n center=self.center_frequency,\n bandwidth=self.bandwidth)\n\n\nclass FrequencyScale(object):\n \"\"\"\n Represents a set of frequency bands with monotonically increasing start\n frequencies\n\n Args:\n frequency_band (FrequencyBand): A band representing the entire span of\n this scale. 
E.g., one might want to generate a scale spanning the\n entire range of human hearing by starting with\n :code:`FrequencyBand(20, 20000)`\n n_bands (int): The number of bands in this scale\n always_even (bool): when converting frequency slices to integer indices\n that numpy can understand, should the slice size always be even?\n\n See Also:\n :class:`~zounds.spectral.LinearScale`\n :class:`~zounds.spectral.GeometricScale`\n \"\"\"\n\n def __init__(self, frequency_band, n_bands, always_even=False):\n super(FrequencyScale, self).__init__()\n self.always_even = always_even\n self.n_bands = n_bands\n self.frequency_band = frequency_band\n self._bands = None\n self._starts = None\n self._stops = None\n\n @property\n def bands(self):\n \"\"\"\n An iterable of all bands in this scale\n \"\"\"\n if self._bands is None:\n self._bands = self._compute_bands()\n return self._bands\n\n @property\n def band_starts(self):\n if self._starts is None:\n self._starts = [b.start_hz for b in self.bands]\n return self._starts\n\n @property\n def band_stops(self):\n if self._stops is None:\n self._stops = [b.stop_hz for b in self.bands]\n return self._stops\n\n def _compute_bands(self):\n raise NotImplementedError()\n\n def __len__(self):\n return self.n_bands\n\n @property\n def center_frequencies(self):\n \"\"\"\n An iterable of the center frequencies of each band in this scale\n \"\"\"\n return (band.center_frequency for band in self)\n\n @property\n def bandwidths(self):\n \"\"\"\n An iterable of the bandwidths of each band in this scale\n \"\"\"\n return (band.bandwidth for band in self)\n\n def ensure_overlap_ratio(self, required_ratio=0.5):\n \"\"\"\n Ensure that every adjacent pair of frequency bands meets the overlap\n ratio criteria. This can be helpful in scenarios where a scale is\n being used in an invertible transform, and something like the `constant\n overlap add constraint\n <https://ccrma.stanford.edu/~jos/sasp/Constant_Overlap_Add_COLA_Cases.html>`_\n must be met in order to not introduce artifacts in the reconstruction.\n\n Args:\n required_ratio (float): The required overlap ratio between all\n adjacent frequency band pairs\n\n Raises:\n AssertionError: when the overlap ratio for one or more adjacent\n frequency band pairs is not met\n \"\"\"\n\n msg = \\\n 'band {i}: ratio must be at least {required_ratio} but was {ratio}'\n\n for i in range(0, len(self) - 1):\n b1 = self[i]\n b2 = self[i + 1]\n\n try:\n ratio = b1.intersection_ratio(b2)\n except ValueError:\n ratio = 0\n\n if ratio < required_ratio:\n raise AssertionError(msg.format(**locals()))\n\n @property\n def Q(self):\n \"\"\"\n The quality factor of the scale, or, the ratio of center frequencies\n to bandwidths\n \"\"\"\n return np.array(list(self.center_frequencies)) \\\n / np.array(list(self.bandwidths))\n\n @property\n def start_hz(self):\n \"\"\"\n The lower bound of this frequency scale\n \"\"\"\n return self.frequency_band.start_hz\n\n @property\n def stop_hz(self):\n \"\"\"\n The upper bound of this frequency scale\n \"\"\"\n return self.frequency_band.stop_hz\n\n def _basis(self, other_scale, window):\n weights = np.zeros((len(self), len(other_scale)))\n for i, band in enumerate(self):\n band_slice = other_scale.get_slice(band)\n slce = weights[i, band_slice]\n slce[:] = window * np.ones(len(slce))\n return weights\n\n def apply(self, time_frequency_repr, window):\n basis = self._basis(time_frequency_repr.dimensions[-1].scale, window)\n transformed = np.dot(basis, time_frequency_repr.T).T\n return transformed\n\n def 
__eq__(self, other):\n return \\\n self.__class__ == other.__class__ \\\n and self.frequency_band == other.frequency_band \\\n and self.n_bands == other.n_bands\n\n def __iter__(self):\n return iter(self.bands)\n\n def _construct_scale_from_slice(self, bands):\n freq_band = FrequencyBand(bands[0].start_hz, bands[-1].stop_hz)\n return self.__class__(freq_band, len(bands))\n\n def get_slice(self, frequency_band):\n \"\"\"\n Given a frequency band, and a frequency dimension comprised of\n n_samples, return a slice using integer indices that may be used to\n extract only the frequency samples that intersect with the frequency\n band\n \"\"\"\n index = frequency_band\n\n if isinstance(index, slice):\n types = {\n index.start.__class__,\n index.stop.__class__,\n index.step.__class__\n }\n\n if Hertz not in types:\n return index\n\n try:\n start = Hertz(0) if index.start is None else index.start\n if start < Hertz(0):\n start = self.stop_hz + start\n stop = self.stop_hz if index.stop is None else index.stop\n if stop < Hertz(0):\n stop = self.stop_hz + stop\n frequency_band = FrequencyBand(start, stop)\n except (ValueError, TypeError):\n pass\n\n start_index = bisect.bisect_left(\n self.band_stops, frequency_band.start_hz)\n stop_index = bisect.bisect_left(\n self.band_starts, frequency_band.stop_hz)\n\n if self.always_even and (stop_index - start_index) % 2:\n # KLUDGE: This is simple, but it may make sense to choose move the\n # upper *or* lower bound, based on which one introduces a lower\n # error\n stop_index += 1\n return slice(start_index, stop_index)\n\n def __getitem__(self, index):\n\n try:\n # index is an integer or slice\n bands = self.bands[index]\n except TypeError:\n # index is a frequency band\n bands = self.bands[self.get_slice(index)]\n\n if isinstance(bands, FrequencyBand):\n return bands\n\n return self._construct_scale_from_slice(bands)\n\n def __str__(self):\n cls = self.__class__.__name__\n return '{cls}(band={self.frequency_band}, n_bands={self.n_bands})' \\\n .format(**locals())\n\n def __repr__(self):\n return self.__str__()\n\n\nclass LinearScale(FrequencyScale):\n \"\"\"\n A linear frequency scale with constant bandwidth. Appropriate for use\n with transforms whose coefficients also lie on a linear frequency scale,\n e.g. the FFT or DCT transforms.\n\n Args:\n frequency_band (FrequencyBand): A band representing the entire span of\n this scale. 
E.g., one might want to generate a scale spanning the\n entire range of human hearing by starting with\n :code:`FrequencyBand(20, 20000)`\n n_bands (int): The number of bands in this scale\n always_even (bool): when converting frequency slices to integer indices\n that numpy can understand, should the slice size always be even?\n\n Examples:\n >>> from zounds import FrequencyBand, LinearScale\n >>> scale = LinearScale(FrequencyBand(20, 20000), 10)\n >>> scale\n LinearScale(band=FrequencyBand(\n start_hz=20,\n stop_hz=20000,\n center=10010.0,\n bandwidth=19980), n_bands=10)\n >>> scale.Q\n array([ 0.51001001, 1.51001001, 2.51001001, 3.51001001, 4.51001001,\n 5.51001001, 6.51001001, 7.51001001, 8.51001001, 9.51001001])\n \"\"\"\n\n def __init__(self, frequency_band, n_bands, always_even=False):\n super(LinearScale, self).__init__(frequency_band, n_bands, always_even)\n\n @staticmethod\n def from_sample_rate(sample_rate, n_bands, always_even=False):\n \"\"\"\n Return a :class:`~zounds.spectral.LinearScale` instance whose upper\n frequency bound is informed by the nyquist frequency of the sample rate.\n\n Args:\n sample_rate (SamplingRate): the sample rate whose nyquist frequency\n will serve as the upper frequency bound of this scale\n n_bands (int): the number of evenly-spaced frequency bands\n \"\"\"\n fb = FrequencyBand(0, sample_rate.nyquist)\n return LinearScale(fb, n_bands, always_even=always_even)\n\n def _compute_bands(self):\n freqs = np.linspace(\n self.start_hz, self.stop_hz, self.n_bands, endpoint=False)\n # constant, non-overlapping bandwidth\n bandwidth = freqs[1] - freqs[0]\n return tuple(FrequencyBand(f, f + bandwidth) for f in freqs)\n\n\n# class LogScale(FrequencyScale):\n# def __init__(self, frequency_band, n_bands, always_even=False):\n# super(LogScale, self).__init__(\n# frequency_band, n_bands, always_even=always_even)\n#\n# def _compute_bands(self):\n# center_freqs = np.logspace(\n# np.log10(self.start_hz),\n# np.log10(self.stop_hz),\n# self.n_bands + 1)\n# # variable bandwidth\n# bandwidths = np.diff(center_freqs)\n# return tuple(FrequencyBand.from_center(cf, bw)\n# for (cf, bw) in zip(center_freqs[:-1], bandwidths))\n\n\nclass GeometricScale(FrequencyScale):\n \"\"\"\n A constant-Q scale whose center frequencies progress geometrically rather\n than linearly\n\n Args:\n start_center_hz (int): the center frequency of the first band in the\n scale\n stop_center_hz (int): the center frequency of the last band in the scale\n bandwidth_ratio (float): the center frequency to bandwidth ratio\n n_bands (int): the total number of bands\n\n Examples:\n >>> from zounds import GeometricScale\n >>> scale = GeometricScale(20, 20000, 0.05, 10)\n >>> scale\n GeometricScale(band=FrequencyBand(\n start_hz=19.5,\n stop_hz=20500.0,\n center=10259.75,\n bandwidth=20480.5), n_bands=10)\n >>> scale.Q\n array([ 20., 20., 20., 20., 20., 20., 20., 20., 20., 20.])\n >>> list(scale.center_frequencies)\n [20.000000000000004, 43.088693800637671, 92.831776672255558,\n 200.00000000000003, 430.88693800637651, 928.31776672255558,\n 2000.0000000000005, 4308.8693800637648, 9283.1776672255564,\n 20000.000000000004]\n \"\"\"\n\n def __init__(\n self,\n start_center_hz,\n stop_center_hz,\n bandwidth_ratio,\n n_bands,\n always_even=False):\n self.__bands = [\n FrequencyBand.from_center(cf, cf * bandwidth_ratio)\n for cf in np.geomspace(start_center_hz, stop_center_hz, num=n_bands)\n ]\n band = FrequencyBand(self.__bands[0].start_hz, self.__bands[-1].stop_hz)\n super(GeometricScale, self).__init__(\n band, 
n_bands, always_even=always_even)\n self.start_center_hz = start_center_hz\n self.stop_center_hz = stop_center_hz\n self.bandwidth_ratio = bandwidth_ratio\n\n def _construct_scale_from_slice(self, bands):\n return ExplicitScale(bands)\n\n def __eq__(self, other):\n return \\\n super(GeometricScale, self).__eq__(other) \\\n and self.start_center_hz == other.start_center_hz \\\n and self.stop_center_hz == other.stop_center_hz \\\n and self.bandwidth_ratio == other.bandwidth_ratio\n\n def _compute_bands(self):\n return self.__bands\n\n\nclass ExplicitScale(FrequencyScale):\n \"\"\"\n A scale where the frequency bands are provided explicitly, rather than\n computed\n\n Args:\n bands (list of FrequencyBand): The explicit bands used by this scale\n\n See Also:\n :class:`~zounds.spectral.FrequencyAdaptive`\n \"\"\"\n\n def __init__(self, bands):\n bands = list(bands)\n frequency_band = FrequencyBand(bands[0].start_hz, bands[-1].stop_hz)\n super(ExplicitScale, self).__init__(\n frequency_band, len(bands), always_even=False)\n self._bands = bands\n\n def _construct_scale_from_slice(self, bands):\n return ExplicitScale(bands)\n\n def _compute_bands(self):\n return self._bands\n\n def __eq__(self, other):\n return all([a == b for (a, b) in zip(self, other)])\n\n\nclass Bark(Hertz):\n def __init__(self, bark):\n self.bark = bark\n super(Bark, self).__init__(Bark.to_hz(bark))\n\n @staticmethod\n def to_hz(bark):\n return 300. * ((np.e ** (bark / 6.0)) - (np.e ** (-bark / 6.)))\n\n @staticmethod\n def to_bark(hz):\n return 6. * np.log((hz / 600.) + np.sqrt((hz / 600.) ** 2 + 1))\n\n\ndef equivalent_rectangular_bandwidth(hz):\n return (0.108 * hz) + 24.7\n\n\nclass BarkScale(FrequencyScale):\n def __init__(self, frequency_band, n_bands):\n super(BarkScale, self).__init__(frequency_band, n_bands)\n\n def _compute_bands(self):\n start = Bark.to_bark(self.frequency_band.start_hz)\n stop = Bark.to_bark(self.frequency_band.stop_hz)\n barks = np.linspace(start, stop, self.n_bands)\n center_frequencies_hz = Bark.to_hz(barks)\n bandwidths = equivalent_rectangular_bandwidth(center_frequencies_hz)\n return [\n FrequencyBand.from_center(c, b)\n for c, b in zip(center_frequencies_hz, bandwidths)]\n\n\nclass Mel(Hertz):\n def __init__(self, mel):\n self.mel = mel\n super(Mel, self).__init__(Mel.to_hz(mel))\n\n @staticmethod\n def to_hz(mel):\n return 700 * ((np.e ** (mel / 1127)) - 1)\n\n @staticmethod\n def to_mel(hz):\n return 1127 * np.log(1 + (hz / 700))\n\n\nclass MelScale(FrequencyScale):\n def __init__(self, frequency_band, n_bands):\n super(MelScale, self).__init__(frequency_band, n_bands)\n\n def _compute_bands(self):\n start = Mel.to_mel(self.frequency_band.start_hz)\n stop = Mel.to_mel(self.frequency_band.stop_hz)\n mels = np.linspace(start, stop, self.n_bands)\n center_frequencies_hz = Mel.to_hz(mels)\n bandwidths = equivalent_rectangular_bandwidth(center_frequencies_hz)\n return [\n FrequencyBand.from_center(c, b)\n for c, b in zip(center_frequencies_hz, bandwidths)]\n\n\nclass ChromaScale(FrequencyScale):\n def __init__(self, frequency_band):\n self._a440 = 440.\n self._a = 2 ** (1 / 12.)\n super(ChromaScale, self).__init__(frequency_band, n_bands=12)\n\n def _compute_bands(self):\n raise NotImplementedError()\n\n def get_slice(self, frequency_band):\n raise NotImplementedError()\n\n def _semitones_to_hz(self, semitone):\n return self._a440 * (self._a ** semitone)\n\n def _hz_to_semitones(self, hz):\n \"\"\"\n Convert hertz into a number of semitones above or below some reference\n value, in this case, 
A440\n \"\"\"\n return np.log(hz / self._a440) / np.log(self._a)\n\n def _basis(self, other_scale, window):\n basis = np.zeros((self.n_bands, len(other_scale)))\n\n # for each tone in the twelve-tone scale, generate narrow frequency\n # bands for every octave of that note that falls within the frequency\n # band.\n start_semitones = \\\n int(np.round(self._hz_to_semitones(self.frequency_band.start_hz)))\n stop_semitones = \\\n int(np.round(self._hz_to_semitones(self.frequency_band.stop_hz)))\n\n semitones = np.arange(start_semitones - 1, stop_semitones)\n hz = self._semitones_to_hz(semitones)\n\n bands = []\n for i in range(0, len(semitones) - 2):\n fh, mh, lh = hz[i: i + 3]\n bands.append(FrequencyBand(fh, lh))\n\n for semitone, band in zip(semitones, bands):\n slce = other_scale.get_slice(band)\n chroma_index = semitone % self.n_bands\n slce = basis[chroma_index, slce]\n slce[:] += np.ones(len(slce)) * window\n\n return basis\n",
"import argparse\nfrom random import choice\n\nimport featureflow as ff\nimport numpy as np\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.optim import Adam\n\nimport zounds\nfrom zounds.learn import Conv1d, ConvTranspose1d, to_var, from_var\nfrom zounds.timeseries import categorical, inverse_categorical\n\nsamplerate = zounds.SR11025()\nBaseModel = zounds.resampled(resample_to=samplerate, store_resampled=True)\n\nwindow_size = 8192\nwscheme = zounds.SampleRate(\n frequency=samplerate.frequency * (window_size // 2),\n duration=samplerate.frequency * window_size)\n\n\[email protected]_lmdb_settings('ae', map_size=1e10, user_supplied_id=True)\nclass Sound(BaseModel):\n windowed = zounds.ArrayWithUnitsFeature(\n zounds.SlidingWindow,\n wscheme=wscheme,\n needs=BaseModel.resampled)\n\n mu_law = zounds.ArrayWithUnitsFeature(\n zounds.mu_law,\n needs=windowed)\n\n categorical = zounds.ArrayWithUnitsFeature(\n categorical,\n needs=windowed)\n\n\n# TODO: Factor out the part of the pipeline that starts with samples and\n# shuffled\[email protected]_settings\nclass AutoEncoderPipeline(ff.BaseModel):\n samples = ff.PickleFeature(ff.IteratorNode)\n\n shuffled = ff.PickleFeature(\n zounds.ShuffledSamples,\n nsamples=int(1e5),\n dtype=np.float32,\n needs=samples)\n\n scaled = ff.PickleFeature(\n zounds.InstanceScaling,\n needs=shuffled)\n\n autoencoder = ff.PickleFeature(\n zounds.PyTorchAutoEncoder,\n trainer=ff.Var('trainer'),\n needs=scaled)\n\n pipeline = ff.PickleFeature(\n zounds.PreprocessingPipeline,\n needs=(scaled, autoencoder,),\n store=True)\n\n\[email protected]_settings\nclass CategoricalAutoEncoderPipeline(ff.BaseModel):\n samples = ff.PickleFeature(ff.IteratorNode)\n\n shuffled = ff.PickleFeature(\n zounds.ShuffledSamples,\n nsamples=int(1e5),\n dtype=np.float32,\n needs=samples)\n\n autoencoder = ff.PickleFeature(\n zounds.PyTorchAutoEncoder,\n trainer=ff.Var('trainer'),\n needs=shuffled)\n\n pipeline = ff.PickleFeature(\n zounds.PreprocessingPipeline,\n needs=(autoencoder,),\n store=True)\n\n\nclass EncoderLayer(Conv1d):\n def __init__(self, in_channels, out_channels, kernel_size, stride, padding):\n super(EncoderLayer, self).__init__(\n in_channels, out_channels, kernel_size, stride, padding)\n\n\nclass DecoderLayer(ConvTranspose1d):\n def __init__(\n self,\n in_channels,\n out_channels,\n kernel_size,\n stride,\n padding,\n dropout=True,\n activation=lambda x: F.leaky_relu(x, 0.2)):\n super(DecoderLayer, self).__init__(\n in_channels,\n out_channels,\n kernel_size,\n stride,\n padding,\n activation,\n dropout)\n\n\nclass Encoder(nn.Module):\n def __init__(self, in_channels):\n super(Encoder, self).__init__()\n self.in_channels = in_channels\n self.main = nn.Sequential(\n EncoderLayer(in_channels, 64, 16, 8, 4),\n EncoderLayer(64, 128, 8, 4, 2),\n EncoderLayer(128, 128, 8, 4, 2),\n EncoderLayer(128, 128, 8, 4, 2),\n EncoderLayer(128, 256, 8, 4, 2),\n EncoderLayer(256, 512, 4, 1, 0))\n\n def forward(self, x):\n x = x.view(-1, self.in_channels, window_size)\n return self.main(x).view(-1, 512)\n\n\nclass Decoder(nn.Module):\n def __init__(self, out_channels, output_activation):\n super(Decoder, self).__init__()\n act = output_activation\n self.out_channels = out_channels\n self.main = nn.Sequential(\n DecoderLayer(512, 256, 4, 1, 0),\n DecoderLayer(256, 128, 8, 4, 2),\n DecoderLayer(128, 128, 8, 4, 2),\n DecoderLayer(128, 128, 8, 4, 2),\n DecoderLayer(128, 64, 8, 4, 2),\n DecoderLayer(\n 64, self.out_channels, 16, 8, 4, dropout=False, activation=act))\n\n def 
forward(self, x):\n x = x.view(-1, 512, 1)\n x = self.main(x)\n x = x.view(-1, self.out_channels, window_size)\n x = x.squeeze()\n return x\n\n\nclass AutoEncoder(nn.Module):\n def __init__(self, channels, output_activation):\n super(AutoEncoder, self).__init__()\n self.encoder = Encoder(channels)\n self.decoder = Decoder(channels, output_activation)\n\n def forward(self, x):\n x = self.encoder(x)\n x = self.decoder(x)\n return x\n\n\nclass RawSamplesAutoEncoder(AutoEncoder):\n def __init__(self):\n super(RawSamplesAutoEncoder, self).__init__(\n channels=1, output_activation=F.tanh)\n\n\nclass CategoricalAutoEncoder(AutoEncoder):\n def __init__(self):\n super(CategoricalAutoEncoder, self).__init__(\n channels=256, output_activation=F.log_softmax)\n\n\ndef raw_samples_synthesize(x):\n # TODO: it should be possible to apply windowing at the synthesis step\n synth = zounds.WindowedAudioSynthesizer()\n return synth.synthesize(x)\n\n\ndef categorical_synthesize(x):\n samples = inverse_categorical(x.reshape(-1, 8192, 256))\n samples = zounds.ArrayWithUnits(samples, dimensions=[\n zounds.TimeDimension(*wscheme),\n zounds.TimeDimension(*samplerate)\n ])\n return raw_samples_synthesize(samples)\n\n\ndef preprocess_categorical(x):\n return categorical(x).reshape((-1, 256, 8192))\n\n\nclass CategoricalLoss(nn.NLLLoss):\n def __init__(self):\n super(CategoricalLoss, self).__init__()\n\n def forward(self, input, target):\n input = input.view(-1, 256)\n target = target.view(-1, 256)\n values, indices = target.max(dim=1)\n return super(CategoricalLoss, self).forward(input, indices)\n\n\nclass FrequencyBandLoss(nn.MSELoss):\n def __init__(self):\n super(FrequencyBandLoss, self).__init__()\n\n def forward(self, input, target):\n target_samples = from_var(target).squeeze()\n target_fft = np.fft.rfft(target_samples, axis=-1, norm='ortho')\n target_fft[:, :50] = 0\n recon = np.fft.irfft(target_fft, axis=-1, norm='ortho')\n recon = to_var(recon)\n return super(FrequencyBandLoss, self).forward(input, recon)\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--internet-archive-id',\n type=str,\n help='the internet archive id to use for training')\n parser.add_argument(\n '--epochs',\n type=int,\n help='the number of epochs to train the network')\n parser.add_argument(\n '--force-train',\n action='store_true',\n help='re-train the network, even if it has already been trained')\n parser.add_argument(\n '--categorical',\n action='store_true',\n help='use a categorical distribution of samples')\n args = parser.parse_args()\n\n if args.internet_archive_id:\n zounds.ingest(\n zounds.InternetArchive(args.internet_archive_id),\n Sound,\n multi_threaded=True)\n\n if args.categorical:\n network = CategoricalAutoEncoder()\n loss = CategoricalLoss()\n synthesize = categorical_synthesize\n pipeline_cls = CategoricalAutoEncoderPipeline\n data_preprocessor = label_preprocessor = preprocess_categorical\n batch_size = 16\n else:\n network = RawSamplesAutoEncoder()\n loss = FrequencyBandLoss()\n synthesize = raw_samples_synthesize\n pipeline_cls = AutoEncoderPipeline\n data_preprocessor = label_preprocessor = lambda x: x\n batch_size = 64\n gen = (snd.windowed for snd in Sound\n if args.internet_archive_id in snd._id)\n\n if args.force_train or not AutoEncoderPipeline.exists():\n trainer = zounds.SupervisedTrainer(\n network,\n loss,\n lambda model: Adam(model.parameters(), lr=0.0001),\n epochs=args.epochs,\n batch_size=batch_size,\n holdout_percent=0.25,\n 
data_preprocessor=data_preprocessor,\n label_preprocessor=label_preprocessor)\n\n gen = (snd.windowed for snd in Sound\n if args.internet_archive_id in snd._id)\n pipeline_cls.process(samples=gen, trainer=trainer)\n\n # instantiate the trained pipeline\n pipeline = pipeline_cls()\n\n snds = [snd for snd in Sound if args.internet_archive_id in snd._id]\n snd = choice(snds)\n time_slice = zounds.TimeSlice(duration=zounds.Seconds(10))\n encoded = pipeline.pipeline.transform(\n data_preprocessor(snd.windowed[time_slice]))\n recon = encoded.inverse_transform()\n samples = synthesize(recon)\n\n # start up an in-browser REPL to interact with the results\n app = zounds.ZoundsApp(\n model=Sound,\n audio_feature=Sound.ogg,\n visualization_feature=Sound.windowed,\n globals=globals(),\n locals=locals())\n app.start(8888)\n",
"import unittest2\nimport numpy as np\nfrom .preprocess import Multiply, PreprocessingPipeline\nfrom zounds.util import simple_in_memory_settings\nimport featureflow as ff\n\n\nclass MultiplyTests(unittest2.TestCase):\n def get_model(self, factor):\n @simple_in_memory_settings\n class Model(ff.BaseModel):\n multiply = ff.PickleFeature(\n Multiply,\n factor=factor,\n store=False)\n\n pipeline = ff.PickleFeature(\n PreprocessingPipeline,\n needs=(multiply,),\n store=True)\n\n return Model\n\n def test_can_do_forward_transform_with_scalar(self):\n training = np.random.random_sample((100, 30))\n factor = 10\n Model = self.get_model(factor)\n _id = Model.process(multiply=training)\n model = Model(_id)\n data = np.random.random_sample((10, 30))\n transformed = model.pipeline.transform(data)\n np.testing.assert_allclose(data * factor, transformed.data)\n\n def test_can_do_forward_transform_with_array(self):\n training = np.random.random_sample((100, 30))\n factor = np.random.random_sample(30)\n Model = self.get_model(factor)\n _id = Model.process(multiply=training)\n model = Model(_id)\n data = np.random.random_sample((10, 30))\n transformed = model.pipeline.transform(data)\n np.testing.assert_allclose(data * factor, transformed.data)\n\n def test_raises_if_shapes_do_not_match(self):\n training = np.random.random_sample((100, 30))\n factor = np.random.random_sample(3)\n Model = self.get_model(factor)\n self.assertRaises(ValueError, lambda: Model.process(multiply=training))\n\n def test_can_do_forward_and_backward_transform_with_scalar(self):\n training = np.random.random_sample((100, 30))\n factor = 10\n Model = self.get_model(factor)\n _id = Model.process(multiply=training)\n model = Model(_id)\n data = np.random.random_sample((10, 30))\n transformed = model.pipeline.transform(data)\n recon = transformed.inverse_transform()\n np.testing.assert_allclose(data, recon)\n\n def test_can_do_forward_and_backward_transform_with_array(self):\n training = np.random.random_sample((100, 30))\n factor = np.random.random_sample(30)\n Model = self.get_model(factor)\n _id = Model.process(multiply=training)\n model = Model(_id)\n data = np.random.random_sample((10, 30))\n transformed = model.pipeline.transform(data)\n recon = transformed.inverse_transform()\n np.testing.assert_allclose(data, recon)\n"
] | [
[
"numpy.dot",
"numpy.log",
"numpy.sqrt",
"numpy.linspace",
"numpy.arange",
"numpy.geomspace"
],
[
"torch.nn.functional.leaky_relu",
"numpy.fft.irfft",
"numpy.fft.rfft"
],
[
"numpy.random.random_sample",
"numpy.testing.assert_allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.11",
"1.10",
"1.12",
"1.19",
"1.13",
"1.16",
"1.9",
"1.18",
"1.21",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
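`GeometricScale` in the zounds row above spaces its center frequencies with `numpy.geomspace` and gives each band a width proportional to its center, which is what makes the scale constant-Q. A NumPy-only sketch of that construction (no zounds classes assumed; the numbers are taken from the docstring example) could be:

```python
# Hypothetical illustration only -- not part of the dataset row above.
import numpy as np

start_center_hz, stop_center_hz = 20.0, 20000.0
bandwidth_ratio, n_bands = 0.05, 10

centers = np.geomspace(start_center_hz, stop_center_hz, num=n_bands)
bandwidths = centers * bandwidth_ratio
bands = np.stack([centers - bandwidths / 2,
                  centers + bandwidths / 2], axis=1)   # (start_hz, stop_hz)

# Q = center / bandwidth is the same for every band (20.0 here), which is
# what the GeometricScale docstring shows for bandwidth_ratio = 0.05.
print(centers / bandwidths)
print(bands[0], bands[-1])   # roughly (19.5, 20.5) and (19500.0, 20500.0)
```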
Pugavkomm/NS-analyst | [
"698af0e94f57b431fd77c17c49d4a23f11d21d3f"
] | [
"temp/maintestgraph.py"
] | [
"import sys\nimport time\n\nimport numpy as np\n\nfrom matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\nif is_pyqt5():\n from matplotlib.backends.backend_qt5agg import (\n FigureCanvas, NavigationToolbar2QT as NavigationToolbar)\nelse:\n from matplotlib.backends.backend_qt4agg import (\n FigureCanvas, NavigationToolbar2QT as NavigationToolbar)\nfrom matplotlib.figure import Figure\n\n\nclass ApplicationWindow(QtWidgets.QMainWindow):\n def __init__(self):\n super().__init__()\n self._main = QtWidgets.QWidget()\n self.setCentralWidget(self._main)\n layout = QtWidgets.QVBoxLayout(self._main)\n\n static_canvas = FigureCanvas(Figure(figsize=(5, 3)))\n layout.addWidget(static_canvas)\n self.addToolBar(NavigationToolbar(static_canvas, self))\n\n dynamic_canvas = FigureCanvas(Figure(figsize=(5, 3)))\n layout.addWidget(dynamic_canvas)\n self.addToolBar(QtCore.Qt.BottomToolBarArea,\n NavigationToolbar(dynamic_canvas, self))\n\n self._static_ax = static_canvas.figure.subplots()\n t = np.linspace(0, 10, 501)\n self._static_ax.plot(t, np.tan(t), \".\")\n\n self._dynamic_ax = dynamic_canvas.figure.subplots()\n self._timer = dynamic_canvas.new_timer(\n 100, [(self._update_canvas, (), {})])\n self._timer.start()\n\n def _update_canvas(self):\n self._dynamic_ax.clear()\n t = np.linspace(0, 10, 101)\n # Shift the sinusoid as a function of time.\n self._dynamic_ax.plot(t, np.sin(t + time.time()))\n self._dynamic_ax.figure.canvas.draw()\n\n\nif __name__ == \"__main__\":\n qapp = QtWidgets.QApplication(sys.argv)\n app = ApplicationWindow()\n app.show()\n qapp.exec_()"
] | [
[
"matplotlib.backends.qt_compat.QtWidgets.QApplication",
"matplotlib.backends.backend_qt4agg.NavigationToolbar2QT",
"numpy.linspace",
"matplotlib.figure.Figure",
"matplotlib.backends.qt_compat.QtWidgets.QWidget",
"matplotlib.backends.qt_compat.is_pyqt5",
"numpy.tan",
"matplotlib.backends.qt_compat.QtWidgets.QVBoxLayout"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rjleveque/amrclaw | [
"d7acfe4a71b2515778b134540a015923ce77a3cd"
] | [
"examples/advection_2d_flagregions/setrun.py"
] | [
"\"\"\" \nModule to set up run time parameters for Clawpack.\n\nThe values set in the function setrun are then written out to data files\nthat will be read in by the Fortran code.\n \n\"\"\" \n\nfrom __future__ import absolute_import\nimport os\nimport numpy as np\n\n# used to create ruled rectangle:\nfrom clawpack.amrclaw import region_tools \n\n\n#------------------------------\ndef setrun(claw_pkg='amrclaw'):\n#------------------------------\n \n \"\"\" \n Define the parameters used for running Clawpack.\n\n INPUT:\n claw_pkg expected to be \"amrclaw\" for this setrun.\n\n OUTPUT:\n rundata - object of class ClawRunData \n \n \"\"\" \n \n from clawpack.clawutil import data \n \n \n assert claw_pkg.lower() == 'amrclaw', \"Expected claw_pkg = 'amrclaw'\"\n\n num_dim = 2\n rundata = data.ClawRunData(claw_pkg, num_dim)\n\n #------------------------------------------------------------------\n # Problem-specific parameters to be written to setprob.data:\n #------------------------------------------------------------------\n\n probdata = rundata.new_UserData(name='probdata',fname='setprob.data')\n probdata.add_param('u', 0.5, 'ubar advection velocity')\n probdata.add_param('v', 1.0, 'vbar advection velocity')\n \n #------------------------------------------------------------------\n # Standard Clawpack parameters to be written to claw.data:\n # (or to amr2ez.data for AMR)\n #------------------------------------------------------------------\n\n clawdata = rundata.clawdata # initialized when rundata instantiated\n\n\n # Set single grid parameters first.\n # See below for AMR parameters.\n\n\n # ---------------\n # Spatial domain:\n # ---------------\n\n # Number of space dimensions:\n clawdata.num_dim = num_dim\n \n # Lower and upper edge of computational domain:\n clawdata.lower[0] = 0. # xlower\n clawdata.upper[0] = 1. # xupper\n clawdata.lower[1] = 0. # ylower\n clawdata.upper[1] = 1. 
# yupper\n \n # Number of grid cells:\n clawdata.num_cells[0] = 50 # mx\n clawdata.num_cells[1] = 50 # my\n \n\n # ---------------\n # Size of system:\n # ---------------\n\n # Number of equations in the system:\n clawdata.num_eqn = 1\n\n # Number of auxiliary variables in the aux array (initialized in setaux)\n clawdata.num_aux = 0\n \n # Index of aux array corresponding to capacity function, if there is one:\n clawdata.capa_index = 0\n \n \n # -------------\n # Initial time:\n # -------------\n\n clawdata.t0 = 0.0 \n \n\n # Restart from checkpoint file of a previous run?\n # If restarting, t0 above should be from original run, and the\n # restart_file 'fort.chkNNNNN' specified below should be in \n # the OUTDIR indicated in Makefile.\n\n clawdata.restart = False # True to restart from prior results\n clawdata.restart_file = 'fort.chk00006' # File to use for restart data\n \n \n # -------------\n # Output times:\n #--------------\n\n # Specify at what times the results should be written to fort.q files.\n # Note that the time integration stops after the final output time.\n \n clawdata.output_style = 1\n \n if clawdata.output_style==1:\n # Output ntimes frames at equally spaced times up to tfinal:\n # Can specify num_output_times = 0 for no output\n clawdata.num_output_times = 10\n clawdata.tfinal = 1.0\n clawdata.output_t0 = True # output at initial (or restart) time?\n \n elif clawdata.output_style == 2:\n # Specify a list or numpy array of output times:\n # Include t0 if you want output at the initial time.\n clawdata.output_times = [0., 0.1]\n \n elif clawdata.output_style == 3:\n # Output every step_interval timesteps over total_steps timesteps:\n clawdata.output_step_interval = 2\n clawdata.total_steps = 4\n clawdata.output_t0 = True # output at initial (or restart) time?\n \n\n clawdata.output_format = 'ascii' # 'ascii', 'binary', 'netcdf'\n\n clawdata.output_q_components = 'all' # could be list such as [True,True]\n clawdata.output_aux_components = 'none' # could be list\n clawdata.output_aux_onlyonce = True # output aux arrays only at t0\n \n\n # ---------------------------------------------------\n # Verbosity of messages to screen during integration: \n # ---------------------------------------------------\n\n # The current t, dt, and cfl will be printed every time step\n # at AMR levels <= verbosity. Set verbosity = 0 for no printing.\n # (E.g. verbosity == 2 means print only on levels 1 and 2.)\n clawdata.verbosity = 0\n \n \n\n # --------------\n # Time stepping:\n # --------------\n\n # if dt_variable==True: variable time steps used based on cfl_desired,\n # if dt_variable==False: fixed time steps dt = dt_initial always used.\n clawdata.dt_variable = True\n \n # Initial time step for variable dt. 
\n # (If dt_variable==0 then dt=dt_initial for all steps)\n clawdata.dt_initial = 0.016\n \n # Max time step to be allowed if variable dt used:\n clawdata.dt_max = 1e+99\n \n # Desired Courant number if variable dt used \n clawdata.cfl_desired = 0.9\n # max Courant number to allow without retaking step with a smaller dt:\n clawdata.cfl_max = 1.0\n \n # Maximum number of time steps to allow between output times:\n clawdata.steps_max = 100000\n\n\n # ------------------\n # Method to be used:\n # ------------------\n\n # Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters\n clawdata.order = 2\n \n # Use dimensional splitting?\n clawdata.dimensional_split = 'unsplit'\n \n # For unsplit method, transverse_waves can be \n # 0 or 'none' ==> donor cell (only normal solver used)\n # 1 or 'increment' ==> corner transport of waves\n # 2 or 'all' ==> corner transport of 2nd order corrections too\n clawdata.transverse_waves = 'all'\n \n \n # Number of waves in the Riemann solution:\n clawdata.num_waves = 1\n \n # List of limiters to use for each wave family: \n # Required: len(limiter) == num_waves\n # Some options:\n # 0 or 'none' ==> no limiter (Lax-Wendroff)\n # 1 or 'minmod' ==> minmod\n # 2 or 'superbee' ==> superbee\n # 3 or 'vanleer' ==> van Leer\n # 4 or 'mc' ==> MC limiter\n clawdata.limiter = ['vanleer']\n \n clawdata.use_fwaves = False # True ==> use f-wave version of algorithms\n \n # Source terms splitting:\n # src_split == 0 or 'none' ==> no source term (src routine never called)\n # src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used, \n # src_split == 2 or 'strang' ==> Strang (2nd order) splitting used, not recommended.\n clawdata.source_split = 'none'\n \n \n # --------------------\n # Boundary conditions:\n # --------------------\n\n # Number of ghost cells (usually 2)\n clawdata.num_ghost = 2\n \n # Choice of BCs at xlower and xupper:\n # 0 or 'user' => user specified (must modify bcNamr.f to use this option)\n # 1 or 'extrap' => extrapolation (non-reflecting outflow)\n # 2 or 'periodic' => periodic (must specify this at both boundaries)\n # 3 or 'wall' => solid wall for systems where q(2) is normal velocity\n \n clawdata.bc_lower[0] = 'periodic' # at xlower\n clawdata.bc_upper[0] = 'periodic' # at xupper\n\n clawdata.bc_lower[1] = 'periodic' # at ylower\n clawdata.bc_upper[1] = 'periodic' # at yupper\n \n\n # ---------------\n # Gauges:\n # ---------------\n rundata.gaugedata.gauges = []\n # for gauges append lines of the form [gaugeno, x, y, t1, t2]\n rundata.gaugedata.gauges.append([1, 0.6, 0.4, 0., 10.])\n \n \n # --------------\n # Checkpointing:\n # --------------\n\n # Specify when checkpoint files should be created that can be\n # used to restart a computation.\n\n clawdata.checkpt_style = 0\n\n if clawdata.checkpt_style == 0:\n # Do not checkpoint at all\n pass\n\n elif clawdata.checkpt_style == 1:\n # Checkpoint only at tfinal.\n pass\n\n elif clawdata.checkpt_style == 2:\n # Specify a list of checkpoint times. 
\n clawdata.checkpt_times = [0.1,0.15]\n\n elif clawdata.checkpt_style == 3:\n # Checkpoint every checkpt_interval timesteps (on Level 1)\n # and at the final time.\n clawdata.checkpt_interval = 5\n\n # ---------------\n # AMR parameters:\n # ---------------\n amrdata = rundata.amrdata\n\n\n # max number of refinement levels:\n amrdata.amr_levels_max = 3\n\n # List of refinement ratios at each level (length at least amr_level_max-1)\n amrdata.refinement_ratios_x = [2,2]\n amrdata.refinement_ratios_y = [2,2]\n amrdata.refinement_ratios_t = [2,2]\n\n\n # Specify type of each aux variable in amrdata.auxtype.\n # This must be a list of length num_aux, each element of which is one of:\n # 'center', 'capacity', 'xleft', or 'yleft' (see documentation).\n amrdata.aux_type = []\n\n\n # Flag for refinement based on Richardson error estimater:\n amrdata.flag_richardson = False # use Richardson?\n amrdata.flag_richardson_tol = 0.1 # Richardson tolerance\n \n # Flag for refinement using routine flag2refine:\n amrdata.flag2refine = True # use this?\n amrdata.flag2refine_tol = 0.05 # tolerance used in this routine\n # User can modify flag2refine to change the criterion for flagging.\n # Default: check max-norm of difference between q in a cell and \n # each of its neighbors.\n\n # steps to take on each level L between regriddings of level L+1:\n amrdata.regrid_interval = 2 \n\n # width of buffer zone around flagged points:\n # (typically the same as regrid_interval so waves don't escape):\n amrdata.regrid_buffer_width = 3 \n\n # clustering alg. cutoff for (# flagged pts) / (total # of cells refined)\n # (closer to 1.0 => more small grids may be needed to cover flagged cells)\n amrdata.clustering_cutoff = 0.9\n\n # print info about each regridding up to this level:\n amrdata.verbosity_regrid = 3 \n\n\n # ---------------\n # Regions: (old style rectangles)\n # ---------------\n rundata.regiondata.regions = []\n # to specify regions of refinement append lines of the form\n # [minlevel,maxlevel,t1,t2,x1,x2,y1,y2]\n\n # ---------------\n # NEW flagregions\n # ---------------\n\n flagregions = rundata.flagregiondata.flagregions # initialized to []\n\n # now append as many flagregions as desired to this list:\n from clawpack.amrclaw.data import FlagRegion\n\n # The entire domain restricted to level 1 for illustration:\n # Note that this is a rectangle specified in the new way:\n # (other regions below will force/allow more refinement)\n flagregion = FlagRegion(num_dim=2)\n flagregion.name = 'Region_domain'\n flagregion.minlevel = 1\n flagregion.maxlevel = 1\n flagregion.t1 = 0.\n flagregion.t2 = 1e9\n flagregion.spatial_region_type = 1 # Rectangle\n flagregion.spatial_region = [0.,1.,0.,1.] 
# = [x1,x2,y1,y2]\n flagregions.append(flagregion)\n\n # A more general ruled rectangle:\n flagregion = FlagRegion(num_dim=2)\n flagregion.name = 'Region_triangle'\n flagregion.minlevel = 1\n flagregion.maxlevel = 3\n flagregion.t1 = 0.\n flagregion.t2 = 1e9\n flagregion.spatial_region_type = 2 # Ruled Rectangle\n flagregion.spatial_region_file = \\\n os.path.abspath('RuledRectangle_Triangle.data')\n flagregions.append(flagregion)\n\n # code to make RuledRectangle_Triangle.data:\n rr = region_tools.RuledRectangle()\n rr.method = 1 # piecewiselinear edges between s values\n rr.ixy = 'x' # so s refers to x, lower & upper are limits in y\n rr.s = np.array([0.1, 0.8])\n rr.lower = np.array([0.2, 0.8])\n rr.upper = np.array([0.8, 0.8])\n rr.write('RuledRectangle_Triangle.data')\n \n # A trapezoid:\n flagregion = FlagRegion(num_dim=2)\n flagregion.name = 'Region_trapezoid'\n flagregion.minlevel = 3\n flagregion.maxlevel = 3\n flagregion.t1 = 0.\n flagregion.t2 = 1e9\n flagregion.spatial_region_type = 2 # Ruled Rectangle\n flagregion.spatial_region_file = \\\n os.path.abspath('RuledRectangle_Trapezoid.data')\n flagregions.append(flagregion)\n\n # code to make RuledRectangle_Trapezoid.data:\n rr = region_tools.RuledRectangle()\n rr.method = 1 # piecewiselinear edges between s values\n rr.ixy = 'x' # so s refers to x, lower & upper are limits in y\n rr.s = np.array([0.2, 0.9])\n rr.lower = np.array([0.05, 0.75])\n rr.upper = np.array([0.15, 0.85])\n rr.write('RuledRectangle_Trapezoid.data')\n\n\n # ----- For developers ----- \n # Toggle debugging print statements:\n amrdata.dprint = False # print domain flags\n amrdata.eprint = False # print err est flags\n amrdata.edebug = False # even more err est flags\n amrdata.gprint = False # grid bisection/clustering\n amrdata.nprint = False # proper nesting output\n amrdata.pprint = False # proj. of tagged points\n amrdata.rprint = False # print regridding summary\n amrdata.sprint = False # space/memory output\n amrdata.tprint = False # time step reporting each level\n amrdata.uprint = False # update/upbnd reporting\n \n return rundata\n\n # end of function setrun\n # ----------------------\n\n\nif __name__ == '__main__':\n # Set up run-time parameters and write all data files.\n import sys\n rundata = setrun(*sys.argv[1:])\n rundata.write()\n \n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
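The setrun.py in the amrclaw row above flags refinement inside ruled rectangles written by `region_tools.RuledRectangle` with `method = 1` and `ixy = 'x'`, i.e. piecewise-linear lower and upper y-limits between the `s` values. A rough NumPy-only sketch of the membership test this implies for the triangle region (an assumption about the semantics, not clawpack's actual implementation) might be:

```python
# Hypothetical illustration only -- not part of the dataset row above.
import numpy as np

s = np.array([0.1, 0.8])       # rr.s: x positions of the rule lines
lower = np.array([0.2, 0.8])   # rr.lower: lower y-limit at each s
upper = np.array([0.8, 0.8])   # rr.upper: upper y-limit at each s

def inside_triangle(x, y):
    """Piecewise-linear edges between the s values (method=1, ixy='x')."""
    if x < s[0] or x > s[-1]:
        return False
    return np.interp(x, s, lower) <= y <= np.interp(x, s, upper)

print(inside_triangle(0.45, 0.7))   # True: above the sloping lower edge
print(inside_triangle(0.45, 0.3))   # False: below it
```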
pragupta/Inverse-Reinforcement-Learning | [
"e7bbb7bb0ad24ebc36d9e0d4b4e6c6788229fd9c"
] | [
"irl/mdp/gridworld.py"
] | [
"\"\"\"\nImplements the gridworld MDP.\n\nMatthew Alger, 2015\[email protected]\n\"\"\"\n\nimport numpy as np\nimport numpy.random as rn\nimport matplotlib.pyplot as plt\n\nclass Gridworld(object):\n \"\"\"\n Gridworld MDP.\n \"\"\"\n\n def __init__(self, grid_size, wind, discount):\n \"\"\"\n grid_size: Grid size. int.\n wind: Chance of moving randomly. float.\n discount: MDP discount. float.\n -> Gridworld\n \"\"\"\n\n self.actions = ((1, 0), (0, 1), (-1, 0), (0, -1), (0, 0))\n self.n_actions = len(self.actions)\n self.n_states = grid_size**2\n self.grid_size = grid_size\n self.wind = wind\n self.discount = discount\n\n # Preconstruct the transition probability array.\n self.transition_probability = np.array(\n [[[self._transition_probability(i, j, k)\n for k in range(self.n_states)]\n for j in range(self.n_actions)]\n for i in range(self.n_states)])\n\n def __str__(self):\n return \"Gridworld({}, {}, {})\".format(self.grid_size, self.wind,\n self.discount)\n\n def plot_grid (self, filename=\"grid_world.png\", policy=[], value=[]):\n fig = plt.figure()\n ax = fig.add_subplot(111, xlim=(0, self.grid_size),\n ylim=(0, self.grid_size))\n\n\n font_size = 'x-large'\n ax.title.set_text(\"Gridworld\")\n\n cell_color = ['black', 'gray', 'white']\n\n for i in range(self.n_states):\n x, y = self.int_to_point(i)\n c_x = x\n c_y = y\n\n x = x - 0.5\n y = y - 0.5\n if self.reward(i) == 0:\n rect_color = 'gray'\n ec = 'white'\n else:\n rect_color = 'white'\n ec = 'black'\n\n p = plt.Rectangle([x, y], 1, 1, ec=ec)\n p.set_facecolor(rect_color)\n ax.add_patch(p)\n\n if len(policy) > 0:\n actions = [\">\", \"^\", \"<\", \"v\", \"-\"]\n if len(policy.shape) > 1:\n action = actions[np.argmax(policy[i])]\n else:\n action = actions[policy[i]]\n ax.text(c_x, c_y, action, color='k', #weight='bold',\n fontsize=10, ha='center', va='top')\n\n if len(value) > 0:\n ax.text(c_x, c_y, round(value[i], 2), color='k', #weight='bold',\n fontsize=8, ha='center', va='bottom')\n\n ax.set_xlim(-0.5, self.grid_size - 0.5)\n ax.set_ylim(-0.5, self.grid_size - 0.5)\n\n ax.set_xticks(range(self.grid_size))\n ax.set_yticks(range(self.grid_size))\n plt.savefig(filename, format='png', dpi=150)\n plt.close()\n def feature_vector(self, i, feature_map=\"ident\"):\n \"\"\"\n Get the feature vector associated with a state integer.\n\n i: State int.\n feature_map: Which feature map to use (default ident). String in {ident,\n coord, proxi}.\n -> Feature vector.\n \"\"\"\n\n if feature_map == \"coord\":\n f = np.zeros(self.grid_size)\n x, y = i % self.grid_size, i // self.grid_size\n f[x] += 1\n f[y] += 1\n return f\n if feature_map == \"proxi\":\n f = np.zeros(self.n_states)\n x, y = i % self.grid_size, i // self.grid_size\n for b in range(self.grid_size):\n for a in range(self.grid_size):\n dist = abs(x - a) + abs(y - b)\n f[self.point_to_int((a, b))] = dist\n return f\n # Assume identity map.\n f = np.zeros(self.n_states)\n f[i] = 1\n return f\n\n def feature_matrix(self, feature_map=\"ident\"):\n \"\"\"\n Get the feature matrix for this gridworld.\n\n feature_map: Which feature map to use (default ident). 
String in {ident,\n coord, proxi}.\n -> NumPy array with shape (n_states, d_states).\n \"\"\"\n\n features = []\n for n in range(self.n_states):\n f = self.feature_vector(n, feature_map)\n features.append(f)\n return np.array(features)\n\n def int_to_point(self, i):\n \"\"\"\n Convert a state int into the corresponding coordinate.\n\n i: State int.\n -> (x, y) int tuple.\n \"\"\"\n\n return (i % self.grid_size, i // self.grid_size)\n\n def point_to_int(self, p):\n \"\"\"\n Convert a coordinate into the corresponding state int.\n\n p: (x, y) tuple.\n -> State int.\n \"\"\"\n\n return p[0] + p[1]*self.grid_size\n\n def neighbouring(self, i, k):\n \"\"\"\n Get whether two points neighbour each other. Also returns true if they\n are the same point.\n\n i: (x, y) int tuple.\n k: (x, y) int tuple.\n -> bool.\n \"\"\"\n\n return abs(i[0] - k[0]) + abs(i[1] - k[1]) <= 1\n\n def _transition_probability(self, i, j, k):\n \"\"\"\n Get the probability of transitioning from state i to state k given\n action j.\n\n i: State int.\n j: Action int.\n k: State int.\n -> p(s_k | s_i, a_j)\n \"\"\"\n\n xi, yi = self.int_to_point(i)\n xj, yj = self.actions[j]\n xk, yk = self.int_to_point(k)\n\n if not self.neighbouring((xi, yi), (xk, yk)):\n return 0.0\n\n # Is k the intended state to move to?\n if (xi + xj, yi + yj) == (xk, yk):\n return 1 - self.wind + self.wind/self.n_actions\n\n # If these are not the same point, then we can move there by wind.\n if (xi, yi) != (xk, yk):\n return self.wind/self.n_actions\n\n # If these are the same point, we can only move here by either moving\n # off the grid or being blown off the grid. Are we on a corner or not?\n if (xi, yi) in {(0, 0), (self.grid_size-1, self.grid_size-1),\n (0, self.grid_size-1), (self.grid_size-1, 0)}:\n # Corner.\n # Can move off the edge in two directions.\n # Did we intend to move off the grid?\n if not (0 <= xi + xj < self.grid_size and\n 0 <= yi + yj < self.grid_size):\n # We intended to move off the grid, so we have the regular\n # success chance of staying here plus an extra chance of blowing\n # onto the *other* off-grid square.\n return 1 - self.wind + 2*self.wind/self.n_actions\n else:\n # We can blow off the grid in either direction only by wind.\n return 2*self.wind/self.n_actions\n else:\n # Not a corner. Is it an edge?\n if (xi not in {0, self.grid_size-1} and\n yi not in {0, self.grid_size-1}):\n # Not an edge.\n return 0.0\n\n # Edge.\n # Can only move off the edge in one direction.\n # Did we intend to move off the grid?\n if not (0 <= xi + xj < self.grid_size and\n 0 <= yi + yj < self.grid_size):\n # We intended to move off the grid, so we have the regular\n # success chance of staying here.\n return 1 - self.wind + self.wind/self.n_actions\n else:\n # We can blow off the grid only by wind.\n return self.wind/self.n_actions\n\n def reward(self, state_int):\n \"\"\"\n Reward for being in state state_int.\n\n state_int: State integer. int.\n -> Reward.\n \"\"\"\n\n# if state_int == self.n_states - 1:\n# return 1\n points = {self.point_to_int((0,0)) : 1,\n self.point_to_int((0,9)) : 1,\n self.point_to_int((9,0)) : 1,\n self.point_to_int((9,9)) : 1}\n if state_int in points:\n return points[state_int]\n return 0\n\n def average_reward(self, n_trajectories, trajectory_length, policy):\n \"\"\"\n Calculate the average total reward obtained by following a given policy\n over n_paths paths.\n\n policy: Map from state integers to action integers.\n n_trajectories: Number of trajectories. int.\n trajectory_length: Length of an episode. 
int.\n -> Average reward, standard deviation.\n \"\"\"\n\n trajectories = self.generate_trajectories(n_trajectories,\n trajectory_length, policy)\n rewards = [[r for _, _, r in trajectory] for trajectory in trajectories]\n rewards = np.array(rewards)\n\n # Add up all the rewards to find the total reward.\n total_reward = rewards.sum(axis=1)\n\n # Return the average reward and standard deviation.\n return total_reward.mean(), total_reward.std()\n\n def optimal_policy(self, state_int):\n \"\"\"\n The optimal policy for this gridworld.\n\n state_int: What state we are in. int.\n -> Action int.\n \"\"\"\n\n sx, sy = self.int_to_point(state_int)\n\n if sx < self.grid_size and sy < self.grid_size:\n return rn.randint(0, 2)\n if sx < self.grid_size-1:\n return 0\n if sy < self.grid_size-1:\n return 1\n raise ValueError(\"Unexpected state.\")\n\n def optimal_policy_deterministic(self, state_int):\n \"\"\"\n Deterministic version of the optimal policy for this gridworld.\n\n state_int: What state we are in. int.\n -> Action int.\n \"\"\"\n\n sx, sy = self.int_to_point(state_int)\n if sx < sy:\n return 0\n return 1\n\n def generate_trajectories(self, n_trajectories, trajectory_length, policy,\n random_start=False,\n start_state=(0, 0)):\n \"\"\"\n Generate n_trajectories trajectories with length trajectory_length,\n following the given policy.\n\n n_trajectories: Number of trajectories. int.\n trajectory_length: Length of an episode. int.\n policy: Map from state integers to action integers.\n random_start: Whether to start randomly (default False). bool.\n -> [[(state int, action int, reward float)]]\n \"\"\"\n\n trajectories = []\n for _ in range(n_trajectories):\n if random_start:\n sx, sy = rn.randint(self.grid_size), rn.randint(self.grid_size)\n else:\n sx, sy = start_state\n\n trajectory = []\n for _ in range(trajectory_length):\n if rn.random() < self.wind:\n action = self.actions[rn.randint(0, 4)]\n else:\n # Follow the given policy.\n action = self.actions[policy(self.point_to_int((sx, sy)))]\n\n if (0 <= sx + action[0] < self.grid_size and\n 0 <= sy + action[1] < self.grid_size):\n next_sx = sx + action[0]\n next_sy = sy + action[1]\n else:\n next_sx = sx\n next_sy = sy\n\n state_int = self.point_to_int((sx, sy))\n action_int = self.actions.index(action)\n next_state_int = self.point_to_int((next_sx, next_sy))\n reward = self.reward(next_state_int)\n trajectory.append((state_int, action_int, reward))\n\n sx = next_sx\n sy = next_sy\n\n trajectories.append(trajectory)\n\n return np.array(trajectories)\n"
] | [
[
"matplotlib.pyplot.Rectangle",
"numpy.random.random",
"matplotlib.pyplot.savefig",
"numpy.argmax",
"numpy.random.randint",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
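The Gridworld class in the row above indexes states as integers with `i = x + y * grid_size` and, for the default "ident" feature map, represents each state as a one-hot vector. A tiny standalone sketch of that convention (plain NumPy, without importing the module) is:

```python
# Hypothetical illustration only -- not part of the dataset row above.
import numpy as np

grid_size = 5
n_states = grid_size ** 2

def int_to_point(i):
    return (i % grid_size, i // grid_size)

def point_to_int(p):
    return p[0] + p[1] * grid_size

feature_matrix = np.eye(n_states)   # "ident" map: one one-hot row per state

i = 13
assert point_to_int(int_to_point(i)) == i        # round trip holds for every state
print(int_to_point(i), int(feature_matrix[i].argmax()))   # (3, 2) 13
```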
cfleschhut/virushack | [
"2fe7ded0be8672b066edef7fed52573794db2ba5",
"2fe7ded0be8672b066edef7fed52573794db2ba5"
] | [
"hystreet/hystreet_to_s3/compute_station_means.py",
"dashboard/dashboard.py"
] | [
"import pandas as pd\nfrom datetime import datetime, date\n\n\ndef compute_weekday(timestamp):\n date_str = timestamp.split('+')[0]\n date = datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S.%f')\n return date.weekday()\n\n\ndata = pd.read_csv('data.temp.csv')\n\ndata['weekday'] = float(\"NaN\")\nfor index, row in data.iterrows():\n data.at[index, 'weekday'] = compute_weekday(row['timestamp'])\n\n# compute mean pedestrians for stations by weekday\nstation_means = data.groupby(['station_id', 'weekday']).mean().reset_index().rename(columns={'pedestrians_count': 'mean_pedestrians_count_weekday', 'station_id': 'station_id_mean', 'weekday': 'weekday_mean'})[\n ['station_id_mean', 'weekday_mean', 'mean_pedestrians_count_weekday']]\nstation_means.to_csv('station_means.csv')\n",
"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport altair as alt\nimport datetime\nimport urllib\nimport json\nimport requests\nfrom PIL import Image\nfrom io import BytesIO\n\[email protected](persist=True)\ndef load_topojson():\n url_topojson = 'https://raw.githubusercontent.com/AliceWi/TopoJSON-Germany/master/germany.json'\n r = requests.get(url_topojson)\n jsondump = r.json()\n county_names = []\n county_ids = []\n for county in jsondump[\"objects\"][\"counties\"][\"geometries\"]:\n county_names.append(county[\"properties\"][\"name\"] + \" (\" + county[\"properties\"][\"districtType\"]+\")\")\n county_ids.append(county[\"id\"])\n state_names = []\n state_ids = []\n for state in jsondump[\"objects\"][\"states\"][\"geometries\"]:\n state_names.append(state[\"properties\"][\"name\"])\n state_ids.append(state[\"id\"])\n return county_names, county_ids, state_names, state_ids\n\[email protected](persist=True)\ndef load_real_data(dummy_time):\n # dummy_time parameter changes twice daily. Otherwise, streamlit \n # would always return cached data\n response = requests.get('https://0he6m5aakd.execute-api.eu-central-1.amazonaws.com/prod')\n jsondump = response.json()[\"body\"]\n \n county_names, county_ids, state_names, state_ids = load_topojson()\n id_to_name = {cid:county_names[idx] for idx,cid in enumerate(county_ids)}\n \n # get names for all scores\n scorenames = []\n for (date, row) in list(jsondump.items()):\n for cid, scores in row.items():\n for key in scores.keys():\n if key not in scorenames:\n scorenames.append(key)\n scorenames = [key for key in scorenames if '_score' in key]\n \n # prepare lists\n scorevalues = {scorename:[] for scorename in scorenames}\n ids = []\n names = []\n dates = []\n \n # loop over data\n for (date, row) in list(jsondump.items()):\n for cid, scores in row.items():\n ids.append(cid)\n names.append(id_to_name[cid])\n dates.append(date)\n for scorename in scorenames:\n if scorename in scores:\n scorevalue = scores[scorename]*100\n else:\n scorevalue = None\n scorevalues[scorename].append(scorevalue)\n \n # create dataframe\n df_scores = pd.DataFrame({\n \"id\": ids, \n \"name\": names, \n \"date\": dates\n })\n \n # add scores\n for scorename in scorenames:\n df_scores[scorename] = scorevalues[scorename]\n df_scores = df_scores.replace([np.inf, -np.inf], np.nan)\n \n return df_scores, scorenames\n \[email protected](persist=True)\ndef get_map(df_scores,selected_score,selected_score_axis,use_states,latest_date):\n url_topojson = 'https://raw.githubusercontent.com/AliceWi/TopoJSON-Germany/master/germany.json'\n MAPHEIGHT = 600\n if use_states:\n features = 'states'\n sw = 1\n else:\n features = 'counties'\n sw = 0.2\n # overlay state boundaries with thicker lines\n data_topojson_remote_states = alt.topo_feature(url=url_topojson, feature='states')\n overlaymap = alt.Chart(data_topojson_remote_states).mark_geoshape(\n fill=None,\n stroke='white',\n strokeWidth=1.5\n ).properties(width='container',height = MAPHEIGHT)\n data_topojson_remote = alt.topo_feature(url=url_topojson, feature=features)\n \n basemap = alt.Chart(data_topojson_remote).mark_geoshape(\n fill='lightgray',\n stroke='white',\n strokeWidth=sw\n ).properties(width='container',height = MAPHEIGHT)\n if use_states:\n #draw state map\n layer = alt.Chart(data_topojson_remote).mark_geoshape(\n stroke='white',\n strokeWidth=sw\n ).encode(\n color=alt.Color(selected_score+':Q', \n title=selected_score_axis, \n scale=alt.Scale(domain=(200, 0),\n scheme='redyellowgreen'),\n legend=None\n 
),\n tooltip=[alt.Tooltip(\"state_name:N\", title=\"Bundesland\"),\n alt.Tooltip(selected_score+\":Q\", title=selected_score_axis)]\n ).transform_lookup(\n lookup='id',\n from_= alt.LookupData(df_scores[(df_scores[\"date\"] == str(latest_date)) & (df_scores[selected_score] > 0)], 'id', [selected_score])\n ).transform_lookup(\n lookup='id',\n from_= alt.LookupData(df_scores[(df_scores[\"date\"] == str(latest_date)) & (df_scores[selected_score] > 0)], 'id', ['state_name'])\n ).properties(width='container',height = MAPHEIGHT)\n else:\n # draw counties map\n df_scores_lookup = df_scores[(df_scores[\"date\"] == str(latest_date)) & (df_scores[\"filtered_score\"] > 0)]\n df_scores_lookup = df_scores_lookup[['id','date','name','filtered_score']]\n \n layer = alt.Chart(data_topojson_remote).mark_geoshape(\n stroke='white',\n strokeWidth=sw\n ).encode(\n color=alt.Color('filtered_score:Q', \n title=selected_score_axis, \n scale=alt.Scale(domain=(200, 0),\n scheme='redyellowgreen'),\n legend=None\n ),\n tooltip=[alt.Tooltip(\"name:N\", title=\"Kreis\"),\n alt.Tooltip(\"filtered_score:Q\", title=selected_score_axis)]\n ).transform_lookup(\n lookup='id',\n from_= alt.LookupData(df_scores_lookup, 'id', ['filtered_score'])\n ).transform_lookup(\n lookup='id',\n from_= alt.LookupData(df_scores_lookup, 'id', ['name'])\n ).properties(width='container',height = MAPHEIGHT)\n if use_states:\n c = alt.layer(basemap, layer).configure_view(strokeOpacity=0)\n else:\n c = alt.layer(basemap, layer, overlaymap).configure_view(strokeOpacity=0)\n return c\n \[email protected](persist=True)\ndef get_timeline_plots(df_scores, selected_score, selected_score_axis, use_states, countys):\n if len(countys) > 0 and not use_states:\n # Landkreise\n df_scores = df_scores[df_scores[\"name\"].isin(countys)].dropna(axis=1, how=\"all\")\n c = alt.Chart(\n df_scores[df_scores[\"name\"].isin(countys)][[\"name\", \"date\", \"filtered_score\"]].dropna()\n ).mark_line(point=True).encode(\n x=alt.X('date:T', axis=alt.Axis(title='Datum', format=(\"%d %b\"))),\n y=alt.Y('filtered_score:Q', title=selected_score_axis),\n color=alt.Color('name', title=\"Landkreis\"),\n tooltip=[\n alt.Tooltip(\"name:N\", title=\"Landkreis\"),\n alt.Tooltip('filtered_score:Q', title=selected_score_axis),\n alt.Tooltip(\"date:T\", title=\"Datum\"),\n ]\n ).properties(\n width='container',\n height=400\n )\n return c\n elif use_states:\n # Bundesländer\n df_scores=df_scores[[\"state_name\", \"date\", selected_score]].dropna()\n c = alt.Chart(df_scores).mark_line(point=True).encode(\n x=alt.X('date:T', axis=alt.Axis(title='Datum', format=(\"%d %b\"))),\n y=alt.Y(selected_score+':Q', title=selected_score_axis),\n color=alt.Color('state_name', title=\"Bundesland\", scale=alt.Scale(scheme='category20')),\n tooltip=[\n alt.Tooltip(\"state_name:N\", title=\"Bundesland\"),\n alt.Tooltip(selected_score+\":Q\", title=selected_score_axis),\n alt.Tooltip(\"date:T\", title=\"Datum\"),\n ]\n ).properties(\n width='container',\n height=400\n )\n return c\n else:\n return None\n\ndef detail_score_selector(df_scores_in, scorenames_desc, scorenames_axis, allow_county_select, key, default_detail_index=0, default_score=\"hystreet_score\"):\n\n df_scores = df_scores_in.copy()\n \n # get counties\n county_names, county_ids, state_names, state_ids = load_topojson()\n id_to_name = {cid:county_names[idx] for idx,cid in enumerate(county_ids)}\n state_id_to_name = {cid:state_names[idx] for idx,cid in enumerate(state_ids)}\n state_name_to_id = {state_names[idx]:cid for idx,cid in 
enumerate(state_ids)}\n\n # LEVEL OF DETAIL SELECT\n use_states_select = st.selectbox('Detailgrad:', \n ('Bundesländer', 'Landkreise'), \n index =default_detail_index,\n key = key\n )\n use_states = use_states_select == 'Bundesländer'\n \n # SCORE SELECT\n sorted_desc = sorted(list(scorenames_desc.values()))\n selected_score_desc = st.selectbox(\n 'Datenquelle:', sorted_desc, \n index = sorted_desc.index(scorenames_desc[default_score]), # default value in sorted list\n key = key\n )\n inverse_scorenames_desc = {scorenames_desc[key]:key for key in scorenames_desc.keys()}\n selected_score = inverse_scorenames_desc[selected_score_desc]\n selected_score_axis = scorenames_axis[selected_score] + ' (%)'\n \n latest_date = pd.Series(df_scores[df_scores[selected_score] > 0][\"date\"]).values[-1]\n \n # COUNTY SELECT\n if (not use_states) and allow_county_select:\n available_countys = [value for value in county_names if value in df_scores[df_scores[selected_score] > 0][\"name\"].values]\n if len(available_countys) > 1:\n default=available_countys[:2]\n else:\n default = []\n countys = st.multiselect('Wähle Landkreise aus:',\n options = available_countys, \n default=default,\n key = key\n )\n else:\n countys = []\n \n # Prepare df_scores according to Landkreis/Bundesland selection\n if use_states:\n # aggregate state data\n df_scores['state_id'] = df_scores.apply(lambda x: str(x['id'])[:2],axis=1) # get state id (first two letters of county id)\n df_scores['state_name'] = df_scores.apply(lambda x: state_id_to_name[x['state_id']],axis=1) # get state name\n df_scores = df_scores.groupby(['state_name','date']).mean() # group by state and date, calculate mean scores\n df_scores = df_scores.round(1) #round\n df_scores['id'] = df_scores.apply(lambda x: state_name_to_id[x.name[0]],axis=1) # re-add state indices\n df_scores = df_scores.replace([np.inf, -np.inf], np.nan) # remove infs\n df_scores = df_scores.reset_index() # make index columns into regular columns\n else:\n #filter scores based on selected places\n #if len(countys) > 0:\n #df_scores[\"filtered_score\"] = np.where(df_scores[\"name\"].isin(countys), df_scores[selected_score],[0] *# len(df_scores))\n #else:\n df_scores[\"filtered_score\"] = df_scores[selected_score]\n\n df_scores[\"date\"] = pd.to_datetime(df_scores[\"date\"])\n df_scores = df_scores.round(1)\n \n return (df_scores,selected_score, selected_score_desc, selected_score_axis, use_states, use_states_select, countys, latest_date)\n\n\n\ndef dashboard():\n # make page here with placeholders\n # thus later elements (e.g. 
county selector) can influence\n # earlier elements (the map) because they can appear earlier in \n # the code without appearing earlier in the webpage\n st.title(\"EveryoneCounts\")\n st.header(\"Das Social Distancing Dashboard\")\n st_map_header = st.empty()\n st_info_text = st.empty()\n \n\n \n # Insert custom CSS\n # - prevent horizontal scrolling on mobile\n # - restrict images to container width\n # - restrict altair plots to container width\n # - make inputs better visible\n st.markdown(\"\"\"\n <style type='text/css'>\n .block-container>div {\n width:100% !important;\n overflow:hidden !important;\n }\n .image-container {\n width: 99%;\n }\n img {\n max-width: 99%;\n margin:auto;\n }\n div.stVegaLiteChart, fullScreenFrame {\n width:99%;\n }\n .stSelectbox div[data-baseweb=\"select\"]>div,\n .stMultiSelect div[data-baseweb=\"select\"]>div{\n border:1px solid #fcbfcf;\n }\n </style>\n \"\"\", unsafe_allow_html=True)\n \n \n # get score data\n dummy_time = datetime.datetime.now().strftime(\"%Y-%m-%d-%p\") # 2020-03-28-PM, changes twice daily\n df_scores_full, scorenames = load_real_data(dummy_time)\n #df_scores = df_scores_full.copy()\n \n \n # descriptive names for each score\n scorenames_desc_manual = {\n \"gmap_score\":\"Menschen an Haltestellen des ÖPNV\",\n \"gmap_supermarket_score\":\"Besucher in Supermärkten\",\n \"hystreet_score\":\"Fußgänger in Innenstädten (Laserscanner-Messung)\",\n \"zug_score\":\"DB Züge\",\n \"bike_score\":\"Fahrradfahrer\",\n \"bus_score\":\"ÖPV Busse\",\n \"national_score\":\"ÖPV IC-Züge\",\n \"suburban_score\":\"ÖPV Nahverkehr\",\n \"regional_score\":\"ÖPV Regionalzüge\",\n \"nationalExpress_score\":\"ÖPV ICE-Züge\",\n \"webcam_score\":\"Fußgänger auf öffentlichen Webcams\",\n \"tomtom_score\":\"Autoverkehr\"\n }\n # very short axis labels for each score\n scorenames_axis_manual = {\n \"gmap_score\":\"Menschen\",\n \"gmap_supermarket_score\":\"Besucher\",\n \"hystreet_score\":\"Fußgänger\",\n \"zug_score\":\"Züge\",\n \"bike_score\":\"Fahrradfahrer\",\n \"bus_score\":\"Busse\",\n \"national_score\":\"IC-Züge\",\n \"suburban_score\":\"Nahverkehr\",\n \"regional_score\":\"Regionalzüge\",\n \"nationalExpress_score\":\"ICE-Züge\",\n \"webcam_score\":\"Fußgänger\",\n \"tomtom_score\":\"Autoverkehr\"\n }\n \n # for scores not in the hardcoded list above\n # default to their scorename as a fallback\n scorenames_desc = {}\n scorenames_axis = {}\n for scorename in scorenames:\n if scorename in scorenames_desc_manual:\n scorenames_desc[scorename] = scorenames_desc_manual[scorename]\n else:\n scorenames_desc[scorename] = scorename\n if scorename in scorenames_axis_manual:\n scorenames_axis[scorename] = scorenames_axis_manual[scorename]\n else:\n scorenames_axis[scorename] = scorename\n \n # Selection box for the map\n df_scores, selected_score, selected_score_desc, selected_score_axis, use_states, use_states_select, countys, latest_date = detail_score_selector(df_scores_full, \n scorenames_desc, \n scorenames_axis, \n allow_county_select=False,\n key='map',\n default_detail_index=0,\n default_score=\"gmap_score\"\n )\n st_map = st.empty()\n st_legend = st.empty()\n st_timeline_header = st.empty()\n st_timeline_desc = st.empty()\n \n # Selection box for the timeline\n df_scores2, selected_score2, selected_score_desc2, selected_score_axis2, use_states2, use_states_select2, countys2, latest_date2 = detail_score_selector(df_scores_full, \n scorenames_desc, \n scorenames_axis, \n allow_county_select=True,\n key='timeline',\n default_detail_index=1,\n 
default_score=\"hystreet_score\"\n )\n\n \n st_timeline = st.empty()\n\n #selected_date = st.sidebar.date_input('für den Zeitraum vom', datetime.date(2020,3,24))\n #end_date = st.sidebar.date_input('bis', datetime.date(2020,3,22))\n\n\n # WRITE DESCRIPTION TEXTS\n if selected_score == \"bike_score\" :\n st_info_text.markdown('''\n In der Karte siehst Du wie sich Social Distancing auf die verschiedenen **{regionen}** in Deutschland auswirkt. Wir nutzen Daten über **{datasource}** um zu berechnen, wie gut Social Distancing aktuell funktioniert. Du kannst die Datenauswahl weiter unten im Menü ändern. Ein Wert von **100% entspricht dem Normal-Wert vor der COVID-Pandemie**, also bevor die Bürger zu Social Distancing aufgerufen wurden. Ein kleiner Wert weist darauf hin, dass in unserer Datenquelle eine Verringerung der Aktivität gemessen wurde. **Im Fall von Radfahrern ist ein erhöhtes Verkehrsaufkommen ein positiver Indikator für Social Distancing!** Mehr Menschen sind mit dem Fahrrad unterwegs anstatt mit anderen Verkehrsmitteln, bei denen Social Distancing schwierieger einzuhalten ist.\n '''.format(regionen=use_states_select,datasource=selected_score_desc)\n )\n else:\n st_info_text.markdown('''\n In der Karte siehst Du wie sich Social Distancing auf die verschiedenen **{regionen}** in Deutschland auswirkt. Wir nutzen Daten über **{datasource}** um zu berechnen, wie gut Social Distancing aktuell funktioniert. Du kannst die Datenauswahl weiter unten im Menü ändern. Ein Wert von **100% entspricht dem Normal-Wert vor der COVID-Pandemie**, also bevor die Bürger zu Social Distancing aufgerufen wurden. Ein kleiner Wert weist darauf hin, dass in unserer Datenquelle eine Verringerung der Aktivität gemessen wurde, was ein guter Indikator für erfolgreich umgesetztes Social Distancing ist. **Weniger ist besser!**\n '''.format(regionen=use_states_select,datasource=selected_score_desc)\n )\n if selected_score2 == \"bike_score\" :\n st_timeline_desc.markdown('''\n Hier kannst du den zeitlichen Verlauf der gewählten Datenquelle für verschiedene **{regionen}** in Deutschland vergleichen. Wir nutzen Daten über **{datasource}** um zu berechnen, wie gut Social Distancing aktuell funktioniert. Du kannst die Datenauswahl weiter unten im Menü ändern. **Ein Wert von 100% entspricht dem Normal-Wert vor der COVID-Pandemie, also bevor die Bürger zu Social Distancing aufgerufen wurden.** Ein kleiner Wert weist darauf hin, dass in unserer Datenquelle eine Verringerung der Aktivität gemessen wurde, was ein guter Indikator für erfolgreich umgesetztes Social Distancing ist. **Im Fall von Radfahrern ist ein erhöhtes Verkehrsaufkommen ein positiver Indikator für Social Distancing!** Mehr Menschen sind mit dem Fahrrad unterwegs anstatt mit anderen Verkehrsmitteln, bei denen Social Distancing schwierieger einzuhalten ist.\n \n **Sieh doch mal nach wie die Lage in Deiner Region ist!**\n '''.format(regionen=use_states_select2,datasource=selected_score_desc2)\n )\n else:\n st_timeline_desc.markdown('''\n Hier kannst du den zeitlichen Verlauf der gewählten Datenquelle für verschiedene **{regionen}** in Deutschland vergleichen. Wir nutzen Daten über **{datasource}** um zu berechnen, wie gut Social Distancing aktuell funktioniert. Du kannst die Datenauswahl weiter unten im Menü ändern. 
**Ein Wert von 100% entspricht dem Normal-Wert vor der COVID-Pandemie, also bevor die Bürger zu Social Distancing aufgerufen wurden.** Ein kleiner Wert weist darauf hin, dass in unserer Datenquelle eine Verringerung der Aktivität gemessen wurde, was ein guter Indikator für erfolgreich umgesetztes Social Distancing ist. \n \n **Sieh doch mal nach wie die Lage in Deiner Region ist!**\n '''.format(regionen=use_states_select2,datasource=selected_score_desc2)\n )\n\n\n try:\n st_map_header.subheader('Social Distancing Karte vom {}'.format( datetime.datetime.strptime(latest_date,\"%Y-%m-%d\").strftime(\"%d.%m.%Y\") ))\n except:\n st_map_header.subheader('Social Distancing Karte vom {}'.format(latest_date))\n st_legend.image(\"https://github.com/socialdistancingdashboard/virushack/raw/master/dashboard/legende.png\") \n \n\n \n # DRAW MAP\n # ========\n map = get_map(df_scores, selected_score, selected_score_axis, use_states, latest_date)\n map2 = map.copy() # otherwise streamlit gives a Cached Object Mutated warning\n st_map.altair_chart(map2)\n \n # DRAW TIMELINES\n # ==============\n st_timeline_header.subheader(\"Zeitlicher Verlauf\")\n \n timeline = get_timeline_plots(df_scores2, selected_score2, selected_score_axis2, use_states2, countys2)\n if timeline is not None:\n timeline2 = timeline.copy() # otherwise streamlit gives a Cached Object Mutated warning\n st_timeline.altair_chart(timeline2)\n\n \n # FOOTER\n # ======\n st.subheader(\"Unsere Datenquellen\")\n st.markdown(\"\"\"\n \n \"\"\")\n"
] | [
[
"pandas.read_csv"
],
[
"pandas.to_datetime",
"pandas.Series",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
OtavioPiza/project-euler | [
"96ca6d5af85ab2c2b911e38d89a78ac2443fbc5f"
] | [
"utils/plotting.py"
] | [
"from matplotlib import pyplot as plt\nfrom typing import Any, NoReturn, Tuple, List\n\n\ndef plot_range(params: Tuple[Tuple[Any], ...], functions: Tuple[(Any, )], x_label: str = 'input',\n x_axis_labeling_function: (Any) = lambda i: i[0]) -> NoReturn:\n \"\"\"\n plots the time each function took to execute each of the provided parameters; if there are more then one parameters,\n the first one is used for the x-axis\n\n :param params: parameters for the functions\n :param functions: functions\n \"\"\"\n x_axis: List[Any] = list(map(x_axis_labeling_function, params))\n y_axes: List[List[float]] = [[function(*param)[1] for param in params] for function in functions]\n index: int = 0\n\n for y_axis in y_axes:\n plt.plot(x_axis, y_axis, label=f'solution {(index := index + 1)}')\n\n plt.xlabel(x_label)\n plt.ylabel('time')\n plt.legend()\n plt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
skynetera/openpilot | [
"a7e099c946800c7a8b60c47678801d9a95f95549"
] | [
"selfdrive/controls/lib/radar_helpers.py"
] | [
"import numpy as np\nimport platform\nimport os\nimport sys\n\nfrom common.kalman.ekf import FastEKF1D, SimpleSensor\n\n# radar tracks\nSPEED, ACCEL = 0, 1 # Kalman filter states enum\n\nrate, ratev = 20., 20. # model and radar are both at 20Hz\nts = 1./rate\nfreq_v_lat = 0.2 # Hz\nk_v_lat = 2*np.pi*freq_v_lat*ts / (1 + 2*np.pi*freq_v_lat*ts)\n\nfreq_a_lead = .5 # Hz\nk_a_lead = 2*np.pi*freq_a_lead*ts / (1 + 2*np.pi*freq_a_lead*ts)\n\n# stationary qualification parameters\nv_stationary_thr = 4. # objects moving below this speed are classified as stationary\nv_oncoming_thr = -3.9 # needs to be a bit lower in abs value than v_stationary_thr to not leave \"holes\"\nv_ego_stationary = 4. # no stationary object flag below this speed\n\nclass Track(object):\n def __init__(self):\n self.ekf = None\n self.stationary = True\n self.initted = False\n\n def update(self, d_rel, y_rel, v_rel, d_path, v_ego_t_aligned):\n if self.initted:\n self.dPathPrev = self.dPath\n self.vLeadPrev = self.vLead\n self.vRelPrev = self.vRel\n\n # relative values, copy\n self.dRel = d_rel # LONG_DIST\n self.yRel = y_rel # -LAT_DIST\n self.vRel = v_rel # REL_SPEED\n\n # compute distance to path\n self.dPath = d_path\n\n # computed velocity and accelerations\n self.vLead = self.vRel + v_ego_t_aligned\n\n if not self.initted:\n self.aRel = 0. # nidec gives no information about this\n self.vLat = 0.\n self.aLead = 0.\n else:\n # estimate acceleration\n a_rel_unfilt = (self.vRel - self.vRelPrev) / ts\n a_rel_unfilt = np.clip(a_rel_unfilt, -10., 10.)\n self.aRel = k_a_lead * a_rel_unfilt + (1 - k_a_lead) * self.aRel\n\n v_lat_unfilt = (self.dPath - self.dPathPrev) / ts\n self.vLat = k_v_lat * v_lat_unfilt + (1 - k_v_lat) * self.vLat\n\n a_lead_unfilt = (self.vLead - self.vLeadPrev) / ts\n a_lead_unfilt = np.clip(a_lead_unfilt, -10., 10.)\n self.aLead = k_a_lead * a_lead_unfilt + (1 - k_a_lead) * self.aLead\n\n if self.stationary:\n # stationary objects can become non stationary, but not the other way around\n self.stationary = v_ego_t_aligned > v_ego_stationary and abs(self.vLead) < v_stationary_thr\n self.oncoming = self.vLead < v_oncoming_thr\n\n if self.ekf is None:\n self.ekf = FastEKF1D(ts, 1e3, [0.1, 1])\n self.ekf.state[SPEED] = self.vLead\n self.ekf.state[ACCEL] = 0\n self.lead_sensor = SimpleSensor(SPEED, 1, 2)\n\n self.vLeadK = self.vLead\n self.aLeadK = self.aLead\n else:\n self.ekf.update_scalar(self.lead_sensor.read(self.vLead))\n self.ekf.predict(ts)\n self.vLeadK = float(self.ekf.state[SPEED])\n self.aLeadK = float(self.ekf.state[ACCEL])\n\n if not self.initted:\n self.cnt = 1\n self.vision_cnt = 0\n else:\n self.cnt += 1\n\n self.initted = True\n self.vision = False\n\n def mix_vision(self, dist_to_vision, rel_speed_diff):\n # rel speed is very hard to estimate from vision\n if dist_to_vision < 4.0 and rel_speed_diff < 10.:\n # vision point is never stationary\n self.stationary = False\n self.vision = True\n self.vision_cnt += 1\n\n def get_key_for_cluster(self):\n # Weigh y higher since radar is inaccurate in this dimension\n return [self.dRel, self.dPath*2, self.vRel]\n\n# ******************* Cluster *******************\n\nif platform.machine() == 'aarch64':\n for x in sys.path:\n pp = os.path.join(x, \"phonelibs/hierarchy/lib\")\n if os.path.isfile(os.path.join(pp, \"_hierarchy.so\")):\n sys.path.append(pp)\n break\n import _hierarchy\nelse:\n from scipy.cluster import _hierarchy\n\ndef fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):\n # supersimplified function to get fast 
clustering. Got it from scipy\n Z = np.asarray(Z, order='c')\n n = Z.shape[0] + 1\n T = np.zeros((n,), dtype='i')\n _hierarchy.cluster_dist(Z, T, float(t), int(n))\n return T\n\nRDR_TO_LDR = 2.7\n\ndef mean(l):\n return sum(l)/len(l)\n\nclass Cluster(object):\n def __init__(self):\n self.tracks = set()\n\n def add(self, t):\n # add the first track\n self.tracks.add(t)\n\n # TODO: make generic\n @property\n def dRel(self):\n return mean([t.dRel for t in self.tracks])\n\n @property\n def yRel(self):\n return mean([t.yRel for t in self.tracks])\n\n @property\n def vRel(self):\n return mean([t.vRel for t in self.tracks])\n\n @property\n def aRel(self):\n return mean([t.aRel for t in self.tracks])\n\n @property\n def vLead(self):\n return mean([t.vLead for t in self.tracks])\n\n @property\n def aLead(self):\n return mean([t.aLead for t in self.tracks])\n\n @property\n def dPath(self):\n return mean([t.dPath for t in self.tracks])\n\n @property\n def vLat(self):\n return mean([t.vLat for t in self.tracks])\n\n @property\n def vLeadK(self):\n return mean([t.vLeadK for t in self.tracks])\n\n @property\n def aLeadK(self):\n return mean([t.aLeadK for t in self.tracks])\n\n @property\n def vision(self):\n return any([t.vision for t in self.tracks])\n\n @property\n def vision_cnt(self):\n return max([t.vision_cnt for t in self.tracks])\n\n @property\n def stationary(self):\n return all([t.stationary for t in self.tracks])\n\n @property\n def oncoming(self):\n return all([t.oncoming for t in self.tracks])\n\n def toLive20(self, lead):\n lead.dRel = float(self.dRel) - RDR_TO_LDR\n lead.yRel = float(self.yRel)\n lead.vRel = float(self.vRel)\n lead.aRel = float(self.aRel)\n lead.vLead = float(self.vLead)\n lead.aLead = float(self.aLead)\n lead.dPath = float(self.dPath)\n lead.vLat = float(self.vLat)\n lead.vLeadK = float(self.vLeadK)\n lead.aLeadK = float(self.aLeadK)\n lead.status = True\n lead.fcw = False\n\n def __str__(self):\n ret = \"x: %7.2f y: %7.2f v: %7.2f a: %7.2f\" % (self.dRel, self.yRel, self.vRel, self.aRel)\n if self.stationary:\n ret += \" stationary\"\n if self.vision:\n ret += \" vision\"\n if self.oncoming:\n ret += \" oncoming\"\n if self.vision_cnt > 0:\n ret += \" vision_cnt: %6.0f\" % self.vision_cnt\n return ret\n\n def is_potential_lead(self, v_ego, enabled):\n # predict cut-ins by extrapolating lateral speed by a lookahead time\n # lookahead time depends on cut-in distance. more attentive for close cut-ins\n # also, above 50 meters the predicted path isn't very reliable\n\n # the distance at which v_lat matters is higher at higher speed\n lookahead_dist = 40. + v_ego/1.2 #40m at 0mph, ~70m at 80mph\n\n t_lookahead_v = [1., 0.]\n t_lookahead_bp = [10., lookahead_dist]\n\n # average dist\n d_path = self.dPath\n\n if enabled:\n t_lookahead = np.interp(self.dRel, t_lookahead_bp, t_lookahead_v)\n # correct d_path for lookahead time, considering only cut-ins and no more than 1m impact\n lat_corr = np.clip(t_lookahead * self.vLat, -1, 0)\n else:\n lat_corr = 0.\n d_path = np.maximum(d_path + lat_corr, 0)\n\n if d_path < 1.5 and not self.stationary and not self.oncoming:\n return True\n else:\n return False\n\n def is_potential_lead2(self, lead_clusters):\n if len(lead_clusters) > 0:\n lead_cluster = lead_clusters[0]\n # check if the new lead is too close and roughly at the same speed of the first lead: it might just be the second axle of the same vehicle\n if (self.dRel - lead_cluster.dRel) < 8. 
and abs(self.vRel - lead_cluster.vRel) < 1.:\n return False\n else:\n return True\n else:\n return False\n"
] | [
[
"numpy.maximum",
"numpy.clip",
"numpy.asarray",
"numpy.interp",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
chw3k5/WaferScreen | [
"c0ca7fe939fe7cd0b722b7d6129b148c03a7505c",
"c0ca7fe939fe7cd0b722b7d6129b148c03a7505c",
"c0ca7fe939fe7cd0b722b7d6129b148c03a7505c"
] | [
"waferscreen/plot/band_and_keepout.py",
"waferscreen/inst_control/Keysight_USB_VNA.py",
"submm_python_routines/KIDs/find_resonances_interactive.py"
] | [
"# Copyright (C) 2021 Members of the Simons Observatory collaboration.\n# Please refer to the LICENSE file in the root of this repository.\n\nimport matplotlib.pyplot as plt\nimport matplotlib.transforms as mtransforms\nfrom ref import band_params, smurf_keepout_zones_ghz\n\n\ncolors = ['BlueViolet', 'Brown', 'CadetBlue', 'Coral', 'Crimson',\n 'DarkGoldenRod', 'DarkGreen', 'DarkMagenta', 'DarkOrange',\n 'DarkOrchid', 'DarkRed', 'DarkSalmon', 'DodgerBlue', 'FireBrick']\nhatches = ['/', '*', '\\\\', 'x', 'o']\n\n\ndef band_and_keepout_plot(ax, do_labels=False, y_ticks_off=False):\n trans = mtransforms.blended_transform_factory(ax.transData, ax.transAxes)\n if y_ticks_off:\n ax.tick_params(axis='y', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n left=False, # ticks along the bottom edge are off\n right=False, # ticks along the top edge are off\n labelleft=False)\n\n # SO band definitions\n for band_int in range(14):\n band_name = F\"Band{'%02i' % band_int}\"\n band_dict = band_params[band_name]\n band_min_ghz = band_dict[\"min_GHz\"]\n band_max_ghz = band_dict[\"max_GHz\"]\n color = colors[band_int]\n ax.fill_between((band_dict['min_GHz'], band_dict['max_GHz']), 0, 1,\n facecolor=color, alpha=0.5, transform=trans)\n band_size_mhz = 1000.0 * (band_max_ghz - band_min_ghz)\n if do_labels:\n plt.text(x=band_dict['min_GHz'], y=0.9 - (band_int * 0.8 / 14.0),\n s=F\"{band_name}\\n size={'%5.1f' % band_size_mhz}MHz\",\n color=\"white\", fontsize=6,\n bbox={\"facecolor\": color, \"alpha\": 0.5}, transform=trans)\n\n # smurf keep out zones\n for keepout_index, keepout_zone in list(enumerate(smurf_keepout_zones_ghz)):\n keepout_min, keepout_max = keepout_zone\n hatch = hatches[keepout_index]\n ax.fill_between((keepout_min, keepout_max), 0, 1,\n facecolor='black', alpha=0.5, transform=trans,\n hatch=hatch)\n if do_labels:\n plt.text(x=keepout_min, y=0.1, s=\"SMURF Keepout Zone\", color=\"white\", fontsize=6,\n bbox={\"facecolor\": 'black', \"alpha\": 0.5}, transform=trans)\n return ax\n\n\nif __name__ == \"__main__\":\n # plot initialization\n fig, ax_band_keepout = plt.subplots(figsize=(12, 8))\n # call the plot function\n ax_band_keepout = band_and_keepout_plot(ax=ax_band_keepout, do_labels=True, y_ticks_off=False)\n # get the plotting axis back to make additions\n ax_band_keepout.set_xlabel(\"Frequency (GHz)\")\n # show the plot\n plt.show(block=True)\n",
"import visa\nimport math\nimport numpy as np\nimport time\n\n\nclass USBVNA():\n \"\"\" Keysight USB VNA instrument class. \"\"\"\n\n def __init__(self, address=\"TCPIP0::687UWAVE-TEST::hislip_PXI10_CHASSIS1_SLOT1_INDEX0,4880::INSTR\"):\n self.ResourceManager = visa.ResourceManager()\n self.ctrl = self.ResourceManager.open_resource(\"%s\" % address, write_termination='\\n')\n self.ctrl.timeout = 1000000\n self.ctrl.vna_id = self.ctrl.query(\"*IDN?\").rstrip()\n print(\"Connected to : \" + self.ctrl.vna_id)\n\n def close(self):\n \"\"\" closes the VISA instance \"\"\"\n self.ctrl.write(\"INIT:CONT ON\")\n self.ctrl.close()\n print(\"VNA control closed\")\n\n def preset(self):\n \"\"\"presets PNA\"\"\"\n self.ctrl.write(\"SYST:FPR\")\n self.ctrl.write(\"*CLS\")\n # print(self.ctrl.query(\"*STB?\"))\n # print(self.ctrl.query(\"*SRE?\"))\n print(\"VNA Preset\")\n\n def wait(self):\n self.ctrl.write(\"*WAI\")\n\n def setup2port(self):\n \"\"\"sets up 2 port measurement\"\"\"\n time.sleep(0.1)\n self.ctrl.meas_type = \"FULL\"\n self.ctrl.write(\"DISP:Window1:STATE ON\")\n # self.ctrl.write(\"DISP:Window2:STATE ON\")\n self.ctrl.write(\"CALC:PAR:DEL:ALL\")\n self.ctrl.write(\"CALC:PAR:DEF:EXT 'Meas11','S11'\")\n self.ctrl.write(\"CALC:PAR:DEF:EXT 'Meas12','S12'\")\n self.ctrl.write(\"CALC:PAR:DEF:EXT 'Meas21','S21'\")\n self.ctrl.write(\"CALC:PAR:DEF:EXT 'Meas22','S22'\")\n self.ctrl.write(\"DISP:Window1:Trace1:FEED 'Meas11'\")\n self.ctrl.write(\"DISP:Window1:Trace2:FEED 'Meas12'\")\n self.ctrl.write(\"DISP:Window1:Trace3:FEED 'Meas21'\")\n self.ctrl.write(\"DISP:Window1:Trace4:FEED 'Meas22'\")\n # self.ctrl.write(\"CONT:CHAN:INT:CONT 1\")\n self.ctrl.write(\"INIT:CONT OFF\") # turn off continuous triggering\n self.ctrl.write(\"TRIG:SOUR MAN\") # set trigger to manual\n self.ctrl.write(\"TRIG:SCOP ALL\") # trigger all channels sequentially\n self.ctrl.write(\"SENS:SWE:MODE CONT\") # allow channels to trigger repeatedly\n self.ctrl.write(\"*WAI\")\n self.reset_sweep()\n self.avg_inquire()\n self.sweep_inquire()\n self.freqs_inquire()\n self.ifbw_inquire()\n self.power_inquire()\n print(\"2 Port Measurement Set up\")\n\n def setup_thru(self):\n \"\"\"sets up a simple S21 measurement\"\"\"\n time.sleep(0.1)\n self.ctrl.meas_type = \"THRU\"\n self.ctrl.write(\"DISP:Window1:STATE ON\")\n self.ctrl.write(\"CALC:PAR:DEL:ALL\")\n self.ctrl.write(\"CALC:PAR:DEF:EXT 'Meas21','S21'\")\n # define display format for this measurement? ... CALC:FORM MLOG or MLIN\n self.ctrl.write(\"DISP:Window1:Trace1:FEED 'Meas21'\")\n # self.ctrl.write(\"CONT:CHAN:INT:CONT 1\")\n self.ctrl.write(\"INIT:CONT OFF\")\n self.ctrl.write(\"TRIG:SOUR MAN\")\n self.ctrl.write(\"*WAI\")\n self.reset_sweep()\n self.avg_inquire()\n self.sweep_inquire()\n self.freqs_inquire()\n self.ifbw_inquire()\n self.power_inquire()\n print(\"Thru Measurement Set Up\")\n\n def set_cal(self, calset=\"ArbitraryCalSet\", calstate='OFF'):\n if calstate == 'ON':\n self.ctrl.write(\"SENS:CORR:CSET:ACT \\\"%s\\\",1\" % calset)\n self.ctrl.write(\"*WAI\")\n time.sleep(0.1)\n self.avg_inquire()\n self.sweep_inquire()\n self.freqs_inquire()\n self.ifbw_inquire()\n self.power_inquire()\n print(\"Using Cal Set: \" + calset)\n print(\"PNA State post Cal Set Application: \")\n if self.ctrl.avestate == 1:\n print(\"Averaging ON with \" + str(int(self.ctrl.avecount)) + \" points\")\n else:\n print(\"Averaging OFF\")\n if self.ctrl.sweeptype == \"LIN\":\n print(\"Linear Freq. 
Sweep with \" + str(int(self.ctrl.sweeppoints)) + \" points\")\n elif self.ctrl.sweeptype == \"LOG\":\n print(\"Logarithmic Freq. Sweep with \" + str(int(self.ctrl.sweeppoints)) + \" points\")\n else:\n print(\"Unrecognized Sweep Type\")\n print(\"Sweep time: \" + str(self.ctrl.sweeptime) + \" seconds\")\n print(\"IF Bandwidth: \" + str(self.ctrl.ifbw) + \"Hz\")\n print(\"Measurement from \" + str(self.ctrl.freqstart / 1e9) + \"GHz to \" + str(\n float(self.ctrl.freqstop) / 1e9) + \"GHz\")\n print(\"Source 1 Power: %.2f dBm\" % self.ctrl.powersource1)\n print(\"Source 2 Power: %.2f dBm\" % self.ctrl.powersource2)\n self.reset_sweep()\n elif calstate == 'OFF':\n self.ctrl.write(\"SENS:CORR OFF\")\n print(\"Taking Un-Calibrated Data\")\n\n def set_sweeptype(self, sweeptype=\"lin\"):\n test_type = sweeptype.lower().strip()\n if test_type == \"lin\":\n self.ctrl.write(\"SENS:SWE:TYPE LIN\")\n elif test_type == \"log\":\n self.ctrl.write(\"SENS:SWE:TYPE LOG\")\n else:\n raise KeyError(F\"{sweeptype} is not a recognized sweeptype.\")\n\n def set_num_freq_points(self, num_freq_points):\n self.ctrl.write(\"SENS:SWE:POIN %d\" % num_freq_points)\n\n def set_sweep(self, num_freq_points, sweeptype=\"lin\"):\n self.set_num_freq_points(num_freq_points=num_freq_points)\n self.set_sweeptype(sweeptype)\n self.sweep_inquire()\n print(\"Sweep type = \" + self.ctrl.sweeptype)\n print(\"Sweep points = \" + str(self.ctrl.sweeppoints))\n self.reset_sweep()\n\n def sweep_inquire(self):\n self.ctrl.write(\"*WAI\")\n self.ctrl.sweeptype = self.ctrl.query(\"SENS:SWE:TYPE?\").rstrip()\n self.ctrl.write(\"*WAI\")\n self.ctrl.sweeppoints = int(self.ctrl.query(\"SENS:SWE:POIN?\"))\n self.ctrl.write(\"*WAI\")\n self.ctrl.sweeptime = float(self.ctrl.query(\"SENS:SWE:TIME?\")) # in milliseconds\n self.ctrl.write(\"*WAI\")\n\n def set_freq_limits(self, start=0.01, stop=50.0):\n self.ctrl.write(\"SENS:FREQ:STAR %fghz \" % start)\n self.ctrl.write(\"SENS:FREQ:STOP %fghz\" % stop)\n self.freqs_inquire()\n print(\"Freq Start = \" + str(1e-9 * self.ctrl.freqstart) + \"GHz\")\n print(\"Freq Stop = \" + str(1e-9 * self.ctrl.freqstop) + \"GHz\")\n self.sweep_inquire()\n self.reset_sweep()\n\n def set_freq_center(self, center=21.755, span=43.49):\n self.ctrl.write(\"SENS:FREQ:CENT %fghz \" % center)\n self.ctrl.write(\"SENS:FREQ:SPAN %fghz \" % span)\n self.freqs_inquire()\n print(\"Freq Center = \" + str(1e-9 * self.ctrl.freqcent) + \"GHz\")\n print(\"Span = \" + str(1e-9 * self.ctrl.freqspan) + \"GHz\")\n self.sweep_inquire()\n self.reset_sweep()\n\n def set_center_freq_GHz(self, center_freq_GHz):\n self.ctrl.write(\"SENS:FREQ:CENT %fghz \" % center_freq_GHz)\n\n def set_span_GHz(self, span_GHz):\n self.ctrl.write(\"SENS:FREQ:SPAN %fghz \" % span_GHz)\n\n def freqs_inquire(self):\n self.ctrl.write(\"*WAI\")\n self.ctrl.freqstart = float(self.ctrl.query(\"SENS:FREQ:STAR?\"))\n self.ctrl.write(\"*WAI\")\n self.ctrl.freqstop = float(self.ctrl.query(\"SENS:FREQ:STOP?\"))\n self.ctrl.write(\"*WAI\")\n self.ctrl.freqcent = float(self.ctrl.query(\"SENS:FREQ:CENT?\"))\n self.ctrl.write(\"*WAI\")\n self.ctrl.freqspan = float(self.ctrl.query(\"SENS:FREQ:SPAN?\"))\n self.ctrl.write(\"*WAI\")\n\n def set_avg(self, count=1, mode=\"sweep\"):\n if count > 1:\n self.ctrl.write(\"SENS:AVER ON\")\n self.ctrl.write(\"SENS:AVER:COUN %d\" % count)\n self.ctrl.write(\"SENS:AVER:MODE %s\" % mode)\n else:\n self.ctrl.write(\"SENS:AVER OFF\")\n self.avg_inquire()\n if self.ctrl.avestate == 1:\n print(\"Averaging ON\")\n print(\"Averaging COUNT = \" + 
str(self.ctrl.avecount))\n print(\"Averaging MODE = \" + self.ctrl.avemode.rstrip())\n elif self.ctrl.avestate == 0:\n print(\"Averaging OFF\")\n self.sweep_inquire()\n self.reset_sweep()\n\n def avg_inquire(self):\n self.ctrl.write(\"*WAI\")\n self.ctrl.avestate = int(self.ctrl.query(\"SENS:AVER:STAT?\"))\n self.ctrl.write(\"*WAI\")\n if self.ctrl.avestate == 1:\n self.ctrl.avemode = self.ctrl.query(\"SENS:AVER:MODE?\")\n self.ctrl.write(\"*WAI\")\n self.ctrl.avecount = int(self.ctrl.query(\"SENS:AVER:COUN?\"))\n\n def avg_clear(self):\n self.ctrl.write(\"SENS:AVER:CLE\")\n\n def set_ifbw(self, ifbw=100, track=None):\n self.ctrl.write(\"SENS:BWID:RES %d \" % ifbw)\n self.ctrl.write(\"*WAI\")\n # print(\"IF Bandwidth set to :\" + str(ifbw) + \"Hz\")\n if track == True:\n self.ctrl.write(\"SENS:BWID:TRAC ON\")\n elif track == False:\n self.ctrl.write(\"SENS:BWID:TRAC OFF\")\n self.ctrl.write(\"*WAI\")\n self.ifbw_inquire()\n print('IF Bandwidth set to: %.1fHz' % self.ctrl.ifbw)\n if self.ctrl.ifbwtrack == 1:\n print(\"IF Bandwidth Tracking ON\")\n elif self.ctrl.ifbwtrack == 0:\n print(\"IF Bandwidth Tracking OFF\")\n self.sweep_inquire()\n self.reset_sweep()\n\n def set_if_bw_Hz(self, if_bw_Hz):\n self.ctrl.write(F\"SENS:BWID:RES {if_bw_Hz}\")\n\n\n def ifbw_inquire(self):\n self.ctrl.write(\"*WAI\")\n self.ctrl.ifbw = float(self.ctrl.query(\"SENS:BWID:RES?\"))\n self.ctrl.write(\"*WAI\")\n self.ctrl.ifbwtrack = int(self.ctrl.query(\"SENS:BWID:TRAC?\"))\n self.ctrl.write(\"*WAI\")\n\n def set_port_power_dBm(self, port_power_dBm, port=1):\n self.ctrl.write(F\"SOUR:POW{port}:LEV {port_power_dBm} \")\n\n def set_power_on(self):\n self.ctrl.write(\"SOUR:POW1:MODE ON\")\n\n def set_power_off(self):\n self.ctrl.write(\"SOUR:POW1:MODE OFF\")\n\n def set_power(self, port=1, level=-5, state='ON'):\n if state == 'ON':\n if port == 1:\n self.ctrl.write(\"SOUR:POW1:LEV %f \" % level)\n #\n if port == 2:\n self.ctrl.write(\"SOUR:POW2:LEV %f \" % level)\n # self.ctrl.write(\"SOUR:POW2:MODE ON\")\n elif state == 'OFF':\n if port == 1:\n self.ctrl.write(\"SOUR:POW1:MODE OFF\")\n if port == 2:\n self.ctrl.write(\"SOUR:POW2:MODE OFF\")\n else:\n print(\"Port \" + str(port) + \" power state not recognized\")\n self.power_inquire()\n print(\"Port 1 Power set to: %.2fdBm\" % self.ctrl.powersource1)\n print(\"Port 2 Power set to: %.2fdBm\" % self.ctrl.powersource2)\n self.sweep_inquire()\n self.reset_sweep()\n\n def power_inquire(self):\n self.ctrl.write(\"*WAI\")\n self.ctrl.powersource1 = float(self.ctrl.query(\"SOUR:POW1:LEV?\"))\n self.ctrl.write(\"*WAI\")\n self.ctrl.powersource2 = float(self.ctrl.query(\"SOUR:POW2:LEV?\"))\n self.ctrl.write(\"*WAI\")\n\n def trig_sweep(self):\n self.sweep_inquire()\n print(\"\")\n print(\"Sweep time is %.2f seconds\" % float(self.ctrl.sweeptime))\n if self.ctrl.avestate == 1: # averaging ON\n self.avg_clear()\n # use stat oper cond ave to check that averaging is done\n for i in range(0, self.ctrl.avecount):\n self.ctrl.write(\"INIT:IMM\")\n self.ctrl.write(\"*WAI\")\n self.ctrl.query(\"*OPC?\")\n print(\"Sweep %d/%d finished\" % (i + 1, self.ctrl.avecount))\n self.ctrl.trig1 = True\n else: # averaging OFF\n if self.ctrl.trig1 == False:\n print(\"Triggering VNA Sweep\")\n self.ctrl.write(\"INIT:IMM\")\n self.ctrl.write(\"*WAI\")\n self.ctrl.query(\"*OPC?\")\n self.ctrl.trig1 = True\n print(\"Sweep finished\")\n\n def get_trace(self, trace=1, format=\"LM\"):\n if trace == 1:\n self.ctrl.write(\"CALC:PAR:SEL \\'Meas11\\'\")\n elif trace == 2:\n self.ctrl.write(\"CALC:PAR:SEL 
\\'Meas12\\'\")\n elif trace == 3:\n self.ctrl.write(\"CALC:PAR:SEL \\'Meas21\\'\")\n elif trace == 4:\n self.ctrl.write(\"CALC:PAR:SEL \\'Meas22\\'\")\n else:\n print(\"Not a recognized trace\")\n return 0\n # print(\"Triggering VNA Sweep\")\n # self.trig_sweep()\n self.ctrl.write(\"*WAI\")\n self.ctrl.write(\"CALC:DATA? SDATA\")\n rawtrace = self.ctrl.read()\n self.ctrl.write(\"*WAI\")\n tracesplit = rawtrace.split(\",\")\n if format == \"LM\":\n traceLM = []\n tracePH = []\n for i in range(0, len(tracesplit)):\n if i % 2 == 1:\n traceLM.append(10 * math.log10(float(tracesplit[i - 1]) ** 2 + float(tracesplit[i]) ** 2))\n tracePH.append(180 / math.pi * math.atan2(float(tracesplit[i]), float(tracesplit[i - 1])))\n return (traceLM, tracePH)\n elif format == \"RI\":\n traceR = []\n traceI = []\n for i in range(0, len(tracesplit)):\n if i % 2 == 1:\n traceR.append(float(tracesplit[i - 1]))\n traceI.append(float(tracesplit[i]))\n traceR = np.array(traceR)\n traceI = np.array(traceI)\n return (traceR, traceI)\n elif format == \"COM\":\n tracecom = []\n for i in range(0, len(tracesplit)):\n if i % 2 == 1:\n tracecom.append(tracesplit[i - 1] + 1j * tracesplit[i])\n return tracecom\n else:\n print(\"Data Format not recognized\")\n return 0\n\n def get_S21(self, format='LM'):\n self.ctrl.write(\"CALC:PAR:SEL \\'Meas21\\'\")\n # print(\"Triggering VNA Sweep\")\n # self.trig_sweep()\n self.ctrl.write(\"*WAI\")\n self.ctrl.write(\"CALC:DATA? SDATA\")\n rawtrace = self.ctrl.read()\n self.ctrl.write(\"*WAI\")\n tracesplit = rawtrace.split(\",\")\n if format == 'LM':\n traceLM = []\n tracePH = []\n for i in range(0, len(tracesplit)):\n if i % 2 == 1:\n traceLM.append(10.0 * math.log10(float(tracesplit[i - 1]) ** 2 + float(tracesplit[i]) ** 2))\n tracePH.append(180.0 / math.pi * math.atan2(float(tracesplit[i]), float(tracesplit[i - 1])))\n traceLM = np.array(traceLM)\n tracePH = np.array(tracePH)\n return (traceLM, tracePH)\n elif format == 'RI':\n traceR = []\n traceI = []\n for i in range(0, len(tracesplit)):\n if i % 2 == 1:\n traceR.append(float(tracesplit[i - 1]))\n traceI.append(float(tracesplit[i]))\n traceR = np.array(traceR)\n traceI = np.array(traceI)\n return traceR, traceI\n else:\n print('Format not recognized!')\n return 0\n\n def get_S12(self, format='LM'):\n self.ctrl.write(\"CALC:PAR:SEL \\'Meas12\\'\")\n # print(\"Triggering VNA Sweep\")\n # self.trig_sweep()\n self.ctrl.write(\"*WAI\")\n self.ctrl.write(\"CALC:DATA? SDATA\")\n rawtrace = self.ctrl.read()\n self.ctrl.write(\"*WAI\")\n tracesplit = rawtrace.split(\",\")\n if format == 'LM':\n traceLM = []\n tracePH = []\n for i in range(0, len(tracesplit)):\n if i % 2 == 1:\n traceLM.append(10.0 * math.log10(float(tracesplit[i - 1]) ** 2 + float(tracesplit[i]) ** 2))\n tracePH.append(180.0 / math.pi * math.atan2(float(tracesplit[i]), float(tracesplit[i - 1])))\n traceLM = np.array(traceLM)\n tracePH = np.array(tracePH)\n return (traceLM, tracePH)\n elif format == 'RI':\n traceR = []\n traceI = []\n for i in range(0, len(tracesplit)):\n if i % 2 == 1:\n traceR.append(float(tracesplit[i - 1]))\n traceI.append(float(tracesplit[i]))\n traceR = np.array(traceR)\n traceI = np.array(traceI)\n return (traceR, traceI)\n else:\n print('Format not recognized!')\n return 0\n\n def reset_sweep(self):\n self.ctrl.trig1 = False\n self.ctrl.trig2 = False",
"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport sys\nfrom scipy import signal, fftpack\nimport platform\nfrom submm_python_routines.KIDs import resonance_fitting as rf\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom typing import NamedTuple\n\n\n\n\"\"\"\nStandalone version of kidPy's find_KIDs_interactive\nuse fin\n\nIf you have already identified most of the resonators at indexes kid_idx\njust call the interactive plot object like so\nip = InteractivePlot(f,20*np.log10(np.abs(z)),kid_idx)\n\nif you want to use use the filtering and threshold resonator finder\ncall find_vna_sweep(f,z) like\nip = find_vna_sweep(f,z)\nget the kid indexes out from ip.kid_idx\nget the frequencies out from f[ip.kid_idx]\n\n\"\"\"\n\n\ndef open_stored_sweep(savepath,load_std = False):\n \"\"\"Opens sweep data\n inputs:\n char savepath: The absolute path where sweep data is saved\n ouputs:\n numpy array Is: The I values\n numpy array Qs: The Q values\"\"\"\n files = sorted(os.listdir(savepath))\n I_list, Q_list, stdI_list, stdQ_list = [], [], [], []\n for filename in files:\n if filename.startswith('I'):\n I_list.append(os.path.join(savepath, filename))\n if filename.startswith('Q'):\n Q_list.append(os.path.join(savepath, filename))\n if filename.startswith('stdI'):\n stdI_list.append(os.path.join(savepath, filename))\n if filename.startswith('stdQ'):\n stdQ_list.append(os.path.join(savepath, filename))\n Is = np.array([np.load(filename) for filename in I_list])\n Qs = np.array([np.load(filename) for filename in Q_list])\n if len(stdI_list) >0:\n std_Is = np.array([np.load(filename) for filename in stdI_list])\n std_Qs = np.array([np.load(filename) for filename in stdQ_list])\n if load_std:\n return Is, Qs, std_Is, std_Qs\n else:\n return Is, Qs\n\nclass SingleWindow(NamedTuple):\n left_max: int\n left_fitter_pad: int\n left_pad: int\n left_window: int\n minima: int\n right_window: int\n right_pad: int\n right_fitter_pad: int\n right_max: int\n\n\nclass InteractivePlot(object):\n \"\"\"\n Convention is to supply the data in magnitude units i.e. 
20*np.log10(np.abs(z))\n \"\"\"\n\n def __init__(self, chan_freqs, data, kid_idx, f_old=None, data_old=None, kid_idx_old=None):\n plt.rcParams['keymap.forward'] = ['v']\n plt.rcParams['keymap.back'] = ['c', 'backspace'] # remove arrows from back and forward on plot\n self.chan_freqs = chan_freqs\n self.data = data\n self.f_old = f_old\n self.data_old = data_old\n self.kid_idx_old = kid_idx_old\n self.kid_idx = kid_idx\n self.lim_shift_factor = 0.2\n self.zoom_factor = 0.1 # no greater than 0.5\n self.kid_idx_len = len(kid_idx)\n self.fig = plt.figure(1000, figsize=(16, 6))\n self.ax = self.fig.add_subplot(111)\n self.fig.canvas.mpl_connect('key_press_event', self.on_key_press)\n self.fig.canvas.mpl_connect('key_release_event', self.on_key_release)\n self.fig.canvas.mpl_connect('button_press_event', self.onClick)\n self.l1, = self.ax.plot(self.chan_freqs, self.data)\n self.p1, = self.ax.plot(self.chan_freqs[self.kid_idx], self.data[self.kid_idx], \"r*\", markersize=8)\n self.text_dict = {}\n for i in range(0, len(self.kid_idx)):\n self.text_dict[i] = plt.text(self.chan_freqs[self.kid_idx][i], self.data[self.kid_idx][i], str(i))\n\n if isinstance(self.f_old, np.ndarray):\n self.l2, = self.ax.plot(self.f_old, self.data_old, color=\"C0\", alpha=0.25)\n self.p2, = self.ax.plot(self.f_old[self.kid_idx_old], self.data_old[self.kid_idx_old], \"r*\", markersize=8,\n alpha=0.1)\n self.text_dict_old = {}\n for i in range(0, len(self.kid_idx_old)):\n self.text_dict_old[i] = plt.text(self.f_old[self.kid_idx_old][i], self.data_old[self.kid_idx_old][i],\n str(i), color='Grey')\n\n self.shift_is_held = False\n self.control_is_held = False\n self.add_list = []\n self.delete_list = []\n if platform.system() == 'Darwin':\n print(\"please hold either the a or d key \\n while right clicking to add or delete points\")\n else:\n print(\"please hold either the shift or control key \\n while right clicking to add or remove points\")\n print(\"You can use the arrow keys to pan around\")\n print(\"You can use z and x keys to zoom in and out\")\n print(\"close all plots when finished\")\n plt.xlabel('frequency (MHz)')\n plt.ylabel('dB')\n plt.show(block=True)\n\n def on_key_press(self, event):\n # mac or windows\n if platform.system().lower() == 'darwin':\n if event.key == 'a':\n self.shift_is_held = True\n if event.key == 'd':\n self.control_is_held = True\n else:\n if event.key == 'shift':\n self.shift_is_held = True\n if event.key == 'control':\n self.control_is_held = True\n\n if event.key == 'right': # pan right\n xlim_left, xlim_right = self.ax.get_xlim()\n xlim_size = xlim_right - xlim_left\n self.ax.set_xlim(xlim_left + self.lim_shift_factor * xlim_size,\n xlim_right + self.lim_shift_factor * xlim_size)\n plt.draw()\n\n if event.key == 'left': # pan left\n xlim_left, xlim_right = self.ax.get_xlim()\n xlim_size = xlim_right - xlim_left\n self.ax.set_xlim(xlim_left - self.lim_shift_factor * xlim_size,\n xlim_right - self.lim_shift_factor * xlim_size)\n plt.draw()\n\n if event.key == 'up': # pan up\n ylim_left, ylim_right = self.ax.get_ylim()\n ylim_size = ylim_right - ylim_left\n self.ax.set_ylim(ylim_left + self.lim_shift_factor * ylim_size,\n ylim_right + self.lim_shift_factor * ylim_size)\n plt.draw()\n\n if event.key == 'down': # pan down\n ylim_left, ylim_right = self.ax.get_ylim()\n ylim_size = ylim_right - ylim_left\n self.ax.set_ylim(ylim_left - self.lim_shift_factor * ylim_size,\n ylim_right - self.lim_shift_factor * ylim_size)\n plt.draw()\n\n if event.key == 'z': # zoom in\n xlim_left, xlim_right = 
self.ax.get_xlim()\n ylim_left, ylim_right = self.ax.get_ylim()\n xlim_size = xlim_right - xlim_left\n ylim_size = ylim_right - ylim_left\n self.ax.set_xlim(xlim_left + self.zoom_factor * xlim_size, xlim_right - self.zoom_factor * xlim_size)\n self.ax.set_ylim(ylim_left + self.zoom_factor * ylim_size, ylim_right - self.zoom_factor * ylim_size)\n plt.draw()\n\n if event.key == 'x': # zoom out\n xlim_left, xlim_right = self.ax.get_xlim()\n ylim_left, ylim_right = self.ax.get_ylim()\n xlim_size = xlim_right - xlim_left\n ylim_size = ylim_right - ylim_left\n self.ax.set_xlim(xlim_left - self.zoom_factor * xlim_size, xlim_right + self.zoom_factor * xlim_size)\n self.ax.set_ylim(ylim_left - self.zoom_factor * ylim_size, ylim_right + self.zoom_factor * ylim_size)\n plt.draw()\n\n def on_key_release(self, event):\n # windows or mac\n if platform.system() == 'Darwin':\n if event.key == 'a':\n self.shift_is_held = False\n if event.key == 'd':\n self.control_is_held = False\n else:\n if event.key == 'shift':\n self.shift_is_held = False\n if event.key == 'control':\n self.control_is_held = False\n\n def onClick(self, event):\n if event.button == 3:\n if self.shift_is_held: # add point\n print(\"adding point\", event.xdata)\n self.kid_idx = np.hstack((self.kid_idx, np.argmin(np.abs(self.chan_freqs - event.xdata))))\n self.kid_idx = self.kid_idx[np.argsort(self.kid_idx)]\n self.refresh_plot()\n elif self.control_is_held: # delete point\n print(\"removing point\", event.xdata)\n delete_index = np.argmin(np.abs(self.chan_freqs[self.kid_idx] - event.xdata))\n self.kid_idx = np.delete(self.kid_idx, delete_index)\n self.refresh_plot()\n # self.delete_list.append(event.xdata)\n # plt.plot(event.xdata,event.ydata,\"x\",markersize = 20,mew = 5)\n else:\n print(\"please hold either the shift or control key while right clicking to add or remove points\")\n\n def refresh_plot(self):\n self.p1.set_data(self.chan_freqs[self.kid_idx], self.data[self.kid_idx])\n for i in range(0, self.kid_idx_len):\n self.text_dict[i].set_text(\"\") # clear all of the texts\n self.text_dict = {}\n for i in range(0, len(self.kid_idx)):\n self.text_dict[i] = plt.text(self.chan_freqs[self.kid_idx][i], self.data[self.kid_idx][i], str(i))\n self.kid_idx_len = len(self.kid_idx)\n plt.draw()\n\n\nclass InteractiveThresholdPlot(object):\n def __init__(self, f_Hz, s21_mag, peak_threshold_dB, spacing_threshold_Hz=None,\n window_pad_factor=1.2, fitter_pad_factor=5.0, debug_mode=False):\n self.peak_threshold_dB = peak_threshold_dB\n self.spacing_threshold_Hz = spacing_threshold_Hz\n\n self.window_pad_factor = window_pad_factor\n self.fitter_pad_factor = fitter_pad_factor\n self.f_Hz = f_Hz\n self.f_GHz = f_Hz * 1.0e-9\n self.s21_mag = s21_mag\n\n self.regions = None\n self.ilo = None\n self.local_minima = None\n self.minima_as_windows = None\n self.calc_regions()\n\n if not debug_mode:\n self.fig = plt.figure(2, figsize=(16, 6))\n self.ax = self.fig.add_subplot(111)\n self.fig.canvas.mpl_connect('key_press_event', self.on_key_press)\n self.l1, = self.ax.plot(self.f_GHz, self.s21_mag)\n\n self.p1, = self.ax.plot(self.f_GHz[self.ilo], self.s21_mag[self.ilo], \"r*\")\n self.p2, = self.ax.plot(self.f_GHz[self.local_minima], self.s21_mag[self.local_minima], \"b*\")\n print(\"Press up or down to change the threshold by 0.1 dB or press t to enter a custom threshold value.\")\n print(\"Close all plots when finished\")\n plt.xlabel('frequency (GHz)')\n plt.ylabel('dB')\n self.ax.set_title(F\"Threshold: 3 adjacent points under {'%2.2f' % 
self.peak_threshold_dB} dB.\")\n plt.show(block=True)\n\n def on_key_press(self, event):\n # print event.key\n # has to be shift and ctrl because remote viewers only forward\n # certain key combinations\n # print event.key == 'd'\n if event.key == 'up':\n self.peak_threshold_dB = self.peak_threshold_dB + 0.1\n self.refresh_plot()\n if event.key == 'down':\n self.peak_threshold_dB = self.peak_threshold_dB - 0.1\n self.refresh_plot()\n if event.key == 't':\n self.peak_threshold_dB = np.float(input(\"What threshold would you like in dB? \"))\n self.refresh_plot()\n\n def refresh_plot(self):\n self.calc_regions()\n self.p1.set_data(self.f_GHz[self.ilo], self.s21_mag[self.ilo])\n self.p2.set_data(self.f_GHz[self.local_minima], self.s21_mag[self.local_minima])\n self.ax.set_title(F\"Threshold: 3 adjacent points under {'%2.2f' % self.peak_threshold_dB} dB.\")\n plt.draw()\n\n def calc_regions(self):\n bool_threshhold = self.s21_mag < -1.0 * self.peak_threshold_dB\n # self.ilo = np.where(self.s21_mag < -1.0 * self.peak_threshold_dB)[0]\n self.ilo = []\n self.regions = []\n self.local_minima = []\n is_in_theshhold_last = False\n sub_region = []\n for test_index, is_in_theshhold in list(enumerate(bool_threshhold)):\n if is_in_theshhold:\n self.ilo.append(test_index)\n sub_region.append(test_index)\n else:\n if is_in_theshhold_last:\n # when the last point was in, but not this point it is time to finish the old region\n self.regions.append(sub_region)\n sub_region = []\n is_in_theshhold_last = is_in_theshhold\n else:\n if sub_region:\n self.regions.append(sub_region)\n\n window_calc_data = []\n # calculate the local minima in a simple brute force method\n for region in self.regions:\n minima_this_region = []\n minima_this_region_index = []\n found_this_region = False\n if len(region) > 2:\n for region_index in range(len(region) - 2):\n middle_region_index = region_index + 1\n middle_data_index = region[middle_region_index]\n left = self.s21_mag[region[region_index]]\n middle = self.s21_mag[middle_data_index]\n right = self.s21_mag[region[region_index + 2]]\n if middle < left and middle <= right:\n found_this_region = True\n self.local_minima.append(middle_data_index)\n minima_this_region.append(middle_data_index)\n minima_this_region_index.append(middle_region_index)\n if found_this_region:\n window_calc_data.append((region, minima_this_region_index, minima_this_region))\n\n # calculate the resonator windows\n self.minima_as_windows = []\n data_index_minima_left = None\n single_window = None\n right_window_not_found = False\n data_index_bound = 0\n for region, minima_this_region_index, minima_this_region in window_calc_data:\n # deal with spacing conflicts in the same region\n minima_this_region, minima_this_region_index = \\\n self.resolve_spacing_conflicts(minima_this_region=minima_this_region,\n minima_this_region_index=minima_this_region_index)\n data_index_region_bound_left = region[0]\n data_index_region_bound_right = region[-1]\n # combine minima in the same region with a spacing conflict\n for region_index, data_index_minima in zip(minima_this_region_index, minima_this_region):\n # halfway to the next resonator\n if single_window is not None:\n data_index_bound = int(np.round((data_index_minima_left + data_index_minima) / 2))\n if right_window_not_found:\n single_window[\"right_max\"] = single_window[\"right_pad\"] = \\\n single_window[\"right_fitter_pad\"] = single_window[\"right_window\"] = data_index_bound\n else:\n single_window[\"right_max\"] = data_index_bound\n test_right_pad = 
single_window[\"minima\"] \\\n + int(np.round((single_window[\"right_window\"] - single_window[\"minima\"]) \\\n * self.window_pad_factor))\n if single_window[\"right_max\"] < test_right_pad:\n single_window[\"right_pad\"] = single_window[\"right_max\"]\n else:\n single_window[\"right_pad\"] = test_right_pad\n test_right_fitter_pad = single_window[\"minima\"] \\\n + int(np.round((single_window[\"right_window\"] - single_window[\"minima\"]) \\\n * self.fitter_pad_factor))\n if single_window[\"right_max\"] < test_right_fitter_pad:\n single_window[\"right_fitter_pad\"] = single_window[\"right_max\"]\n else:\n single_window[\"right_fitter_pad\"] = test_right_fitter_pad\n\n self.minima_as_windows.append(SingleWindow(**single_window))\n # the window where resonator is located\n if region_index == minima_this_region_index[0]:\n data_index_boundary_left = data_index_region_bound_left\n else:\n data_index_boundary_left = data_index_bound\n if region_index == minima_this_region_index[-1]:\n data_index_boundary_right = data_index_region_bound_right\n right_window_not_found = False\n else:\n right_window_not_found = True\n if right_window_not_found:\n single_window = {\"left_max\": data_index_bound, \"left_window\": data_index_boundary_left,\n \"minima\": data_index_minima}\n else:\n single_window = {\"left_max\": data_index_bound, \"left_window\": data_index_boundary_left,\n \"minima\": data_index_minima, \"right_window\": data_index_boundary_right}\n # window padding\n test_left_pad = single_window[\"minima\"] \\\n - int(np.round((single_window[\"minima\"] - single_window[\"left_window\"])\n * self.window_pad_factor))\n if test_left_pad < single_window[\"left_max\"]:\n single_window[\"left_pad\"] = single_window[\"left_max\"]\n else:\n single_window[\"left_pad\"] = test_left_pad\n test_left_fitter_pad = single_window[\"minima\"] \\\n - int(np.round((single_window[\"minima\"] - single_window[\"left_window\"])\n * self.fitter_pad_factor))\n if test_left_fitter_pad < single_window[\"left_max\"]:\n single_window[\"left_fitter_pad\"] = single_window[\"left_max\"]\n else:\n single_window[\"left_fitter_pad\"] = test_left_fitter_pad\n\n data_index_minima_left = single_window[\"minima\"]\n else:\n # finish the last step in the loop\n data_index_bound = len(self.s21_mag)\n if right_window_not_found:\n single_window[\"right_max\"] = single_window[\"right_window\"] = data_index_bound\n else:\n single_window[\"right_max\"] = data_index_bound\n test_right_pad = single_window[\"minima\"] + \\\n int(np.round((single_window[\"right_window\"] - single_window[\"minima\"])\n * self.window_pad_factor))\n if single_window[\"right_max\"] < test_right_pad:\n single_window[\"right_pad\"] = single_window[\"right_max\"]\n else:\n single_window[\"right_pad\"] = test_right_pad\n test_right_fitter_pad = single_window[\"minima\"] \\\n + int(np.round((single_window[\"right_window\"] - single_window[\"minima\"])\n * self.fitter_pad_factor))\n if single_window[\"right_max\"] < test_right_fitter_pad:\n single_window[\"right_fitter_pad\"] = single_window[\"right_max\"]\n else:\n single_window[\"right_fitter_pad\"] = test_right_fitter_pad\n self.minima_as_windows.append(SingleWindow(**single_window))\n self.local_minima = [single_window.minima for single_window in self.minima_as_windows]\n # spacing conflicts across all regions\n self.local_minima, self.minima_as_windows = \\\n self.resolve_spacing_conflicts(minima_this_region=self.local_minima,\n minima_this_region_index=self.minima_as_windows)\n\n def resolve_spacing_conflicts(self, 
minima_this_region, minima_this_region_index):\n found_spacing_conflict = True\n while found_spacing_conflict:\n found_spacing_conflict = False\n number_of_minima_this_region = len(minima_this_region)\n if number_of_minima_this_region > 1:\n for counter in range(number_of_minima_this_region - 1):\n data_index_minima_left_test = minima_this_region[counter]\n data_index_minima_right_test = minima_this_region[counter + 1]\n minima_spacing_Hz = abs(\n self.f_Hz[data_index_minima_left_test] - self.f_Hz[data_index_minima_right_test])\n if minima_spacing_Hz < self.spacing_threshold_Hz:\n # minima are too close:\n print(F\"Spacing Conflict in same threshold region.\")\n print(F\" Allowed spacing (MHz): {'%3.3f' % (self.spacing_threshold_Hz * 1.0e-6)}\")\n print(F\" Minima spacing (MHz): {'%3.3f' % (minima_spacing_Hz * 1.0e-6)}\")\n # keep the lowest of the minima\n value_left_minima = self.s21_mag[data_index_minima_left_test]\n value_right_minima = self.s21_mag[data_index_minima_right_test]\n if value_left_minima < value_right_minima:\n index_location_to_remove = counter + 1\n index_location_to_keep = counter\n else:\n index_location_to_remove = counter\n index_location_to_keep = counter + 1\n # data for the print statement\n data_index_kept = minima_this_region[index_location_to_keep]\n data_index_removed = minima_this_region[index_location_to_remove]\n value_kept_minima = self.s21_mag[data_index_kept]\n f_MHz_kept_minima = self.f_GHz[data_index_kept] * 1.0e3\n value_removed_minima = self.s21_mag[data_index_removed]\n f_MHz_removed_minima = self.f_GHz[data_index_removed] * 1.0e3\n # where the data is removed\n minima_this_region_index.pop(index_location_to_remove)\n minima_this_region.pop(index_location_to_remove)\n # make the users see what decisions the code is making\n print(F\"Minima Kept: {value_kept_minima} dbM at {'%3.3f' % f_MHz_kept_minima} MHz\")\n print(F\"Minima Removed: {value_removed_minima} dbM at {'%3.3f' % f_MHz_removed_minima} MHz\\n\")\n # stop the loop here and restart from scratch with one less minima\n found_spacing_conflict = True\n break\n return minima_this_region, minima_this_region_index\n\n\ndef compute_dI_and_dQ(I, Q, freq=None, filterstr='SG', do_deriv=True):\n \"\"\"\n Given I,Q,freq arrays\n input filterstr = 'SG' for sav-gol filter with builtin gradient, 'SGgrad' savgol then apply gradient to filtered\n do_deriv: if want to look at filtered non differentiated data.\n \"\"\"\n if freq is None:\n df = 1.0\n else:\n df = freq[1] - freq[0]\n dI = filtered_differential(I, df, filtertype=filterstr, do_deriv=do_deriv)\n dQ = filtered_differential(Q, df, filtertype=filterstr, do_deriv=do_deriv)\n return dI, dQ\n\n\ndef filtered_differential(data, df, filtertype=None, do_deriv=True):\n \"\"\"\n take 1d array data with spacing df. 
return filtered version of data depending on filterrype\n \"\"\"\n window = 13\n n = 3\n if filtertype is None:\n out = np.gradient(data, df)\n elif filtertype.lower() == 'sg':\n if do_deriv == True:\n out = signal.savgol_filter(data, window, n, deriv=1, delta=df)\n else:\n out = signal.savgol_filter(data, window, n, deriv=0, delta=df)\n elif filtertype.lower() == 'sggrad':\n tobegrad = signal.savgol_filter(data, window, n)\n out = np.gradient(tobegrad, df)\n else:\n raise KeyError(F\"filtertype: {filtertype} is not recognized.\")\n return out\n\n\ndef filter_trace(path, bb_freqs, sweep_freqs):\n chan_I, chan_Q = open_stored_sweep(path)\n channels = np.arange(np.shape(chan_I)[1])\n mag = np.zeros((len(bb_freqs), len(sweep_freqs)))\n chan_freqs = np.zeros((len(bb_freqs), len(sweep_freqs)))\n for chan in channels:\n mag[chan] = (np.sqrt(chan_I[:, chan] ** 2 + chan_Q[:, chan] ** 2))\n chan_freqs[chan] = (sweep_freqs + bb_freqs[chan]) / 1.0e6\n # mag = np.concatenate((mag[len(mag)/2:], mag[0:len(mag)/2]))\n mags = 20 * np.log10(mag / np.max(mag))\n mags = np.hstack(mags)\n # chan_freqs = np.concatenate((chan_freqs[len(chan_freqs)/2:],chan_freqs[0:len(chan_freqs)/2]))\n chan_freqs = np.hstack(chan_freqs)\n return chan_freqs, mags\n\n\ndef lowpass_cosine(y, tau, f_3db, width, padd_data=True):\n # padd_data = True means we are going to symmetric copies of the data to the start and stop\n # to reduce/eliminate the discontinuities at the start and stop of a dataset due to filtering\n #\n # False means we're going to have transients at the start and stop of the data\n # kill the last data point if y has an odd length\n if np.mod(len(y), 2):\n y = y[0:-1]\n # add the weird padd\n # so, make a backwards copy of the data, then the data, then another backwards copy of the data\n if padd_data:\n y = np.append(np.append(np.flipud(y), y), np.flipud(y))\n # take the FFT\n ffty = fftpack.fft(y)\n ffty = fftpack.fftshift(ffty)\n # make the companion frequency array\n delta = 1.0 / (len(y) * tau)\n nyquist = 1.0 / (2.0 * tau)\n freq = np.arange(-nyquist, nyquist, delta)\n # turn this into a positive frequency array\n print((len(ffty) // 2))\n pos_freq = freq[(len(ffty) // 2):]\n # make the transfer function for the first half of the data\n i_f_3db = min(np.where(pos_freq >= f_3db)[0])\n f_min = f_3db - (width / 2.0)\n i_f_min = min(np.where(pos_freq >= f_min)[0])\n f_max = f_3db + (width / 2.0)\n i_f_max = min(np.where(pos_freq >= f_max)[0])\n transfer_function = np.zeros(len(y) // 2)\n transfer_function[0:i_f_min] = 1\n transfer_function[i_f_min:i_f_max] = (1 + np.sin(-np.pi * ((freq[i_f_min:i_f_max] - freq[i_f_3db]) / width))) / 2.0\n transfer_function[i_f_max:(len(freq) // 2)] = 0\n # symmetrize this to be [0 0 0 ... .8 .9 1 1 1 1 1 1 1 1 .9 .8 ... 
0 0 0] to match the FFT\n transfer_function = np.append(np.flipud(transfer_function), transfer_function)\n # apply the filter, undo the fft shift, and invert the fft\n filtered = np.real(fftpack.ifft(fftpack.ifftshift(ffty * transfer_function)))\n # remove the padd, if we applied it\n if padd_data:\n filtered = filtered[(len(y) // 3):(2 * (len(y) // 3))]\n # return the filtered data\n return filtered\n\n\ndef find_vna_sweep(f_Hz, z, smoothing_scale_Hz=5.0e6, spacing_threshold_Hz=1.0e5):\n \"\"\"\n f is frequencies (Hz)\n z is complex S21\n Smoothing scale (Hz)\n spacing threshold (Hz)\n \"\"\"\n # first plot data and filter function before removing filter function\n s21_mags = 20 * np.log10(np.abs(z))\n filtermags = lowpass_cosine(y=s21_mags,\n tau=f_Hz[1] - f_Hz[0],\n f_3db=1. / smoothing_scale_Hz,\n width=0.1 * (1.0 / smoothing_scale_Hz),\n padd_data=True)\n # the cosine filter drops the last point is the array has an pdd number of points\n len_filtered = len(filtermags)\n s21_mags = s21_mags[:len_filtered]\n f_Hz = f_Hz[:len_filtered]\n f_GHz = f_Hz * 1.0e-9\n # calculations for peak spacing (rejection based on threshold)\n highpass_mags = s21_mags - filtermags\n\n # results plot for filter\n plt.figure(2)\n plt.plot(f_GHz, s21_mags, 'b', label='#nofilter')\n plt.plot(f_GHz, filtermags, 'g', label='Filtered')\n plt.xlabel('frequency (GHz)')\n plt.ylabel('dB')\n plt.legend()\n plt.show()\n\n # identify peaks using the interactive threshold plot\n ipt = InteractiveThresholdPlot(f_Hz=f_Hz,\n s21_mag=highpass_mags,\n peak_threshold_dB=1.5,\n spacing_threshold_Hz=spacing_threshold_Hz)\n\n # Zero everything but the resonators\n highpass_mags[highpass_mags > -1.0 * ipt.peak_threshold_dB] = 0\n\n # the spacing thresholding was move to be inside the interactive threshold class\n kid_idx = ipt.local_minima\n ip = InteractivePlot(f_Hz, highpass_mags, kid_idx)\n return ip\n\n\ndef slice_vna(f, z, kid_index, q_slice=2000):\n # make f in Hz for fitting\n # Q = f/(delta f) for fitting is determined by the lowest frequencies assumed to be at index 0\n # delta f = f/Q\n df = f[1] - f[0]\n n_iq_points = int(f[0] / q_slice // df)\n print(n_iq_points)\n res_freq_array = np.zeros((len(kid_index), n_iq_points))\n res_array = np.zeros((len(kid_index), n_iq_points)).astype('complex')\n print(res_array.dtype)\n for i in range(0, len(kid_index)):\n a = kid_index[i] - n_iq_points // 2 - 1\n b = kid_index[i] + n_iq_points // 2\n\n res_freq_array[i, :] = f[a:b]\n res_array[i, :] = z[a:b]\n # if i == 4:\n # plt.plot(res_freq_array[i,:],20*np.log10(np.abs(res_array[i,:])))\n if i < len(kid_index) - 1: # dont check last res\n # print(i)\n if kid_index[i + 1] - kid_index[i] < n_iq_points: # collision at higher frequency\n high_cutoff = int((kid_index[i + 1] + kid_index[i]) / 2)\n # print(i,a,high_cutoff,b)\n res_freq_array[i, high_cutoff - a:] = np.nan\n res_array[i, high_cutoff - a:] = np.nan * (1 + 1j)\n if i != 0: # dont check first res\n # print(i)\n if kid_index[i] - kid_index[i - 1] < n_iq_points:\n low_cutoff = int((kid_index[i] + kid_index[i - 1]) / 2)\n # print(i,a,low_cutoff,b)\n res_freq_array[i, :low_cutoff - a] = np.nan\n res_array[i, :low_cutoff - a] = np.nan * (1 + 1j)\n # if i == 4:\n # plt.plot(res_freq_array[i,:],20*np.log10(np.abs(res_array[i,:])),'--')\n # plt.show()\n return res_freq_array, res_array\n\n\ndef fit_slices(res_freq_array, res_array, do_plots=True, plot_filename='fits'):\n pdf_pages = PdfPages(plot_filename + \".pdf\")\n fits_dict_mag = {}\n fits_dict_iq = {}\n for i in range(0, 
res_freq_array.shape[0]):\n if do_plots:\n fig = plt.figure(i, figsize=(12, 6))\n try:\n fit = rf.fit_nonlinear_iq(res_freq_array[i, :][~np.isnan(res_freq_array[i, :])],\n res_array[i, :][~np.isnan(res_array[i, :])])\n fits_dict_iq[i] = fit\n if do_plots:\n plt.subplot(121)\n plt.plot(np.real(res_array[i, :]), np.imag(res_array[i, :]), 'o', label='data')\n plt.plot(np.real(fit['fit_result']), np.imag(fit['fit_result']), label='fit')\n plt.plot(np.real(fit['x0_result']), np.imag(fit['x0_result']), label='guess')\n plt.legend()\n except:\n print(\"could not fit\")\n fits_dict_iq[i] = 'bad fit'\n try:\n fit2 = rf.fit_nonlinear_mag(res_freq_array[i, :][~np.isnan(res_freq_array[i, :])],\n res_array[i, :][~np.isnan(res_array[i, :])])\n fits_dict_mag[i] = fit2\n if do_plots:\n plt.subplot(122)\n plt.plot(res_freq_array[i, :], 20 * np.log10(np.abs(res_array[i, :])), label='data')\n plt.plot(res_freq_array[i, :][~np.isnan(res_freq_array[i, :])],\n 10 * np.log10(np.abs(fit2['fit_result'])), label='fit')\n plt.plot(res_freq_array[i, :][~np.isnan(res_freq_array[i, :])],\n 10 * np.log10(np.abs(fit2['x0_result'])), label='guess')\n plt.legend()\n except:\n print(\"could not fit\")\n fits_dict_mag[i] = 'bad fit'\n pdf_pages.savefig(fig)\n plt.close()\n pdf_pages.close()\n\n return fits_dict_iq, fits_dict_mag\n\n\ndef retune_vna(f, z, kid_index, n_points_look_around=0, look_low_high=[0, 0], f_old=None, z_old=None,\n kid_index_old=None):\n \"\"\"\n This is a program for when the resonances move and you need to retune the indexes of the resonators\n use n_point_look_around = 10 to look to lower and higher frequencies within 10 data points to find a new min\n use look_left_right = [10,20] to look for a new min 10 points to the lower frequencies and 20 points to higher frequencies\n\n if you would like to have the old data and kid indexes displayed in the background suppy\n f_old, z_old, kid_index old\n \"\"\"\n if n_points_look_around > 0:\n for i in range(0, len(kid_index)):\n new_index = np.argmin(\n 20 * np.log10(np.abs(z[kid_index[i] - n_points_look_around:kid_index[i] + n_points_look_around]))) + \\\n kid_index[i] - n_points_look_around\n kid_index[i] = new_index\n\n ip = InteractivePlot(f, 20 * np.log10(np.abs(z)), kid_index, f_old=f_old, data_old=20 * np.log10(np.abs(z_old)),\n kid_idx_old=kid_index_old)\n\n return ip\n"
] | [
[
"matplotlib.pyplot.text",
"matplotlib.pyplot.show",
"matplotlib.transforms.blended_transform_factory",
"matplotlib.pyplot.subplots"
],
[
"numpy.array"
],
[
"matplotlib.pyplot.legend",
"numpy.imag",
"numpy.sqrt",
"numpy.flipud",
"scipy.fftpack.fft",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.round",
"numpy.where",
"scipy.signal.savgol_filter",
"numpy.hstack",
"matplotlib.backends.backend_pdf.PdfPages",
"numpy.arange",
"numpy.sin",
"numpy.real",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.close",
"numpy.load",
"matplotlib.pyplot.figure",
"numpy.isnan",
"numpy.delete",
"numpy.argsort",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"scipy.fftpack.fftshift",
"numpy.abs",
"numpy.gradient",
"matplotlib.pyplot.draw",
"numpy.shape",
"matplotlib.pyplot.xlabel",
"scipy.fftpack.ifftshift"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
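The filtered_differential helper in the code field above chooses between a Savitzky-Golay derivative ('SG') and a plain gradient of the smoothed trace ('SGgrad'). A minimal standalone sketch of that pattern follows; the toy frequency axis and trace are invented for illustration, while the window length 13 and polynomial order 3 are the values used in the source.

import numpy as np
from scipy import signal

f = np.linspace(0.0, 1.0, 501)            # toy frequency axis
y = np.sin(2 * np.pi * 5 * f)             # toy trace standing in for |S21|
df = f[1] - f[0]

# 'SG' branch: smooth and differentiate in a single savgol_filter call
dy_sg = signal.savgol_filter(y, 13, 3, deriv=1, delta=df)

# 'SGgrad' branch: smooth first, then take a plain gradient of the filtered data
dy_sggrad = np.gradient(signal.savgol_filter(y, 13, 3), df)

print(dy_sg[:3], dy_sggrad[:3])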
andreas-eberle/agents | [
"27b9498689ea5b8f69fc77ada752e05e38192852",
"c355882d35273b889b84f90071d91ee6354d27d3",
"c355882d35273b889b84f90071d91ee6354d27d3"
] | [
"tf_agents/networks/network.py",
"tf_agents/agents/categorical_dqn/categorical_dqn_agent.py",
"tf_agents/policies/q_policy.py"
] | [
"# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Base extension to Keras network to simplify copy operations.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport sys\nimport six\nimport tensorflow as tf\n\nfrom tensorflow.keras import layers # pylint: disable=unused-import\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import time_step\n\nfrom tensorflow.python.keras.engine import network as keras_network # TF internal\nfrom tensorflow.python.util import tf_decorator # TF internal\nfrom tensorflow.python.util import tf_inspect # TF internal\n\n\nclass _NetworkMeta(abc.ABCMeta):\n \"\"\"Meta class for Network object.\n\n We mainly use this class to capture all args to `__init__` of all `Network`\n instances, and store them in `instance._saved_kwargs`. This in turn is\n used by the `instance.copy` method.\n \"\"\"\n\n def __new__(mcs, classname, baseclasses, attrs):\n \"\"\"Control the creation of subclasses of the Network class.\n\n Args:\n classname: The name of the subclass being created.\n baseclasses: A tuple of parent classes.\n attrs: A dict mapping new attributes to their values.\n\n Returns:\n The class object.\n\n Raises:\n RuntimeError: if the class __init__ has *args in its signature.\n \"\"\"\n if baseclasses[0] == keras_network.Network:\n # This is just Network below. Return early.\n return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)\n\n init = attrs.get(\"__init__\", None)\n\n if not init:\n # This wrapper class does not define an __init__. When someone creates\n # the object, the __init__ of its parent class will be called. We will\n # call that __init__ instead separately since the parent class is also a\n # subclass of Network. Here just create the class and return.\n return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)\n\n arg_spec = tf_inspect.getargspec(init)\n if arg_spec.varargs is not None:\n raise RuntimeError(\n \"%s.__init__ function accepts *args. This is not allowed.\" %\n classname)\n\n def capture_init(self, *args, **kwargs):\n if len(args) > len(arg_spec.args) + 1:\n # Error case: more inputs than args. 
Call init so that the appropriate\n # error can be raised to the user.\n init(self, *args, **kwargs)\n for i, arg in enumerate(args):\n # Add +1 to skip `self` in arg_spec.args.\n kwargs[arg_spec.args[1 + i]] = arg\n init(self, **kwargs)\n setattr(self, \"_saved_kwargs\", kwargs)\n\n attrs[\"__init__\"] = tf_decorator.make_decorator(init, capture_init)\n return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)\n\n\[email protected]_metaclass(_NetworkMeta)\nclass Network(keras_network.Network):\n \"\"\"Base extension to Keras network to simplify copy operations.\"\"\"\n\n def __init__(self, input_tensor_spec, state_spec, name):\n super(Network, self).__init__(name=name)\n self._input_tensor_spec = input_tensor_spec\n self._state_spec = state_spec\n\n @property\n def state_spec(self):\n return self._state_spec\n\n def _build(self):\n if not self.built and self.input_tensor_spec is not None:\n random_input = tensor_spec.sample_spec_nest(\n self.input_tensor_spec, outer_dims=(1,))\n step_type = tf.expand_dims(time_step.StepType.FIRST, 0)\n self.__call__(random_input, step_type, None)\n\n @property\n def input_tensor_spec(self):\n \"\"\"Returns the spec of the input to the network of type InputSpec.\"\"\"\n return self._input_tensor_spec\n\n @property\n def variables(self):\n \"\"\"Return the variables for all the network layers.\n\n If the network hasn't been built, builds it on random input (generated\n using self._input_tensor_spec) to build all the layers and their variables.\n\n Raises:\n ValueError: If the network fails to build.\n \"\"\"\n try:\n self._build()\n except ValueError as e:\n traceback = sys.exc_info()[2]\n six.reraise(\n ValueError, \"Failed to call build on the network when accessing \"\n \"variables. Message: {!r}.\".format(e), traceback)\n return self.weights\n\n def copy(self, **kwargs):\n \"\"\"Create a shallow copy of this network.\n\n **NOTE** Network layer weights are *never* copied. This method recreates\n the `Network` instance with the same arguments it was initialized with\n (excepting any new kwargs).\n\n Args:\n **kwargs: Args to override when recreating this network. Commonly\n overridden args include 'name'.\n\n Returns:\n A shallow copy of this network.\n \"\"\"\n return type(self)(**dict(self._saved_kwargs, **kwargs))\n\n def __call__(self, inputs, *args, **kwargs):\n tf.nest.assert_same_structure(inputs, self.input_tensor_spec)\n return super(Network, self).__call__(inputs, *args, **kwargs)\n\n\nclass DistributionNetwork(Network):\n \"\"\"Base class for networks which generate Distributions as their output.\"\"\"\n\n def __init__(self, input_tensor_spec, state_spec, output_spec, name):\n super(DistributionNetwork, self).__init__(\n input_tensor_spec=input_tensor_spec, state_spec=state_spec, name=name)\n self._output_spec = output_spec\n\n @property\n def output_spec(self):\n return self._output_spec\n",
"# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A Categorical DQN Agent.\n\nImplements the Categorical DQN agent from\n\n\"A Distributional Perspective on Reinforcement Learning\"\n Bellemare et al., 2017\n https://arxiv.org/abs/1707.06887\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gin\nimport tensorflow as tf\n\nfrom tf_agents.agents import tf_agent\nfrom tf_agents.agents.dqn import dqn_agent\nfrom tf_agents.networks import utils\nfrom tf_agents.policies import boltzmann_policy\nfrom tf_agents.policies import categorical_q_policy\nfrom tf_agents.policies import epsilon_greedy_policy\nfrom tf_agents.policies import greedy_policy\nfrom tf_agents.utils import common\nfrom tf_agents.utils import nest_utils\nfrom tf_agents.utils import value_ops\n\n\[email protected]\nclass CategoricalDqnAgent(dqn_agent.DqnAgent):\n \"\"\"A Categorical DQN Agent based on the DQN Agent.\"\"\"\n\n def __init__(self,\n time_step_spec,\n action_spec,\n categorical_q_network,\n optimizer,\n min_q_value=-10.0,\n max_q_value=10.0,\n epsilon_greedy=0.1,\n n_step_update=1,\n boltzmann_temperature=None,\n # Params for target network updates\n target_update_tau=1.0,\n target_update_period=1,\n # Params for training.\n td_errors_loss_fn=None,\n gamma=1.0,\n reward_scale_factor=1.0,\n gradient_clipping=None,\n # Params for debugging\n debug_summaries=False,\n summarize_grads_and_vars=False,\n train_step_counter=None,\n name=None):\n \"\"\"Creates a Categorical DQN Agent.\n\n Args:\n time_step_spec: A `TimeStep` spec of the expected time_steps.\n action_spec: A `BoundedTensorSpec` representing the actions.\n categorical_q_network: A categorical_q_network.CategoricalQNetwork that\n returns the q_distribution for each action.\n optimizer: The optimizer to use for training.\n min_q_value: A float specifying the minimum Q-value, used for setting up\n the support.\n max_q_value: A float specifying the maximum Q-value, used for setting up\n the support.\n epsilon_greedy: probability of choosing a random action in the default\n epsilon-greedy collect policy (used only if a wrapper is not provided to\n the collect_policy method).\n n_step_update: The number of steps to consider when computing TD error and\n TD loss. Defaults to single-step updates. Note that this requires the\n user to call train on Trajectory objects with a time dimension of\n `n_step_update + 1`. However, note that we do not yet support\n `n_step_update > 1` in the case of RNNs (i.e., non-empty\n `q_network.state_spec`).\n boltzmann_temperature: Temperature value to use for Boltzmann sampling of\n the actions during data collection. The closer to 0.0, the higher the\n probability of choosing the best action.\n target_update_tau: Factor for soft update of the target networks.\n target_update_period: Period for soft update of the target networks.\n td_errors_loss_fn: A function for computing the TD errors loss. 
If None, a\n default value of element_wise_huber_loss is used. This function takes as\n input the target and the estimated Q values and returns the loss for\n each element of the batch.\n gamma: A discount factor for future rewards.\n reward_scale_factor: Multiplicative scale for the reward.\n gradient_clipping: Norm length to clip gradients.\n debug_summaries: A bool to gather debug summaries.\n summarize_grads_and_vars: If True, gradient and network variable summaries\n will be written during training.\n train_step_counter: An optional counter to increment every time the train\n op is run. Defaults to the global_step.\n name: The name of this agent. All variables in this module will fall\n under that name. Defaults to the class name.\n\n Raises:\n TypeError: If the action spec contains more than one action.\n \"\"\"\n num_atoms = getattr(categorical_q_network, 'num_atoms', None)\n if num_atoms is None:\n raise TypeError('Expected categorical_q_network to have property '\n '`num_atoms`, but it doesn\\'t (note: you likely want to '\n 'use a CategoricalQNetwork). Network is: %s' %\n (categorical_q_network,))\n\n self._num_atoms = num_atoms\n self._min_q_value = min_q_value\n self._max_q_value = max_q_value\n self._support = tf.linspace(min_q_value, max_q_value, num_atoms)\n\n super(CategoricalDqnAgent, self).__init__(\n time_step_spec,\n action_spec,\n categorical_q_network,\n optimizer,\n epsilon_greedy=epsilon_greedy,\n n_step_update=n_step_update,\n boltzmann_temperature=boltzmann_temperature,\n target_update_tau=target_update_tau,\n target_update_period=target_update_period,\n td_errors_loss_fn=td_errors_loss_fn,\n gamma=gamma,\n reward_scale_factor=reward_scale_factor,\n gradient_clipping=gradient_clipping,\n debug_summaries=debug_summaries,\n summarize_grads_and_vars=summarize_grads_and_vars,\n train_step_counter=train_step_counter,\n name=name)\n\n policy = categorical_q_policy.CategoricalQPolicy(\n min_q_value,\n max_q_value,\n self._q_network,\n self._action_spec)\n if boltzmann_temperature is not None:\n self._collect_policy = boltzmann_policy.BoltzmannPolicy(\n policy, temperature=self._boltzmann_temperature)\n else:\n self._collect_policy = epsilon_greedy_policy.EpsilonGreedyPolicy(\n policy, epsilon=self._epsilon_greedy)\n self._policy = greedy_policy.GreedyPolicy(policy)\n\n def _loss(self,\n experience,\n td_errors_loss_fn=tf.losses.huber_loss,\n gamma=1.0,\n reward_scale_factor=1.0,\n weights=None):\n \"\"\"Computes critic loss for CategoricalDQN training.\n\n See Algorithm 1 and the discussion immediately preceding it in page 6 of\n \"A Distributional Perspective on Reinforcement Learning\"\n Bellemare et al., 2017\n https://arxiv.org/abs/1707.06887\n\n Args:\n experience: A batch of experience data in the form of a `Trajectory`. The\n structure of `experience` must match that of `self.policy.step_spec`.\n All tensors in `experience` must be shaped `[batch, time, ...]` where\n `time` must be equal to `self.required_experience_time_steps` if that\n property is not `None`.\n td_errors_loss_fn: A function(td_targets, predictions) to compute loss.\n gamma: Discount for future rewards.\n reward_scale_factor: Multiplicative factor to scale rewards.\n weights: Optional weights used for importance sampling.\n Returns:\n critic_loss: A scalar critic loss.\n Raises:\n ValueError:\n if the number of actions is greater than 1.\n \"\"\"\n # Check that `experience` includes two outer dimensions [B, T, ...]. 
This\n # method requires a time dimension to compute the loss properly.\n self._check_trajectory_dimensions(experience)\n\n if self._n_step_update == 1:\n time_steps, actions, next_time_steps = self._experience_to_transitions(\n experience)\n else:\n # To compute n-step returns, we need the first time steps, the first\n # actions, and the last time steps. Therefore we extract the first and\n # last transitions from our Trajectory.\n first_two_steps = tf.nest.map_structure(lambda x: x[:, :2], experience)\n last_two_steps = tf.nest.map_structure(lambda x: x[:, -2:], experience)\n time_steps, actions, _ = self._experience_to_transitions(first_two_steps)\n _, _, next_time_steps = self._experience_to_transitions(last_two_steps)\n\n with tf.name_scope('critic_loss'):\n tf.nest.assert_same_structure(actions, self.action_spec)\n tf.nest.assert_same_structure(time_steps, self.time_step_spec)\n tf.nest.assert_same_structure(next_time_steps, self.time_step_spec)\n\n rank = nest_utils.get_outer_rank(time_steps.observation,\n self._time_step_spec.observation)\n\n # If inputs have a time dimension and the q_network is stateful,\n # combine the batch and time dimension.\n batch_squash = (None\n if rank <= 1 or self._q_network.state_spec in ((), None)\n else utils.BatchSquash(rank))\n\n # q_logits contains the Q-value logits for all actions.\n q_logits, _ = self._q_network(time_steps.observation,\n time_steps.step_type)\n next_q_distribution = self._next_q_distribution(next_time_steps,\n batch_squash)\n\n if batch_squash is not None:\n # Squash outer dimensions to a single dimensions for facilitation\n # computing the loss the following. Required for supporting temporal\n # inputs, for example.\n q_logits = batch_squash.flatten(q_logits)\n actions = batch_squash.flatten(actions)\n next_time_steps = tf.nest.map_structure(batch_squash.flatten,\n next_time_steps)\n\n actions = tf.nest.flatten(actions)[0]\n if actions.shape.ndims > 1:\n actions = tf.squeeze(actions, range(1, actions.shape.ndims))\n\n # Project the sample Bellman update \\hat{T}Z_{\\theta} onto the original\n # support of Z_{\\theta} (see Figure 1 in paper).\n batch_size = tf.shape(q_logits)[0]\n tiled_support = tf.tile(self._support, [batch_size])\n tiled_support = tf.reshape(tiled_support, [batch_size, self._num_atoms])\n\n if self._n_step_update == 1:\n discount = next_time_steps.discount\n if discount.shape.ndims == 1:\n # We expect discount to have a shape of [batch_size], while\n # tiled_support will have a shape of [batch_size, num_atoms]. 
To\n # multiply these, we add a second dimension of 1 to the discount.\n discount = discount[:, None]\n next_value_term = tf.multiply(discount,\n tiled_support,\n name='next_value_term')\n\n reward = next_time_steps.reward\n if reward.shape.ndims == 1:\n # See the explanation above.\n reward = reward[:, None]\n reward_term = tf.multiply(reward_scale_factor,\n reward,\n name='reward_term')\n\n target_support = tf.add(reward_term, gamma * next_value_term,\n name='target_support')\n else:\n # When computing discounted return, we need to throw out the last time\n # index of both reward and discount, which are filled with dummy values\n # to match the dimensions of the observation.\n rewards = reward_scale_factor * experience.reward[:, :-1]\n discounts = gamma * experience.discount[:, :-1]\n\n # TODO(b/134618876): Properly handle Trajectories that include episode\n # boundaries with nonzero discount.\n\n discounted_returns = value_ops.discounted_return(\n rewards=rewards,\n discounts=discounts,\n final_value=tf.zeros([batch_size], dtype=discounts.dtype),\n time_major=False,\n provide_all_returns=False)\n\n # Convert discounted_returns from [batch_size] to [batch_size, 1]\n discounted_returns = discounted_returns[:, None]\n\n final_value_discount = tf.reduce_prod(discounts, axis=1)\n final_value_discount = final_value_discount[:, None]\n\n # Save the values of discounted_returns and final_value_discount in\n # order to check them in unit tests.\n self._discounted_returns = discounted_returns\n self._final_value_discount = final_value_discount\n\n target_support = tf.add(discounted_returns,\n final_value_discount * tiled_support,\n name='target_support')\n\n target_distribution = tf.stop_gradient(project_distribution(\n target_support, next_q_distribution, self._support))\n\n # Obtain the current Q-value logits for the selected actions.\n indices = tf.range(tf.shape(q_logits)[0])[:, None]\n indices = tf.cast(indices, actions.dtype)\n reshaped_actions = tf.concat([indices, actions[:, None]], 1)\n chosen_action_logits = tf.gather_nd(q_logits, reshaped_actions)\n\n # Compute the cross-entropy loss between the logits. 
If inputs have\n # a time dimension, compute the sum over the time dimension before\n # computing the mean over the batch dimension.\n if batch_squash is not None:\n target_distribution = batch_squash.unflatten(target_distribution)\n chosen_action_logits = batch_squash.unflatten(chosen_action_logits)\n critic_loss = tf.reduce_mean(\n tf.reduce_sum(\n tf.nn.softmax_cross_entropy_with_logits_v2(\n labels=target_distribution,\n logits=chosen_action_logits),\n axis=1))\n else:\n critic_loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits_v2(\n labels=target_distribution,\n logits=chosen_action_logits))\n\n with tf.name_scope('Losses/'):\n tf.compat.v2.summary.scalar(\n 'critic_loss', critic_loss, step=self.train_step_counter)\n\n if self._debug_summaries:\n distribution_errors = target_distribution - chosen_action_logits\n with tf.name_scope('distribution_errors'):\n common.generate_tensor_summaries(\n 'distribution_errors', distribution_errors,\n step=self.train_step_counter)\n tf.compat.v2.summary.scalar(\n 'mean', tf.reduce_mean(distribution_errors),\n step=self.train_step_counter)\n tf.compat.v2.summary.scalar(\n 'mean_abs', tf.reduce_mean(tf.abs(distribution_errors)),\n step=self.train_step_counter)\n tf.compat.v2.summary.scalar(\n 'max', tf.reduce_max(distribution_errors),\n step=self.train_step_counter)\n tf.compat.v2.summary.scalar(\n 'min', tf.reduce_min(distribution_errors),\n step=self.train_step_counter)\n with tf.name_scope('target_distribution'):\n common.generate_tensor_summaries(\n 'target_distribution', target_distribution,\n step=self.train_step_counter)\n\n # TODO(b/127318640): Give appropriate values for td_loss and td_error for\n # prioritized replay.\n return tf_agent.LossInfo(critic_loss, dqn_agent.DqnLossInfo(td_loss=(),\n td_error=()))\n\n def _next_q_distribution(self, next_time_steps, batch_squash=None):\n \"\"\"Compute the q distribution of the next state for TD error computation.\n\n Args:\n next_time_steps: A batch of next timesteps\n batch_squash: An optional BatchSquash for squashing outer dimensions\n of a Q network, e.g. the time dimension of a recurrent categorical\n policy network.\n\n Returns:\n A [batch_size, num_atoms] tensor representing the Q-distribution for the\n next state.\n \"\"\"\n next_target_logits, _ = self._target_q_network(next_time_steps.observation,\n next_time_steps.step_type)\n if batch_squash is not None:\n next_target_logits = batch_squash.flatten(next_target_logits)\n\n next_target_probabilities = tf.nn.softmax(next_target_logits)\n next_target_q_values = tf.reduce_sum(\n self._support * next_target_probabilities, axis=-1)\n next_qt_argmax = tf.argmax(next_target_q_values, axis=-1)[:, None]\n batch_indices = tf.range(\n tf.to_int64(tf.shape(next_target_q_values)[0]))[:, None]\n next_qt_argmax = tf.concat([batch_indices, next_qt_argmax], axis=-1)\n return tf.gather_nd(next_target_probabilities, next_qt_argmax)\n\n\n# The following method is copied from the Dopamine codebase with permission\n# (https://github.com/google/dopamine). 
Thanks to Marc Bellemare and also to\n# Pablo Castro, who wrote the original version of this method.\ndef project_distribution(supports, weights, target_support,\n validate_args=False):\n \"\"\"Projects a batch of (support, weights) onto target_support.\n\n Based on equation (7) in (Bellemare et al., 2017):\n https://arxiv.org/abs/1707.06887\n In the rest of the comments we will refer to this equation simply as Eq7.\n\n This code is not easy to digest, so we will use a running example to clarify\n what is going on, with the following sample inputs:\n\n * supports = [[0, 2, 4, 6, 8],\n [1, 3, 4, 5, 6]]\n * weights = [[0.1, 0.6, 0.1, 0.1, 0.1],\n [0.1, 0.2, 0.5, 0.1, 0.1]]\n * target_support = [4, 5, 6, 7, 8]\n\n In the code below, comments preceded with 'Ex:' will be referencing the above\n values.\n\n Args:\n supports: Tensor of shape (batch_size, num_dims) defining supports for the\n distribution.\n weights: Tensor of shape (batch_size, num_dims) defining weights on the\n original support points. Although for the CategoricalDQN agent these\n weights are probabilities, it is not required that they are.\n target_support: Tensor of shape (num_dims) defining support of the projected\n distribution. The values must be monotonically increasing. Vmin and Vmax\n will be inferred from the first and last elements of this tensor,\n respectively. The values in this tensor must be equally spaced.\n validate_args: Whether we will verify the contents of the\n target_support parameter.\n\n Returns:\n A Tensor of shape (batch_size, num_dims) with the projection of a batch of\n (support, weights) onto target_support.\n\n Raises:\n ValueError: If target_support has no dimensions, or if shapes of supports,\n weights, and target_support are incompatible.\n \"\"\"\n target_support_deltas = target_support[1:] - target_support[:-1]\n # delta_z = `\\Delta z` in Eq7.\n delta_z = target_support_deltas[0]\n validate_deps = []\n supports.shape.assert_is_compatible_with(weights.shape)\n supports[0].shape.assert_is_compatible_with(target_support.shape)\n target_support.shape.assert_has_rank(1)\n if validate_args:\n # Assert that supports and weights have the same shapes.\n validate_deps.append(\n tf.Assert(\n tf.reduce_all(tf.equal(tf.shape(supports), tf.shape(weights))),\n [supports, weights]))\n # Assert that elements of supports and target_support have the same shape.\n validate_deps.append(\n tf.Assert(\n tf.reduce_all(\n tf.equal(tf.shape(supports)[1], tf.shape(target_support))),\n [supports, target_support]))\n # Assert that target_support has a single dimension.\n validate_deps.append(\n tf.Assert(\n tf.equal(tf.size(tf.shape(target_support)), 1), [target_support]))\n # Assert that the target_support is monotonically increasing.\n validate_deps.append(\n tf.Assert(tf.reduce_all(target_support_deltas > 0), [target_support]))\n # Assert that the values in target_support are equally spaced.\n validate_deps.append(\n tf.Assert(\n tf.reduce_all(tf.equal(target_support_deltas, delta_z)),\n [target_support]))\n\n with tf.control_dependencies(validate_deps):\n # Ex: `v_min, v_max = 4, 8`.\n v_min, v_max = target_support[0], target_support[-1]\n # Ex: `batch_size = 2`.\n batch_size = tf.shape(supports)[0]\n # `N` in Eq7.\n # Ex: `num_dims = 5`.\n num_dims = tf.shape(target_support)[0]\n # clipped_support = `[\\hat{T}_{z_j}]^{V_max}_{V_min}` in Eq7.\n # Ex: `clipped_support = [[[ 4. 4. 4. 6. 8.]]\n # [[ 4. 4. 4. 5. 
6.]]]`.\n clipped_support = tf.clip_by_value(supports, v_min, v_max)[:, None, :]\n # Ex: `tiled_support = [[[[ 4. 4. 4. 6. 8.]\n # [ 4. 4. 4. 6. 8.]\n # [ 4. 4. 4. 6. 8.]\n # [ 4. 4. 4. 6. 8.]\n # [ 4. 4. 4. 6. 8.]]\n # [[ 4. 4. 4. 5. 6.]\n # [ 4. 4. 4. 5. 6.]\n # [ 4. 4. 4. 5. 6.]\n # [ 4. 4. 4. 5. 6.]\n # [ 4. 4. 4. 5. 6.]]]]`.\n tiled_support = tf.tile([clipped_support], [1, 1, num_dims, 1])\n # Ex: `reshaped_target_support = [[[ 4.]\n # [ 5.]\n # [ 6.]\n # [ 7.]\n # [ 8.]]\n # [[ 4.]\n # [ 5.]\n # [ 6.]\n # [ 7.]\n # [ 8.]]]`.\n reshaped_target_support = tf.tile(target_support[:, None], [batch_size, 1])\n reshaped_target_support = tf.reshape(reshaped_target_support,\n [batch_size, num_dims, 1])\n # numerator = `|clipped_support - z_i|` in Eq7.\n # Ex: `numerator = [[[[ 0. 0. 0. 2. 4.]\n # [ 1. 1. 1. 1. 3.]\n # [ 2. 2. 2. 0. 2.]\n # [ 3. 3. 3. 1. 1.]\n # [ 4. 4. 4. 2. 0.]]\n # [[ 0. 0. 0. 1. 2.]\n # [ 1. 1. 1. 0. 1.]\n # [ 2. 2. 2. 1. 0.]\n # [ 3. 3. 3. 2. 1.]\n # [ 4. 4. 4. 3. 2.]]]]`.\n numerator = tf.abs(tiled_support - reshaped_target_support)\n quotient = 1 - (numerator / delta_z)\n # clipped_quotient = `[1 - numerator / (\\Delta z)]_0^1` in Eq7.\n # Ex: `clipped_quotient = [[[[ 1. 1. 1. 0. 0.]\n # [ 0. 0. 0. 0. 0.]\n # [ 0. 0. 0. 1. 0.]\n # [ 0. 0. 0. 0. 0.]\n # [ 0. 0. 0. 0. 1.]]\n # [[ 1. 1. 1. 0. 0.]\n # [ 0. 0. 0. 1. 0.]\n # [ 0. 0. 0. 0. 1.]\n # [ 0. 0. 0. 0. 0.]\n # [ 0. 0. 0. 0. 0.]]]]`.\n clipped_quotient = tf.clip_by_value(quotient, 0, 1)\n # Ex: `weights = [[ 0.1 0.6 0.1 0.1 0.1]\n # [ 0.1 0.2 0.5 0.1 0.1]]`.\n weights = weights[:, None, :]\n # inner_prod = `\\sum_{j=0}^{N-1} clipped_quotient * p_j(x', \\pi(x'))`\n # in Eq7.\n # Ex: `inner_prod = [[[[ 0.1 0.6 0.1 0. 0. ]\n # [ 0. 0. 0. 0. 0. ]\n # [ 0. 0. 0. 0.1 0. ]\n # [ 0. 0. 0. 0. 0. ]\n # [ 0. 0. 0. 0. 0.1]]\n # [[ 0.1 0.2 0.5 0. 0. ]\n # [ 0. 0. 0. 0.1 0. ]\n # [ 0. 0. 0. 0. 0.1]\n # [ 0. 0. 0. 0. 0. ]\n # [ 0. 0. 0. 0. 0. ]]]]`.\n inner_prod = clipped_quotient * weights\n # Ex: `projection = [[ 0.8 0.0 0.1 0.0 0.1]\n # [ 0.8 0.1 0.1 0.0 0.0]]`.\n projection = tf.reduce_sum(inner_prod, 3)\n projection = tf.reshape(projection, [batch_size, num_dims])\n return projection\n",
"# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Simple Policy for DQN.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gin\nimport tensorflow as tf\n\nfrom tf_agents.distributions import shifted_categorical\nfrom tf_agents.policies import tf_policy\nfrom tf_agents.trajectories import policy_step\n\n\[email protected]\nclass QPolicy(tf_policy.Base):\n \"\"\"Class to build Q-Policies.\"\"\"\n\n def __init__(self,\n time_step_spec,\n action_spec,\n q_network,\n emit_log_probability=False,\n name=None):\n \"\"\"Builds a Q-Policy given a q_network.\n\n Args:\n time_step_spec: A `TimeStep` spec of the expected time_steps.\n action_spec: A nest of BoundedTensorSpec representing the actions.\n q_network: An instance of a `tf_agents.network.Network`,\n callable via `network(observation, step_type) -> (output, final_state)`.\n emit_log_probability: Whether to emit log-probs in info of `PolicyStep`.\n name: The name of this policy. All variables in this module will fall\n under that name. Defaults to the class name.\n\n Raises:\n ValueError: If `q_network.action_spec` exists and is not compatible with\n `action_spec`.\n NotImplementedError: If `action_spec` contains more than one\n `BoundedTensorSpec`.\n \"\"\"\n network_action_spec = getattr(q_network, 'action_spec', None)\n\n if network_action_spec is not None:\n if not action_spec.is_compatible_with(network_action_spec):\n raise ValueError(\n 'action_spec must be compatible with q_network.action_spec; '\n 'instead got action_spec=%s, q_network.action_spec=%s' % (\n action_spec, network_action_spec))\n\n flat_action_spec = tf.nest.flatten(action_spec)\n if len(flat_action_spec) > 1:\n raise NotImplementedError(\n 'action_spec can only contain a single BoundedTensorSpec.')\n # We need to maintain the flat action spec for dtype, shape and range.\n self._flat_action_spec = flat_action_spec[0]\n self._q_network = q_network\n super(QPolicy, self).__init__(\n time_step_spec,\n action_spec,\n policy_state_spec=q_network.state_spec,\n clip=False,\n emit_log_probability=emit_log_probability,\n name=name)\n\n def _variables(self):\n return self._q_network.variables\n\n def _distribution(self, time_step, policy_state):\n # In DQN, we always either take a uniformly random action, or the action\n # with the highest Q-value. However, to support more complicated policies,\n # we expose all Q-values as a categorical distribution with Q-values as\n # logits, and apply the GreedyPolicy wrapper in dqn_agent.py to select the\n # action with the highest Q-value.\n q_values, policy_state = self._q_network(\n time_step.observation, time_step.step_type, policy_state)\n\n # TODO(b/122314058): Validate and enforce that sampling distributions\n # created with the q_network logits generate the right action shapes. 
This\n # is curretly patching the problem.\n\n # If the action spec says each action should be shaped (1,), add another\n # dimension so the final shape is (B, 1, A), where A is the number of\n # actions. This will make Categorical emit events shaped (B, 1) rather than\n # (B,). Using axis -2 to allow for (B, T, 1, A) shaped q_values.\n if self._flat_action_spec.shape.ndims == 1:\n q_values = tf.expand_dims(q_values, -2)\n\n # TODO(kbanoop): Handle distributions over nests.\n distribution = shifted_categorical.ShiftedCategorical(\n logits=q_values,\n dtype=self._flat_action_spec.dtype,\n shift=self._flat_action_spec.minimum)\n distribution = tf.nest.pack_sequence_as(self._action_spec, [distribution])\n return policy_step.PolicyStep(distribution, policy_state)\n"
] | [
[
"tensorflow.python.util.tf_decorator.make_decorator",
"tensorflow.nest.assert_same_structure",
"tensorflow.expand_dims",
"tensorflow.python.util.tf_inspect.getargspec"
],
[
"tensorflow.concat",
"tensorflow.nest.assert_same_structure",
"tensorflow.control_dependencies",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.equal",
"tensorflow.abs",
"tensorflow.nest.flatten",
"tensorflow.linspace",
"tensorflow.add",
"tensorflow.name_scope",
"tensorflow.argmax",
"tensorflow.tile",
"tensorflow.gather_nd",
"tensorflow.shape",
"tensorflow.compat.v2.summary.scalar",
"tensorflow.reduce_prod",
"tensorflow.clip_by_value",
"tensorflow.reduce_max",
"tensorflow.nn.softmax",
"tensorflow.multiply",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.reduce_min",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"tensorflow.reduce_all",
"tensorflow.nest.map_structure"
],
[
"tensorflow.expand_dims",
"tensorflow.nest.flatten",
"tensorflow.nest.pack_sequence_as"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
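The project_distribution docstring in categorical_dqn_agent.py above walks through Eq. 7 of Bellemare et al. (2017) with concrete sample inputs. Below is a NumPy-only sketch of the same projection that reproduces the docstring's numbers; the helper name and the pure-NumPy form are illustrative, not part of TF-Agents, and it assumes a monotonically increasing, equally spaced target support.

import numpy as np

def project_distribution_np(supports, weights, target_support):
    # Eq. 7: clip the supports to [v_min, v_max], then spread each weight onto
    # neighbouring target atoms in proportion to 1 - |distance| / delta_z.
    delta_z = target_support[1] - target_support[0]
    clipped = np.clip(supports, target_support[0], target_support[-1])      # (B, N)
    numerator = np.abs(clipped[:, None, :] - target_support[None, :, None]) # (B, M, N)
    quotient = np.clip(1.0 - numerator / delta_z, 0.0, 1.0)
    return (quotient * weights[:, None, :]).sum(axis=-1)                    # (B, M)

supports = np.array([[0., 2., 4., 6., 8.], [1., 3., 4., 5., 6.]])
weights = np.array([[0.1, 0.6, 0.1, 0.1, 0.1], [0.1, 0.2, 0.5, 0.1, 0.1]])
target_support = np.array([4., 5., 6., 7., 8.])
print(project_distribution_np(supports, weights, target_support))
# [[0.8 0.  0.1 0.  0.1]
#  [0.8 0.1 0.1 0.  0. ]]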
ryohachiuma/DFU-challenge | [
"08401bfde9bcb1abcb32ef060e89b8c135e7f3f1"
] | [
"mmdet/datasets/custom.py"
] | [
"import os.path as osp\n\nimport mmcv\nimport numpy as np\nfrom torch.utils.data import Dataset\n\nfrom mmdet.core import eval_map, eval_recalls\nfrom .builder import DATASETS\nfrom .pipelines import Compose\n\n\[email protected]_module()\nclass CustomDataset(Dataset):\n \"\"\"Custom dataset for detection.\n\n The annotation format is shown as follows. The `ann` field is optional for\n testing.\n\n .. code-block:: none\n\n [\n {\n 'filename': 'a.jpg',\n 'width': 1280,\n 'height': 720,\n 'ann': {\n 'bboxes': <np.ndarray> (n, 4),\n 'labels': <np.ndarray> (n, ),\n 'bboxes_ignore': <np.ndarray> (k, 4), (optional field)\n 'labels_ignore': <np.ndarray> (k, 4) (optional field)\n }\n },\n ...\n ]\n \"\"\"\n\n CLASSES = None\n\n def __init__(self,\n ann_file,\n pipeline,\n classes=None,\n data_root=None,\n img_prefix='',\n seg_prefix=None,\n proposal_file=None,\n test_mode=False,\n filter_empty_gt=True):\n self.ann_file = ann_file\n self.data_root = data_root\n self.img_prefix = img_prefix\n self.seg_prefix = seg_prefix\n self.proposal_file = proposal_file\n self.test_mode = test_mode\n self.filter_empty_gt = filter_empty_gt\n self.CLASSES = self.get_classes(classes)\n\n # join paths if data_root is specified\n if self.data_root is not None:\n if not osp.isabs(self.ann_file):\n self.ann_file = osp.join(self.data_root, self.ann_file)\n if not (self.img_prefix is None or osp.isabs(self.img_prefix)):\n self.img_prefix = osp.join(self.data_root, self.img_prefix)\n if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)):\n self.seg_prefix = osp.join(self.data_root, self.seg_prefix)\n if not (self.proposal_file is None\n or osp.isabs(self.proposal_file)):\n self.proposal_file = osp.join(self.data_root,\n self.proposal_file)\n # load annotations (and proposals)\n self.data_infos = self.load_annotations(self.ann_file)\n # filter data infos if classes are customized\n if self.custom_classes:\n self.data_infos = self.get_subset_by_classes()\n\n if self.proposal_file is not None:\n self.proposals = self.load_proposals(self.proposal_file)\n else:\n self.proposals = None\n\n # set group flag for the sampler\n if not self.test_mode:\n self._set_group_flag()\n # processing pipeline\n self.pipeline = Compose(pipeline)\n\n def __len__(self):\n return len(self.data_infos)\n\n def load_annotations(self, ann_file):\n return mmcv.load(ann_file)\n\n def load_proposals(self, proposal_file):\n return mmcv.load(proposal_file)\n\n def get_ann_info(self, idx):\n return self.data_infos[idx]['ann']\n\n def get_cat_ids(self, idx):\n return self.data_infos[idx]['ann']['labels'].astype(np.int).tolist()\n\n def pre_pipeline(self, results):\n results['img_prefix'] = self.img_prefix\n results['seg_prefix'] = self.seg_prefix\n results['proposal_file'] = self.proposal_file\n results['bbox_fields'] = []\n results['mask_fields'] = []\n results['seg_fields'] = []\n\n def _filter_imgs(self, min_size=32):\n \"\"\"Filter images too small.\"\"\"\n valid_inds = []\n for i, img_info in enumerate(self.data_infos):\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds\n\n def _set_group_flag(self):\n \"\"\"Set flag according to image aspect ratio.\n\n Images with aspect ratio greater than 1 will be set as group 1,\n otherwise group 0.\n \"\"\"\n self.flag = np.zeros(len(self), dtype=np.uint8)\n for i in range(len(self)):\n img_info = self.data_infos[i]\n if img_info['width'] / img_info['height'] > 1:\n self.flag[i] = 1\n\n def _rand_another(self, idx):\n pool = np.where(self.flag == 
self.flag[idx])[0]\n return np.random.choice(pool)\n\n def __getitem__(self, idx):\n #if self.test_mode:\n if 0:\n return self.prepare_test_img(idx)\n while True:\n data = self.prepare_train_img(idx)\n if data is None:\n idx = self._rand_another(idx)\n continue\n return data\n\n def prepare_train_img(self, idx):\n img_info = self.data_infos[idx]\n ann_info = self.get_ann_info(idx)\n results = dict(img_info=img_info, ann_info=ann_info)\n if self.proposals is not None:\n results['proposals'] = self.proposals[idx]\n self.pre_pipeline(results)\n return self.pipeline(results)\n\n def prepare_test_img(self, idx):\n img_info = self.data_infos[idx]\n results = dict(img_info=img_info)\n if self.proposals is not None:\n results['proposals'] = self.proposals[idx]\n self.pre_pipeline(results)\n return self.pipeline(results)\n\n @classmethod\n def get_classes(cls, classes=None):\n \"\"\"Get class names of current dataset\n\n Args:\n classes (Sequence[str] | str | None): If classes is None, use\n default CLASSES defined by builtin dataset. If classes is a\n string, take it as a file name. The file contains the name of\n classes where each line contains one class name. If classes is\n a tuple or list, override the CLASSES defined by the dataset.\n\n \"\"\"\n if classes is None:\n cls.custom_classes = False\n return cls.CLASSES\n\n cls.custom_classes = True\n if isinstance(classes, str):\n # take it as a file path\n class_names = mmcv.list_from_file(classes)\n elif isinstance(classes, (tuple, list)):\n class_names = classes\n else:\n raise ValueError(f'Unsupported type {type(classes)} of classes.')\n\n return class_names\n\n def get_subset_by_classes(self):\n return self.data_infos\n\n def format_results(self, results, **kwargs):\n pass\n\n def evaluate(self,\n results,\n metric='mAP',\n logger=None,\n proposal_nums=(100, 300, 1000),\n iou_thr=0.5,\n scale_ranges=None):\n \"\"\"Evaluate the dataset.\n\n Args:\n results (list): Testing results of the dataset.\n metric (str | list[str]): Metrics to be evaluated.\n logger (logging.Logger | None | str): Logger used for printing\n related information during evaluation. Defaault: None.\n proposal_nums (Sequence[int]): Proposal number used for evaluating\n recalls, such as recall@100, recall@1000.\n Default: (100, 300, 1000).\n iou_thr (float | list[float]): IoU threshold. 
It must be a float\n when evaluating mAP, and can be a list when evaluating recall.\n Default: 0.5.\n scale_ranges (list[tuple] | None): Scale ranges for evaluating mAP.\n Default: None.\n \"\"\"\n if not isinstance(metric, str):\n assert len(metric) == 1\n metric = metric[0]\n allowed_metrics = ['mAP', 'recall']\n if metric not in allowed_metrics:\n raise KeyError(f'metric {metric} is not supported')\n annotations = [self.get_ann_info(i) for i in range(len(self))]\n\n eval_results = {}\n if metric == 'mAP':\n assert isinstance(iou_thr, float)\n mean_ap, _ = eval_map(\n results,\n annotations,\n scale_ranges=scale_ranges,\n iou_thr=iou_thr,\n dataset=self.CLASSES,\n logger=logger)\n eval_results['mAP'] = mean_ap\n elif metric == 'recall':\n gt_bboxes = [ann['bboxes'] for ann in annotations]\n if isinstance(iou_thr, float):\n iou_thr = [iou_thr]\n recalls = eval_recalls(\n gt_bboxes, results, proposal_nums, iou_thr, logger=logger)\n for i, num in enumerate(proposal_nums):\n for j, iou in enumerate(iou_thr):\n eval_results[f'recall@{num}@{iou}'] = recalls[i, j]\n if recalls.shape[1] > 1:\n ar = recalls.mean(axis=1)\n for i, num in enumerate(proposal_nums):\n eval_results[f'AR@{num}'] = ar[i]\n\n return eval_results\n"
] | [
[
"numpy.where",
"numpy.random.choice"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
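CustomDataset above groups images by aspect ratio (_set_group_flag) and, when a training sample yields no usable data, resamples another index from the same group (_rand_another). A small self-contained sketch of that grouping pattern, using an invented toy image list:

import numpy as np

img_infos = [{'width': 1280, 'height': 720},   # landscape -> group 1
             {'width': 600,  'height': 800},   # portrait  -> group 0
             {'width': 1024, 'height': 768}]   # landscape -> group 1

flag = np.zeros(len(img_infos), dtype=np.uint8)
for i, info in enumerate(img_infos):
    if info['width'] / info['height'] > 1:
        flag[i] = 1

def rand_another(idx):
    # draw a replacement index from the same aspect-ratio group
    pool = np.where(flag == flag[idx])[0]
    return int(np.random.choice(pool))

print(flag.tolist(), rand_another(0))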
vanillagorillaa/rednose | [
"7e41d39b71f7888875a2fbf9cea770eabe0a8128"
] | [
"examples/live_kf.py"
] | [
"#!/usr/bin/env python3\nimport sys\nimport numpy as np\nimport sympy as sp\n\nfrom rednose.helpers import KalmanError\nfrom rednose.helpers.ekf_sym import EKF_sym, gen_code\nfrom rednose.helpers.sympy_helpers import (euler_rotate, quat_matrix_r, quat_rotate)\n\nEARTH_GM = 3.986005e14 # m^3/s^2 (gravitational constant * mass of earth)\n\n\nclass ObservationKind():\n UNKNOWN = 0\n NO_OBSERVATION = 1\n GPS_NED = 2\n ODOMETRIC_SPEED = 3\n PHONE_GYRO = 4\n GPS_VEL = 5\n PSEUDORANGE_GPS = 6\n PSEUDORANGE_RATE_GPS = 7\n SPEED = 8\n NO_ROT = 9\n PHONE_ACCEL = 10\n ORB_POINT = 11\n ECEF_POS = 12\n CAMERA_ODO_TRANSLATION = 13\n CAMERA_ODO_ROTATION = 14\n ORB_FEATURES = 15\n MSCKF_TEST = 16\n FEATURE_TRACK_TEST = 17\n LANE_PT = 18\n IMU_FRAME = 19\n PSEUDORANGE_GLONASS = 20\n PSEUDORANGE_RATE_GLONASS = 21\n PSEUDORANGE = 22\n PSEUDORANGE_RATE = 23\n\n names = [\n 'Unknown',\n 'No observation',\n 'GPS NED',\n 'Odometric speed',\n 'Phone gyro',\n 'GPS velocity',\n 'GPS pseudorange',\n 'GPS pseudorange rate',\n 'Speed',\n 'No rotation',\n 'Phone acceleration',\n 'ORB point',\n 'ECEF pos',\n 'camera odometric translation',\n 'camera odometric rotation',\n 'ORB features',\n 'MSCKF test',\n 'Feature track test',\n 'Lane ecef point',\n 'imu frame eulers',\n 'GLONASS pseudorange',\n 'GLONASS pseudorange rate',\n ]\n\n @classmethod\n def to_string(cls, kind):\n return cls.names[kind]\n\n\nclass States():\n ECEF_POS = slice(0, 3) # x, y and z in ECEF in meters\n ECEF_ORIENTATION = slice(3, 7) # quat for pose of phone in ecef\n ECEF_VELOCITY = slice(7, 10) # ecef velocity in m/s\n ANGULAR_VELOCITY = slice(10, 13) # roll, pitch and yaw rates in device frame in radians/s\n GYRO_BIAS = slice(13, 16) # roll, pitch and yaw biases\n ODO_SCALE = slice(16, 17) # odometer scale\n ACCELERATION = slice(17, 20) # Acceleration in device frame in m/s**2\n IMU_OFFSET = slice(20, 23) # imu offset angles in radians\n\n # Error-state has different slices because it is an ESKF\n ECEF_POS_ERR = slice(0, 3)\n ECEF_ORIENTATION_ERR = slice(3, 6) # euler angles for orientation error\n ECEF_VELOCITY_ERR = slice(6, 9)\n ANGULAR_VELOCITY_ERR = slice(9, 12)\n GYRO_BIAS_ERR = slice(12, 15)\n ODO_SCALE_ERR = slice(15, 16)\n ACCELERATION_ERR = slice(16, 19)\n IMU_OFFSET_ERR = slice(19, 22)\n\n\nclass LiveKalman():\n name = 'live'\n\n initial_x = np.array([-2.7e6, 4.2e6, 3.8e6,\n 1, 0, 0, 0,\n 0, 0, 0,\n 0, 0, 0,\n 0, 0, 0,\n 1,\n 0, 0, 0,\n 0, 0, 0])\n\n # state covariance\n initial_P_diag = np.array([10000**2, 10000**2, 10000**2,\n 10**2, 10**2, 10**2,\n 10**2, 10**2, 10**2,\n 1**2, 1**2, 1**2,\n 0.05**2, 0.05**2, 0.05**2,\n 0.02**2,\n 1**2, 1**2, 1**2,\n (0.01)**2, (0.01)**2, (0.01)**2])\n\n # process noise\n Q = np.diag([0.03**2, 0.03**2, 0.03**2,\n 0.0**2, 0.0**2, 0.0**2,\n 0.0**2, 0.0**2, 0.0**2,\n 0.1**2, 0.1**2, 0.1**2,\n (0.005 / 100)**2, (0.005 / 100)**2, (0.005 / 100)**2,\n (0.02 / 100)**2,\n 3**2, 3**2, 3**2,\n (0.05 / 60)**2, (0.05 / 60)**2, (0.05 / 60)**2])\n\n @staticmethod\n def generate_code(generated_dir):\n name = LiveKalman.name\n dim_state = LiveKalman.initial_x.shape[0]\n dim_state_err = LiveKalman.initial_P_diag.shape[0]\n\n state_sym = sp.MatrixSymbol('state', dim_state, 1)\n state = sp.Matrix(state_sym)\n x, y, z = state[States.ECEF_POS, :]\n q = state[States.ECEF_ORIENTATION, :]\n v = state[States.ECEF_VELOCITY, :]\n vx, vy, vz = v\n omega = state[States.ANGULAR_VELOCITY, :]\n vroll, vpitch, vyaw = omega\n roll_bias, pitch_bias, yaw_bias = state[States.GYRO_BIAS, :]\n odo_scale = state[States.ODO_SCALE, :][0,:]\n 
acceleration = state[States.ACCELERATION, :]\n imu_angles = state[States.IMU_OFFSET, :]\n\n dt = sp.Symbol('dt')\n\n # calibration and attitude rotation matrices\n quat_rot = quat_rotate(*q)\n\n # Got the quat predict equations from here\n # A New Quaternion-Based Kalman Filter for\n # Real-Time Attitude Estimation Using the Two-Step\n # Geometrically-Intuitive Correction Algorithm\n A = 0.5 * sp.Matrix([[0, -vroll, -vpitch, -vyaw],\n [vroll, 0, vyaw, -vpitch],\n [vpitch, -vyaw, 0, vroll],\n [vyaw, vpitch, -vroll, 0]])\n q_dot = A * q\n\n # Time derivative of the state as a function of state\n state_dot = sp.Matrix(np.zeros((dim_state, 1)))\n state_dot[States.ECEF_POS, :] = v\n state_dot[States.ECEF_ORIENTATION, :] = q_dot\n state_dot[States.ECEF_VELOCITY, 0] = quat_rot * acceleration\n\n # Basic descretization, 1st order intergrator\n # Can be pretty bad if dt is big\n f_sym = state + dt * state_dot\n\n state_err_sym = sp.MatrixSymbol('state_err', dim_state_err, 1)\n state_err = sp.Matrix(state_err_sym)\n quat_err = state_err[States.ECEF_ORIENTATION_ERR, :]\n v_err = state_err[States.ECEF_VELOCITY_ERR, :]\n omega_err = state_err[States.ANGULAR_VELOCITY_ERR, :]\n acceleration_err = state_err[States.ACCELERATION_ERR, :]\n\n # Time derivative of the state error as a function of state error and state\n quat_err_matrix = euler_rotate(quat_err[0], quat_err[1], quat_err[2])\n q_err_dot = quat_err_matrix * quat_rot * (omega + omega_err)\n state_err_dot = sp.Matrix(np.zeros((dim_state_err, 1)))\n state_err_dot[States.ECEF_POS_ERR, :] = v_err\n state_err_dot[States.ECEF_ORIENTATION_ERR, :] = q_err_dot\n state_err_dot[States.ECEF_VELOCITY_ERR, :] = quat_err_matrix * quat_rot * (acceleration + acceleration_err)\n f_err_sym = state_err + dt * state_err_dot\n\n # Observation matrix modifier\n H_mod_sym = sp.Matrix(np.zeros((dim_state, dim_state_err)))\n H_mod_sym[States.ECEF_POS, States.ECEF_POS_ERR] = np.eye(States.ECEF_POS.stop - States.ECEF_POS.start)\n H_mod_sym[States.ECEF_ORIENTATION, States.ECEF_ORIENTATION_ERR] = 0.5 * quat_matrix_r(state[3:7])[:, 1:]\n H_mod_sym[States.ECEF_ORIENTATION.stop:, States.ECEF_ORIENTATION_ERR.stop:] = np.eye(dim_state - States.ECEF_ORIENTATION.stop)\n\n # these error functions are defined so that say there\n # is a nominal x and true x:\n # true x = err_function(nominal x, delta x)\n # delta x = inv_err_function(nominal x, true x)\n nom_x = sp.MatrixSymbol('nom_x', dim_state, 1)\n true_x = sp.MatrixSymbol('true_x', dim_state, 1)\n delta_x = sp.MatrixSymbol('delta_x', dim_state_err, 1)\n\n err_function_sym = sp.Matrix(np.zeros((dim_state, 1)))\n delta_quat = sp.Matrix(np.ones((4)))\n delta_quat[1:, :] = sp.Matrix(0.5 * delta_x[States.ECEF_ORIENTATION_ERR, :])\n err_function_sym[States.ECEF_POS, :] = sp.Matrix(nom_x[States.ECEF_POS, :] + delta_x[States.ECEF_POS_ERR, :])\n err_function_sym[States.ECEF_ORIENTATION, 0] = quat_matrix_r(nom_x[States.ECEF_ORIENTATION, 0]) * delta_quat\n err_function_sym[States.ECEF_ORIENTATION.stop:, :] = sp.Matrix(nom_x[States.ECEF_ORIENTATION.stop:, :] + delta_x[States.ECEF_ORIENTATION_ERR.stop:, :])\n\n inv_err_function_sym = sp.Matrix(np.zeros((dim_state_err, 1)))\n inv_err_function_sym[States.ECEF_POS_ERR, 0] = sp.Matrix(-nom_x[States.ECEF_POS, 0] + true_x[States.ECEF_POS, 0])\n delta_quat = quat_matrix_r(nom_x[States.ECEF_ORIENTATION, 0]).T * true_x[States.ECEF_ORIENTATION, 0]\n inv_err_function_sym[States.ECEF_ORIENTATION_ERR, 0] = sp.Matrix(2 * delta_quat[1:])\n inv_err_function_sym[States.ECEF_ORIENTATION_ERR.stop:, 0] = 
sp.Matrix(-nom_x[States.ECEF_ORIENTATION.stop:, 0] + true_x[States.ECEF_ORIENTATION.stop:, 0])\n\n eskf_params = [[err_function_sym, nom_x, delta_x],\n [inv_err_function_sym, nom_x, true_x],\n H_mod_sym, f_err_sym, state_err_sym]\n #\n # Observation functions\n #\n imu_rot = euler_rotate(*imu_angles)\n h_gyro_sym = imu_rot * sp.Matrix([vroll + roll_bias,\n vpitch + pitch_bias,\n vyaw + yaw_bias])\n\n pos = sp.Matrix([x, y, z])\n gravity = quat_rot.T * ((EARTH_GM / ((x**2 + y**2 + z**2)**(3.0 / 2.0))) * pos)\n h_acc_sym = imu_rot * (gravity + acceleration)\n h_phone_rot_sym = sp.Matrix([vroll, vpitch, vyaw])\n\n speed = sp.sqrt(vx**2 + vy**2 + vz**2)\n h_speed_sym = sp.Matrix([speed * odo_scale])\n\n h_pos_sym = sp.Matrix([x, y, z])\n h_imu_frame_sym = sp.Matrix(imu_angles)\n\n h_relative_motion = sp.Matrix(quat_rot.T * v)\n\n obs_eqs = [[h_speed_sym, ObservationKind.ODOMETRIC_SPEED, None],\n [h_gyro_sym, ObservationKind.PHONE_GYRO, None],\n [h_phone_rot_sym, ObservationKind.NO_ROT, None],\n [h_acc_sym, ObservationKind.PHONE_ACCEL, None],\n [h_pos_sym, ObservationKind.ECEF_POS, None],\n [h_relative_motion, ObservationKind.CAMERA_ODO_TRANSLATION, None],\n [h_phone_rot_sym, ObservationKind.CAMERA_ODO_ROTATION, None],\n [h_imu_frame_sym, ObservationKind.IMU_FRAME, None]]\n\n gen_code(generated_dir, name, f_sym, dt, state_sym, obs_eqs, dim_state, dim_state_err, eskf_params)\n\n def __init__(self, generated_dir):\n self.dim_state = self.initial_x.shape[0]\n self.dim_state_err = self.initial_P_diag.shape[0]\n\n self.obs_noise = {ObservationKind.ODOMETRIC_SPEED: np.atleast_2d(0.2**2),\n ObservationKind.PHONE_GYRO: np.diag([0.025**2, 0.025**2, 0.025**2]),\n ObservationKind.PHONE_ACCEL: np.diag([.5**2, .5**2, .5**2]),\n ObservationKind.CAMERA_ODO_ROTATION: np.diag([0.05**2, 0.05**2, 0.05**2]),\n ObservationKind.IMU_FRAME: np.diag([0.05**2, 0.05**2, 0.05**2]),\n ObservationKind.NO_ROT: np.diag([0.00025**2, 0.00025**2, 0.00025**2]),\n ObservationKind.ECEF_POS: np.diag([5**2, 5**2, 5**2])}\n\n # init filter\n self.filter = EKF_sym(generated_dir, self.name, self.Q, self.initial_x, np.diag(self.initial_P_diag), self.dim_state, self.dim_state_err)\n\n @property\n def x(self):\n return self.filter.state()\n\n @property\n def t(self):\n return self.filter.filter_time\n\n @property\n def P(self):\n return self.filter.covs()\n\n def rts_smooth(self, estimates):\n return self.filter.rts_smooth(estimates, norm_quats=True)\n\n def init_state(self, state, covs_diag=None, covs=None, filter_time=None):\n if covs_diag is not None:\n P = np.diag(covs_diag)\n elif covs is not None:\n P = covs\n else:\n P = self.filter.covs()\n self.filter.init_state(state, P, filter_time)\n\n def predict_and_observe(self, t, kind, data):\n if len(data) > 0:\n data = np.atleast_2d(data)\n if kind == ObservationKind.CAMERA_ODO_TRANSLATION:\n r = self.predict_and_update_odo_trans(data, t, kind)\n elif kind == ObservationKind.CAMERA_ODO_ROTATION:\n r = self.predict_and_update_odo_rot(data, t, kind)\n elif kind == ObservationKind.ODOMETRIC_SPEED:\n r = self.predict_and_update_odo_speed(data, t, kind)\n else:\n r = self.filter.predict_and_update_batch(t, kind, data, self.get_R(kind, len(data)))\n\n # Normalize quats\n quat_norm = np.linalg.norm(self.filter.x[3:7, 0])\n\n # Should not continue if the quats behave this weirdly\n if not (0.1 < quat_norm < 10):\n raise KalmanError(\"Kalman filter quaternions unstable\")\n\n self.filter.x[States.ECEF_ORIENTATION, 0] = self.filter.x[States.ECEF_ORIENTATION, 0] / quat_norm\n\n return r\n\n def 
get_R(self, kind, n):\n obs_noise = self.obs_noise[kind]\n dim = obs_noise.shape[0]\n R = np.zeros((n, dim, dim))\n for i in range(n):\n R[i, :, :] = obs_noise\n return R\n\n def predict_and_update_odo_speed(self, speed, t, kind):\n z = np.array(speed)\n R = np.zeros((len(speed), 1, 1))\n for i, _ in enumerate(z):\n R[i, :, :] = np.diag([0.2**2])\n return self.filter.predict_and_update_batch(t, kind, z, R)\n\n def predict_and_update_odo_trans(self, trans, t, kind):\n z = trans[:, :3]\n R = np.zeros((len(trans), 3, 3))\n for i, _ in enumerate(z):\n R[i, :, :] = np.diag(trans[i, 3:]**2)\n return self.filter.predict_and_update_batch(t, kind, z, R)\n\n def predict_and_update_odo_rot(self, rot, t, kind):\n z = rot[:, :3]\n R = np.zeros((len(rot), 3, 3))\n for i, _ in enumerate(z):\n R[i, :, :] = np.diag(rot[i, 3:]**2)\n return self.filter.predict_and_update_batch(t, kind, z, R)\n\n\nif __name__ == \"__main__\":\n generated_dir = sys.argv[2]\n LiveKalman.generate_code(generated_dir)\n"
] | [
[
"numpy.diag",
"numpy.eye",
"numpy.linalg.norm",
"numpy.ones",
"numpy.atleast_2d",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ATMOcanes/tropycal | [
"10cad2e4ff5b9cb1949d315cb328878306a65a74"
] | [
"src/tropycal/recon/dataset.py"
] | [
"import os\nimport numpy as np\nfrom datetime import datetime as dt,timedelta\nimport pandas as pd\nimport requests\nimport pickle\n\nfrom scipy.interpolate import interp1d\nfrom scipy.ndimage import gaussian_filter as gfilt,gaussian_filter1d as gfilt1d\nfrom scipy.ndimage.filters import minimum_filter\nimport matplotlib.dates as mdates\n\ntry:\n import matplotlib as mlib\n import matplotlib.lines as mlines\n import matplotlib.colors as mcolors\n import matplotlib.patheffects as path_effects\n import matplotlib.pyplot as plt\n import matplotlib.ticker as mticker\nexcept:\n warnings.warn(\"Warning: Matplotlib is not installed in your python environment. Plotting functions will not work.\")\n\nfrom .plot import ReconPlot\n\n#Import tools\nfrom .tools import *\nfrom ..utils import *\n\nclass ReconDataset:\n\n r\"\"\"\n Creates an instance of a ReconDataset object containing all recon data for a single storm.\n \n Parameters\n ----------\n stormtuple : tuple or list\n Requested storm. Can be either tuple or list containing storm name and year (e.g., (\"Matthew\",2016)).\n save_path : str, optional\n Filepath to save recon data in. Recommended in order to avoid having to re-read in the data.\n read_path : str, optional\n Filepath to read saved recon data from. If specified, \"save_path\" cannot be passed as an argument.\n \n Returns\n -------\n Dataset\n An instance of ReconDataset, initialized with the following:\n \n * **missiondata** - A dictionary of missions.\n Each entry is a dateframe from a single mission.\n Dictionary keys are given by mission number and agency (e.g. '15_NOAA').\n * **recentered** - A dataframe with all missions concatenated together, and columns 'xdist' and 'ydist'\n indicating the distance (km) of the ob from the interpolated center of the storm.\n \n Notes\n -----\n Recon data is currently read in via Tropical Atlantic. 
Future releases of Tropycal will incorporate NHC recon archives.\n \"\"\"\n\n def __init__(self, storm, deltap_thresh=8, mission_url_list=None, save_path=\"\", read_path=\"\", update=False):\n \n #Error check\n #if save_path != \"\" and read_path != \"\":\n # raise ValueError(\"Error: Cannot read in and save a file at the same time.\")\n \n #Create URL prefix for reading in recon data\n self.url_prefix = 'http://tropicalatlantic.com/recon/recon.cgi?'\n self.storm_obj = storm\n self.storm = str(storm.name)\n self.year = str(storm.year)\n self.deltap_thresh = deltap_thresh\n self.UPDATE = update\n self.mission_url_list = mission_url_list\n \n #If reading in a pickled file, load it in\n if read_path != \"\":\n self.missiondata = pickle.load(open(read_path,'rb'))\n if self.UPDATE:\n self.missiondata = self.allMissions()\n\n #Otherwise, retrieve all mission data for this storm\n else:\n self.missiondata = self.allMissions()\n\n #Save mission data as a pickle if necessary\n if save_path != \"\": pickle.dump(self.missiondata,open(save_path,'wb'),-1)\n\n #Convert recon data to storm-centered coordinates\n self.recentered = self.recenter()\n\n #print(f'Most recent data: {max(self.recentered['time']):%Y %b %d %H:%M} UTC')\n #print(f'Most recent center pass: {max(self.recentered.loc[self.recentered['iscenter']>0]['time']):%Y %b %d %H:%M} UTC')\n\n def getMission(self,agency,mission_num,url_mission=None):\n if url_mission is None:\n url_mission = f'{self.url_prefix}basin=al&year={self.year}&product=hdob&storm={self.storm}&mission={mission_num}&agency={agency}'\n content = np.array(requests.get(url_mission).content.decode(\"utf-8\").split('\\n'))\n obs = [line.split('\\\"')[1] for line in content if 'option value=' in line][::-1]\n for i,ob in enumerate(obs):\n url_ob = url_mission+'&ob='+ob\n data = pd.read_html(url_ob)[0]\n data = data.rename(columns = {[name for name in data if 'Time' in name][0]:'Time'})\n if i==0:\n mission = data[:-1]\n day0 = dt.strptime(self.year+ob[:5],'%Y%m-%d')\n else:\n mission = mission.append(data[:-1],ignore_index=True)\n \n def getVar(x,name):\n a = np.nan\n if x!='-' and '*' not in x and x!='No Wind':\n if name == 'Time':\n a = x\n if name == 'Coordinates':\n lat,lon = x.split(' ')\n lat = float(lat[:-1])*[1,-1][lat[-1]=='S']\n lon = float(lon[:-1])*[1,-1][lon[-1]=='W']\n a = np.array((lon,lat))\n elif name == 'Aircraft Static Air Pressure':\n a=float(x.split(' mb')[0])\n elif name == 'Aircraft Geo. Height':\n a=float(x.split(' meters')[0].replace(',', ''))\n elif name == 'Extrapolated Sfc. Pressure':\n a=float(x.split(' mb')[0])\n elif name == 'Flight Level Wind (30 sec. Avg.)':\n a=x.split(' ')\n wdir = float(a[1][:-1])\n wspd = float(a[3])\n a = np.array((wdir,wspd))\n elif name == 'Peak (10 sec. Avg.) Flight Level Wind':\n a=float(x.split(' knots')[0])\n elif name == 'SFMR Peak (10s Avg.) Sfc. Wind':\n a=x.split(' knots')\n a=float(a[0])\n if name in ['Coordinates','Flight Level Wind (30 sec. Avg.)'] and type(a)==float:\n a=np.array([a]*2)\n return a\n \n varnames = ['Time','Coordinates','Aircraft Static Air Pressure','Aircraft Geo. Height',\n 'Extrapolated Sfc. Pressure','Flight Level Wind (30 sec. Avg.)',\n 'Peak (10 sec. Avg.) Flight Level Wind','SFMR Peak (10s Avg.) Sfc. 
Wind']\n mission = {name:[getVar(item,name) for item in mission[name]] for name in varnames}\n for i,t in enumerate(mission['Time']):\n mission['Time'][i] = day0.replace(hour=int(t[:2]),minute=int(t[3:5]),second=int(t[6:8]))\n if i>0 and (mission['Time'][i]-mission['Time'][i-1]).total_seconds()<0:\n mission['Time'][i]+=timedelta(days=1)\n data={}\n data['lon'],data['lat'] = zip(*mission['Coordinates'])\n data['time'] = mission['Time']\n data['p_sfc'] = mission['Extrapolated Sfc. Pressure']\n data['wdir'],data['wspd'] = zip(*mission['Flight Level Wind (30 sec. Avg.)'])\n data['pkwnd'] = mission['Peak (10 sec. Avg.) Flight Level Wind']\n data['sfmr'] = mission['SFMR Peak (10s Avg.) Sfc. Wind']\n data['plane_p'] = mission['Aircraft Static Air Pressure']\n data['plane_z'] = mission['Aircraft Geo. Height']\n return_data = pd.DataFrame.from_dict(data)\n return_data['time'] = [pd.to_datetime(i) for i in return_data['time']]\n \n #remove nan's for lat/lon coordinates\n return_data = return_data.dropna(subset=['lat', 'lon'])\n \n return return_data\n \n\n def allMissions(self):\n url_storm = f'{self.url_prefix}basin=al&year={self.year}&storm={self.storm}&product=hdob'\n if self.mission_url_list is None:\n missions = pd.read_html(url_storm)[0]\n else:\n URL_LIST = self.mission_url_list\n missions = pd.DataFrame.from_dict({'Agency':['listedurl']*len(URL_LIST),'MissionNumber':[f'{n:02}' for n in range(len(URL_LIST))],'URL':URL_LIST})\n if self.UPDATE:\n missiondata = self.missiondata\n lastMissionNumber = max([int(x.split('_')[0]) for x in list(missiondata.keys())])\n idxf = [x for x in missions['MissionNumber']].index(lastMissionNumber)+1\n idxf = min([idxf+1,len(missions)]) # update last two missions\n else:\n idxf = len(missions)\n missiondata={}\n timer_start = dt.now()\n print(f'--> Starting to read in recon missions')\n for i_mission in range(0,idxf):\n if self.mission_url_list is None:\n mission_num = str(missions['MissionNumber'][i_mission]).zfill(2)\n agency = ''.join(filter(str.isalpha, missions['Agency'][i_mission]))\n missiondata[f'{mission_num}_{agency}'] = self.getMission(agency,mission_num)\n else:\n mission_num = missions['MissionNumber'][i_mission]\n agency = missions['Agency'][i_mission]\n url = missions['URL'][i_mission]\n missiondata[f'{mission_num}{agency}'] = self.getMission(agency,mission_num,url)\n print(f'{mission_num}_{agency}')\n print('--> Completed reading in recon missions (%.2f seconds)' % (dt.now()-timer_start).total_seconds())\n return missiondata\n\n def find_centers(self,data):\n \n def fill_nan(A):\n #Interpolate to fill nan values\n A = np.array(A)\n inds = np.arange(len(A))\n good = np.where(np.isfinite(A))\n good_grad = np.gradient(good[0])\n if len(good[0])>=3:\n f = interp1d(inds[good], A[good],bounds_error=False,kind='quadratic')\n B = np.where(np.isfinite(A)[good[0][0]:good[0][-1]+1],\n A[good[0][0]:good[0][-1]+1],\n f(inds[good[0][0]:good[0][-1]+1]))\n return [np.nan]*good[0][0]+list(B)+[np.nan]*(inds[-1]-good[0][-1])\n else:\n return [np.nan]*len(A)\n \n #Check that sfc pressure spread is big enough to identify real minima\n if np.nanpercentile(data['p_sfc'],90)-np.nanpercentile(data['p_sfc'],10)>self.deltap_thresh:\n data['p_sfc'][:20]=[np.nan]*20 #NaN out the first 10 minutes of the flight\n p_sfc_interp = fill_nan(data['p_sfc']) #Interp p_sfc across missing data\n wspd_interp = fill_nan(data['wspd']) #Interp wspd across missing data\n #Smooth p_sfc and wspd\n p_sfc_smooth = [np.nan]*1+list(np.convolve(p_sfc_interp,[1/3]*3,mode='valid'))+[np.nan]*1\n 
wspd_smooth = [np.nan]*1+list(np.convolve(wspd_interp,[1/3]*3,mode='valid'))+[np.nan]*1\n #Add wspd to p_sfc to encourage finding p mins with wspd mins \n #and prevent finding p mins in intense thunderstorms\n pw_test = np.array(p_sfc_smooth)+np.array(wspd_smooth)*.1\n #Find mins in 15-minute windows\n imin = np.nonzero(pw_test == minimum_filter(pw_test,30))[0]\n #Only use mins if below 15th %ile of mission p_sfc data and when plane p is 500-900mb\n imin = [i for i in imin if 800<p_sfc_interp[i]<np.nanpercentile(data['p_sfc'],15) and \\\n 550<data['plane_p'][i]<950]\n else:\n imin=[]\n data['iscenter'] = np.zeros(len(data['p_sfc']))\n for i in imin:\n j = data.index.values[i]\n data['iscenter'][j] = 1\n return data\n\n def recenter(self,use='all'): \n self.use = use \n def stitchMissions():\n list_of_dfs=[]\n for name in self.missiondata:\n if self.use == 'all' or self.use in name:\n mission = self.missiondata[name]\n tmp = self.find_centers(mission)\n list_of_dfs.append( tmp )\n data_concat = pd.concat(list_of_dfs,ignore_index=True)\n data_chron = data_concat.sort_values(by='time').reset_index(drop=True)\n return data_chron\n\n data = stitchMissions()\n centers = data.loc[data['iscenter']>0]\n \n if len(centers)<2:\n print('Sorry, less than 2 center passes')\n else:\n print(f'Found {len(centers)} center passes!')\n timer_start = dt.now()\n \n #Interpolate center position to time of each ob\n f1 = interp1d(mdates.date2num(centers['time']),centers['lon'],fill_value='extrapolate',kind='linear')\n interp_clon = f1(mdates.date2num(data['time']))\n f2 = interp1d(mdates.date2num(centers['time']),centers['lat'],fill_value='extrapolate',kind='linear')\n interp_clat = f2(mdates.date2num(data['time']))\n\n #Get x,y distance of each ob from coinciding interped center position\n data['xdist'] = [great_circle( (interp_clat[i],interp_clon[i]), \\\n (interp_clat[i],data['lon'][i]) ).kilometers* \\\n [1,-1][int(data['lon'][i] < interp_clon[i])] for i in range(len(data))]\n data['ydist'] = [great_circle( (interp_clat[i],interp_clon[i]), \\\n (data['lat'][i],interp_clon[i]) ).kilometers* \\\n [1,-1][int(data['lat'][i] < interp_clat[i])] for i in range(len(data))]\n \n print('--> Completed recentering recon data (%.2f seconds)' % (dt.now()-timer_start).total_seconds())\n return data\n \n def __getSubTime(self,time):\n \n if isinstance(time,(tuple,list)):\n t1=min(time)\n t2=max(time)\n else:\n t1 = time-timedelta(hours=6)\n t2 = time+timedelta(hours=6)\n subRecon = self.recentered.loc[(self.recentered['time']>=t1) & \\\n (self.recentered['time']<t2)]\n return subRecon\n \n \n def findMission(self,time):\n \n r\"\"\"\n Returns the name of a mission or list of missions given a specified time.\n \n Parameters\n ----------\n time : datetime.datetime or list\n Datetime object or list of datetime objects representing the time of the requested mission.\n \n Returns\n -------\n list\n The names of any/all missions that had in-storm observations during the specified time.\n \"\"\"\n \n if isinstance(time,list):\n t1=min(time)\n t2=max(time)\n else:\n t1 = t2 = time\n selected=[]\n for name in self.missiondata:\n t_start = min(self.missiondata[name]['time'])\n t_end = max(self.missiondata[name]['time'])\n if (t_start<t1<t_end) or (t_start<t2<t_end) or (t1<t_start<t2):\n selected.append(name)\n if len(selected)==0:\n print('There were no in-storm recon missions during this time')\n return selected\n\n\n def plot_points(self,recon_select=None,varname='wspd',domain=\"dynamic\",plane_p_range=None,\\\n 
ax=None,return_ax=False,cartopy_proj=None,**kwargs):\n \n r\"\"\"\n Creates a plot of recon data points.\n \n Parameters\n ----------\n recon_select : Requested recon data\n pandas.DataFrame or dict,\n or string referencing the mission name (e.g. '12_NOAA'), \n or datetime or list of start/end datetimes.\n varname : str\n Variable to plot. Can be one of the following keys in recon_select dataframe:\n \n * **\"sfmr\"** = SFMR surface wind\n * **\"wspd\"** = 30-second flight level wind (default)\n * **\"pkwnd\"** = 10-second flight level wind\n * **\"p_sfc\"** = extrapolated surface pressure\n domain : str\n Domain for the plot. Default is \"dynamic\". Please refer to :ref:`options-domain` for available domain options.\n ax : axes\n Instance of axes to plot on. If none, one will be generated. Default is none.\n return_ax : bool\n If True, returns the axes instance on which the plot was generated for the user to further modify. Default is False.\n cartopy_proj : ccrs\n Instance of a cartopy projection to use. If none, one will be generated. Default is none.\n \n Other Parameters\n ----------------\n prop : dict\n Customization properties of recon plot. Please refer to :ref:`options-prop-recon-plot` for available options.\n map_prop : dict\n Customization properties of Cartopy map. Please refer to :ref:`options-map-prop` for available options.\n \"\"\"\n \n #Pop kwargs\n prop = kwargs.pop('prop',{})\n map_prop = kwargs.pop('map_prop',{})\n \n #Get plot data\n \n if recon_select is None:\n dfRecon = self.recentered\n elif isinstance(recon_select,pd.core.frame.DataFrame):\n dfRecon = recon_select\n elif isinstance(recon_select,dict):\n dfRecon = pd.DataFrame.from_dict(recon_select)\n elif isinstance(recon_select,str):\n dfRecon = self.missiondata[recon_select]\n else:\n dfRecon = self.__getSubTime(recon_select)\n \n #Apply flight level filter\n if plane_p_range is not None:\n dfRecon = dfRecon.loc[(dfRecon['plane_p']>min(plane_p_range)) & (dfRecon['plane_p']<max(plane_p_range))]\n \n #Create instance of plot object\n self.plot_obj = ReconPlot()\n \n #Create cartopy projection\n if cartopy_proj is None:\n self.plot_obj.create_cartopy(proj='PlateCarree',central_longitude=0.0)\n cartopy_proj = self.plot_obj.proj\n \n #Plot recon\n plot_info = self.plot_obj.plot_points(self.storm_obj,dfRecon,domain,varname=varname,\\\n ax=ax,return_ax=return_ax,prop=prop,map_prop=map_prop)\n \n #Return axis\n if ax is not None or return_ax==True:\n return plot_info\n\n \n def plot_hovmoller(self,recon_select=None,varname='wspd',radlim=None,track_dict=None,plane_p_range=None,\\\n window=6,align='center',ax=None,return_ax=False,**kwargs):\n \n r\"\"\"\n Creates a hovmoller plot of azimuthally-averaged recon data.\n \n Parameters\n ----------\n recon_select : Requested recon data\n pandas.DataFrame or dict,\n or datetime or list of start/end datetimes.\n varname : Variable to average and plot (e.g. 'wspd').\n String\n ax : axes\n Instance of axes to plot on. If none, one will be generated. Default is none.\n return_ax : bool\n If True, returns the axes instance on which the plot was generated for the user to further modify. Default is False.\n cartopy_proj : ccrs\n Instance of a cartopy projection to use. If none, one will be generated. Default is none.\n \n Other Parameters\n ----------------\n prop : dict\n Customization properties for recon plot. 
Please refer to :ref:`options-prop-recon-hovmoller` for available options.\n \"\"\"\n \n #Pop kwargs\n prop = kwargs.pop('prop',{})\n default_prop = {'cmap':'category','levels':None,'smooth_contourf':False}\n for key in default_prop.keys():\n if key not in prop.keys():\n prop[key]=default_prop[key]\n \n #Get recon data based on recon_select\n if recon_select is None:\n dfRecon = self.recentered\n elif isinstance(recon_select,pd.core.frame.DataFrame):\n dfRecon = recon_select\n elif isinstance(recon_select,dict):\n dfRecon = pd.DataFrame.from_dict(recon_select)\n else:\n dfRecon = self.__getSubTime(recon_select)\n \n #Apply flight level filter\n if plane_p_range is not None:\n dfRecon = dfRecon.loc[(dfRecon['plane_p']>min(plane_p_range)) & (dfRecon['plane_p']<max(plane_p_range))]\n \n #Retrieve track dictionary if none is specified\n if track_dict is None:\n track_dict = self.storm_obj.dict\n \n #Interpolate recon data to a hovmoller\n iRecon = interpRecon(dfRecon,varname,radlim,window=window,align=align)\n Hov_dict = iRecon.interpHovmoller(track_dict)\n\n #title = get_recon_title(varname) #may not be necessary\n #If no contour levels specified, generate levels based on data min and max\n if prop['levels'] is None:\n prop['levels'] = (np.nanmin(Hov_dict['hovmoller']),np.nanmax(Hov_dict['hovmoller']))\n \n #Retrieve updated contour levels and colormap based on input arguments and variable type\n cmap,clevs = get_cmap_levels(varname,prop['cmap'],prop['levels'])\n \n #Retrieve hovmoller times, radii and data\n time = Hov_dict['time']\n radius = Hov_dict['radius']\n vardata = Hov_dict['hovmoller']\n \n #Error check time\n time = [dt.strptime((i.strftime('%Y%m%d%H%M')),'%Y%m%d%H%M') for i in time]\n \n #------------------------------------------------------------------------------\n \n #Create plot \n #plt.figure(figsize=(9,11),dpi=150)\n plt.figure(figsize=(9,9),dpi=150) #CHANGE THIS OR ELSE\n ax = plt.subplot()\n \n #Plot surface category colors individually, necessitating normalizing colormap\n if varname in ['vmax','sfmr','fl_to_sfc'] and prop['cmap'] == 'category':\n norm = mcolors.BoundaryNorm(clevs,cmap.N)\n cf = ax.contourf(radius,time,gfilt1d(vardata,sigma=3,axis=1),\n levels=clevs,cmap=cmap,norm=norm)\n \n #Multiple clevels or without smooth contouring\n elif len(prop['levels']) > 2 or prop['smooth_contourf'] == False:\n cf = ax.contourf(radius,time,gfilt1d(vardata,sigma=3,axis=1),\n levels=clevs,cmap=cmap)\n \n #Automatically generated levels with smooth contouring\n else:\n cf = ax.contourf(radius,time,gfilt1d(vardata,sigma=3,axis=1),\n cmap=cmap,levels=np.linspace(min(prop['levels']),max(prop['levels']),256))\n ax.axis([0,max(radius),min(time),max(time)])\n \n #Plot colorbar\n cbar = plt.colorbar(cf,orientation='horizontal',pad=0.1)\n \n #Format y-label ticks and labels as dates\n ax.yaxis.set_major_formatter(mdates.DateFormatter('%m-%d %H'))\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(14)\n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(14)\n \n #Set axes labels\n ax.set_ylabel('UTC Time (MM-DD HH)',fontsize=15)\n ax.set_xlabel('Radius (km)',fontsize=15)\n \n #--------------------------------------------------------------------------------------\n \n #Generate left and right title strings\n title_left, title_right = hovmoller_plot_title(self.storm_obj,Hov_dict,varname)\n ax.set_title(title_left,loc='left',fontsize=16,fontweight='bold')\n ax.set_title(title_right,loc='right',fontsize=12)\n \n #Return axis\n if return_ax:\n return ax\n\n\n 
#PLOT FUNCTION FOR RECON MAPS\n def plot_maps(self,recon_select=None,varname='wspd',track_dict=None,recon_stats=None,domain=\"dynamic\",\\\n window=6,align='center',radlim=None,plane_p_range=None,ax=None,return_ax=False,savetopath=None,cartopy_proj=None,**kwargs):\n \n #plot_time, plot_mission (only for dots)\n \n r\"\"\"\n Creates maps of interpolated recon data. \n \n Parameters\n ----------\n recon_select : Requested recon data\n pandas.DataFrame or dict,\n or string referencing the mission name (e.g. '12_NOAA'), \n or datetime or list of start/end datetimes.\n varname : str\n Variable to plot. Can be one of the following keys in recon_select dataframe:\n \n * **\"sfmr\"** = SFMR surface wind\n * **\"wspd\"** = 30-second flight level wind (default)\n * **\"pkwnd\"** = 10-second flight level wind\n * **\"p_sfc\"** = extrapolated surface pressure\n domain : str\n Domain for the plot. Default is \"dynamic\". Please refer to :ref:`options-domain` for available domain options.\n ax : axes\n Instance of axes to plot on. If none, one will be generated. Default is none.\n return_ax : bool\n If True, returns the axes instance on which the plot was generated for the user to further modify. Default is False.\n cartopy_proj : ccrs\n Instance of a cartopy projection to use. If none, one will be generated. Default is none.\n \n Other Parameters\n ----------------\n prop : dict\n Customization properties of recon plot. Please refer to :ref:`options-prop-recon-swath` for available options.\n map_prop : dict\n Customization properties of Cartopy map. Please refer to :ref:`options-map-prop` for available options.\n \"\"\"\n \n #Pop kwargs\n prop = kwargs.pop('prop',{})\n map_prop = kwargs.pop('map_prop',{})\n \n #Get plot data\n ONE_MAP = False\n if recon_select is None:\n dfRecon = self.recentered \n elif isinstance(recon_select,pd.core.frame.DataFrame):\n dfRecon = recon_select\n elif isinstance(recon_select,dict):\n dfRecon = pd.DataFrame.from_dict(recon_select)\n elif isinstance(recon_select,str):\n dfRecon = self.missiondata[recon_select]\n else:\n dfRecon = self.__getSubTime(recon_select)\n if not isinstance(recon_select,(tuple,list)):\n ONE_MAP = True\n \n MULTIVAR=False\n if isinstance(varname,(tuple,list)):\n MULTIVAR=True \n \n #Apply flight level filter\n if plane_p_range is not None:\n dfRecon = dfRecon.loc[(dfRecon['plane_p']>min(plane_p_range)) & (dfRecon['plane_p']<max(plane_p_range))]\n \n if track_dict is None:\n track_dict = self.storm_obj.dict\n \n #Error check for time dimension name\n if 'time' not in track_dict.keys():\n track_dict['time'] = track_dict['date']\n \n if ONE_MAP:\n f = interp1d(mdates.date2num(track_dict['time']),track_dict['lon'], fill_value='extrapolate')\n clon = f(mdates.date2num(recon_select))\n f = interp1d(mdates.date2num(track_dict['time']),track_dict['lat'], fill_value='extrapolate')\n clat = f(mdates.date2num(recon_select))\n \n #clon = np.interp(mdates.date2num(recon_select),mdates.date2num(track_dict['time']),track_dict['lon'])\n #clat = np.interp(mdates.date2num(recon_select),mdates.date2num(track_dict['time']),track_dict['lat'])\n track_dict = {'time':recon_select,'lon':clon,'lat':clat}\n \n if MULTIVAR:\n Maps=[]\n for v in varname:\n iRecon = interpRecon(dfRecon,v,radlim,window=window,align=align)\n tmpMaps = iRecon.interpMaps(track_dict)\n Maps.append(tmpMaps)\n else:\n iRecon = interpRecon(dfRecon,varname,radlim,window=window,align=align)\n Maps = iRecon.interpMaps(track_dict)\n \n #titlename,units = get_recon_title(varname)\n \n if 'levels' not in 
prop.keys() or 'levels' in prop.keys() and prop['levels'] is None:\n prop['levels'] = np.arange(np.floor(np.nanmin(Maps['maps'])/10)*10,\n np.ceil(np.nanmax(Maps['maps'])/10)*10+1,10)\n \n if not ONE_MAP:\n \n if savetopath is True:\n #savetopath = f'{self.storm}{self.year}_{varname}_maps'\n savetopath = f'{self.storm}{self.year}_maps'\n try:\n os.system(f'mkdir {savetopath}')\n except:\n pass\n \n if MULTIVAR:\n Maps2 = Maps[1]\n Maps = Maps[0]\n \n print(np.nanmax(Maps['maps']),np.nanmin(Maps2['maps']))\n \n figs = []\n for i,t in enumerate(Maps['time']):\n Maps_sub = {'time':t,'grid_x':Maps['grid_x'],'grid_y':Maps['grid_y'],'maps':Maps['maps'][i],\\\n 'center_lon':Maps['center_lon'][i],'center_lat':Maps['center_lat'][i],'stats':Maps['stats']}\n\n #Create instance of plot object\n self.plot_obj = ReconPlot()\n \n #Create cartopy projection\n self.plot_obj.create_cartopy(proj='PlateCarree',central_longitude=0.0)\n cartopy_proj = self.plot_obj.proj\n \n #Maintain the same lat / lon dimensions for all dynamic maps\n #Determined by the dynamic domain from the first map\n if i>0 and domain is 'dynamic':\n d1 = {'n':Maps_sub['center_lat']+dlat,\\\n 's':Maps_sub['center_lat']-dlat,\\\n 'e':Maps_sub['center_lon']+dlon,\\\n 'w':Maps_sub['center_lon']-dlon}\n else:\n d1 = domain\n \n #Plot recon\n \n if MULTIVAR:\n Maps_sub1 = dict(Maps_sub)\n Maps_sub2 = dict(Maps_sub)\n Maps_sub = [Maps_sub1,Maps_sub2]\n Maps_sub[1]['maps'] = Maps2['maps'][i]\n \n print(np.nanmax(Maps_sub[0]['maps']),np.nanmin(Maps_sub[1]['maps']))\n \n plot_ax,d0 = self.plot_obj.plot_maps(self.storm_obj,Maps_sub,varname,recon_stats,\\\n domain=d1,ax=ax,return_ax=True,return_domain=True,prop=prop,map_prop=map_prop)\n \n #Get domain dimensions from the first map\n if i==0:\n dlat = .5*(d0['n']-d0['s'])\n dlon = .5*(d0['e']-d0['w'])\n \n figs.append(plot_ax)\n \n if savetopath is not None:\n plt.savefig(f'{savetopath}/{t.strftime(\"%Y%m%d%H%M\")}',bbox_inches='tight')\n plt.close()\n \n if savetopath is None:\n return figs\n \n\n else:\n #Create instance of plot object\n self.plot_obj = ReconPlot()\n \n #Create cartopy projection\n if cartopy_proj is None:\n self.plot_obj.create_cartopy(proj='PlateCarree',central_longitude=0.0)\n cartopy_proj = self.plot_obj.proj\n \n #Plot recon\n plot_info = self.plot_obj.plot_maps(self.storm_obj,Maps,varname,recon_stats,\\\n domain,ax,return_ax,prop=prop,map_prop=map_prop)\n \n #Return axis\n if ax is not None or return_ax:\n return plot_info\n \n \n \n #PLOT FUNCTION FOR RECON SWATH\n def plot_swath(self,recon_select=None,varname='wspd',swathfunc=None,track_dict=None,radlim=None,\\\n domain=\"dynamic\",plane_p_range=None,ax=None,return_ax=False,cartopy_proj=None,**kwargs):\n \n r\"\"\"\n Creates a map plot of a swath of interpolated recon data.\n \n Parameters\n ----------\n recon_select : Requested recon data\n pandas.DataFrame or dict,\n or string referencing the mission name (e.g. '12_NOAA'), \n or datetime or list of start/end datetimes.\n varname : str\n Variable to plot. Can be one of the following keys in recon_select dataframe:\n \n * **\"sfmr\"** = SFMR surface wind\n * **\"wspd\"** = 30-second flight level wind (default)\n * **\"pkwnd\"** = 10-second flight level wind\n * **\"p_sfc\"** = extrapolated surface pressure\n swathfunc : function\n Function to operate on interpolated recon data.\n e.g., np.max, np.min, or percentile function\n domain : str\n Domain for the plot. Default is \"dynamic\". 
Please refer to :ref:`options-domain` for available domain options.\n ax : axes\n Instance of axes to plot on. If none, one will be generated. Default is none.\n return_ax : bool\n If True, returns the axes instance on which the plot was generated for the user to further modify. Default is False.\n cartopy_proj : ccrs\n Instance of a cartopy projection to use. If none, one will be generated. Default is none.\n \n Other Parameters\n ----------------\n prop : dict\n Customization properties of recon plot. Please refer to :ref:`options-prop-recon-swath` for available options.\n map_prop : dict\n Customization properties of Cartopy map. Please refer to :ref:`options-map-prop` for available options.\n \"\"\"\n \n #Pop kwargs\n prop = kwargs.pop('prop',{})\n map_prop = kwargs.pop('map_prop',{})\n \n #Get plot data\n if recon_select is None:\n dfRecon = self.recentered \n elif isinstance(recon_select,pd.core.frame.DataFrame):\n dfRecon = recon_select\n elif isinstance(recon_select,dict):\n dfRecon = pd.DataFrame.from_dict(recon_select)\n elif isinstance(recon_select,str):\n dfRecon = self.missiondata[recon_select]\n else:\n dfRecon = self.__getSubTime(recon_select)\n\n #Apply flight level filter\n if plane_p_range is not None:\n dfRecon = dfRecon.loc[(dfRecon['plane_p']>min(plane_p_range)) & (dfRecon['plane_p']<max(plane_p_range))]\n \n if track_dict is None:\n track_dict = self.storm_obj.dict\n \n if swathfunc is None:\n if varname == 'p_sfc':\n swathfunc = np.min\n else:\n swathfunc = np.max\n \n iRecon = interpRecon(dfRecon,varname)\n Maps = iRecon.interpMaps(track_dict,interval=.2)\n \n #Create instance of plot object\n self.plot_obj = ReconPlot()\n \n #Create cartopy projection\n if cartopy_proj is None:\n self.plot_obj.create_cartopy(proj='PlateCarree',central_longitude=0.0)\n cartopy_proj = self.plot_obj.proj\n \n #Plot recon\n plot_info = self.plot_obj.plot_swath(self.storm_obj,Maps,varname,swathfunc,track_dict,radlim,\\\n domain,ax,return_ax,prop=prop,map_prop=map_prop)\n \n #Return axis\n if ax is not None or return_ax==True:\n return plot_info\n"
] | [
[
"numpy.nanmax",
"pandas.to_datetime",
"matplotlib.colors.BoundaryNorm",
"numpy.nanmin",
"scipy.ndimage.gaussian_filter1d",
"pandas.read_html",
"matplotlib.pyplot.subplot",
"scipy.interpolate.interp1d",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"pandas.concat",
"matplotlib.dates.DateFormatter",
"pandas.DataFrame.from_dict",
"numpy.array",
"numpy.convolve",
"numpy.nanpercentile",
"numpy.isfinite",
"numpy.gradient",
"matplotlib.pyplot.colorbar",
"matplotlib.dates.date2num",
"scipy.ndimage.filters.minimum_filter"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"1.3",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16"
],
"tensorflow": []
}
] |
abditag2/DCGAN-tensorflow | [
"432b0d91bd8252c48869c205b86701993eb37618"
] | [
"utils.py"
] | [
"\"\"\"\nSome codes from https://github.com/Newmu/dcgan_code\n\"\"\"\nfrom __future__ import division\n\nimport math\nimport pprint\nimport random\nfrom time import gmtime, strftime\n\nimport numpy as np\nimport scipy.misc\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom six.moves import xrange\n\npp = pprint.PrettyPrinter()\n\nget_stddev = lambda x, k_h, k_w: 1/math.sqrt(k_w*k_h*x.get_shape()[-1])\n\ndef show_all_variables():\n model_vars = tf.trainable_variables()\n slim.model_analyzer.analyze_vars(model_vars, print_info=True)\n\ndef get_image(image_path, input_height, input_width,\n resize_height=64, resize_width=64,\n crop=True, grayscale=False):\n image = imread(image_path, grayscale)\n return transform(image, input_height, input_width,\n resize_height, resize_width, crop)\n\ndef save_images(images, size, image_path):\n return imsave(inverse_transform(images), size, image_path)\n\ndef imread(path, grayscale = False):\n if (grayscale):\n return scipy.misc.imread(path, flatten = True).astype(np.float)\n else:\n return scipy.misc.imread(path).astype(np.float)\n\ndef merge_images(images, size):\n return inverse_transform(images)\n\ndef merge(images, size):\n h, w = images.shape[1], images.shape[2]\n if (images.shape[3] in (3,4)):\n c = images.shape[3]\n img = np.zeros((h * size[0], w * size[1], c))\n for idx, image in enumerate(images):\n i = idx % size[1]\n j = idx // size[1]\n img[j * h:j * h + h, i * w:i * w + w, :] = image\n return img\n elif images.shape[3]==1:\n img = np.zeros((h * size[0], w * size[1]))\n for idx, image in enumerate(images):\n i = idx % size[1]\n j = idx // size[1]\n img[j * h:j * h + h, i * w:i * w + w] = image[:,:,0]\n return img\n else:\n raise ValueError('in merge(images,size) images parameter '\n 'must have dimensions: HxW or HxWx3 or HxWx4')\n\ndef imsave(images, size, path):\n image = np.squeeze(merge(images, size))\n return scipy.misc.imsave(path, image)\n\ndef center_crop(x, crop_h, crop_w,\n resize_h=64, resize_w=64):\n if crop_w is None:\n crop_w = crop_h\n h, w = x.shape[:2]\n j = int(round((h - crop_h)/2.))\n i = int(round((w - crop_w)/2.))\n return scipy.misc.imresize(\n x[j:j+crop_h, i:i+crop_w], [resize_h, resize_w])\n\ndef transform(image, input_height, input_width, \n resize_height=64, resize_width=64, crop=True):\n if crop:\n cropped_image = center_crop(\n image, input_height, input_width, \n resize_height, resize_width)\n else:\n cropped_image = scipy.misc.imresize(image, [resize_height, resize_width])\n return np.array(cropped_image)/127.5 - 1.\n\ndef inverse_transform(images):\n return (images+1.)/2.\n\ndef to_json(output_path, *layers):\n with open(output_path, \"w\") as layer_f:\n lines = \"\"\n for w, b, bn in layers:\n layer_idx = w.name.split('/')[0].split('h')[1]\n\n B = b.eval()\n\n if \"lin/\" in w.name:\n W = w.eval()\n depth = W.shape[1]\n else:\n W = np.rollaxis(w.eval(), 2, 0)\n depth = W.shape[0]\n\n biases = {\"sy\": 1, \"sx\": 1, \"depth\": depth, \"w\": ['%.2f' % elem for elem in list(B)]}\n if bn != None:\n gamma = bn.gamma.eval()\n beta = bn.beta.eval()\n\n gamma = {\"sy\": 1, \"sx\": 1, \"depth\": depth, \"w\": ['%.2f' % elem for elem in list(gamma)]}\n beta = {\"sy\": 1, \"sx\": 1, \"depth\": depth, \"w\": ['%.2f' % elem for elem in list(beta)]}\n else:\n gamma = {\"sy\": 1, \"sx\": 1, \"depth\": 0, \"w\": []}\n beta = {\"sy\": 1, \"sx\": 1, \"depth\": 0, \"w\": []}\n\n if \"lin/\" in w.name:\n fs = []\n for w in W.T:\n fs.append({\"sy\": 1, \"sx\": 1, \"depth\": W.shape[0], \"w\": ['%.2f' % elem for 
elem in list(w)]})\n\n lines += \"\"\"\n var layer_%s = {\n \"layer_type\": \"fc\", \n \"sy\": 1, \"sx\": 1, \n \"out_sx\": 1, \"out_sy\": 1,\n \"stride\": 1, \"pad\": 0,\n \"out_depth\": %s, \"in_depth\": %s,\n \"biases\": %s,\n \"gamma\": %s,\n \"beta\": %s,\n \"filters\": %s\n };\"\"\" % (layer_idx.split('_')[0], W.shape[1], W.shape[0], biases, gamma, beta, fs)\n else:\n fs = []\n for w_ in W:\n fs.append({\"sy\": 5, \"sx\": 5, \"depth\": W.shape[3], \"w\": ['%.2f' % elem for elem in list(w_.flatten())]})\n\n lines += \"\"\"\n var layer_%s = {\n \"layer_type\": \"deconv\", \n \"sy\": 5, \"sx\": 5,\n \"out_sx\": %s, \"out_sy\": %s,\n \"stride\": 2, \"pad\": 1,\n \"out_depth\": %s, \"in_depth\": %s,\n \"biases\": %s,\n \"gamma\": %s,\n \"beta\": %s,\n \"filters\": %s\n };\"\"\" % (layer_idx, 2**(int(layer_idx)+2), 2**(int(layer_idx)+2),\n W.shape[0], W.shape[3], biases, gamma, beta, fs)\n layer_f.write(\" \".join(lines.replace(\"'\",\"\").split()))\n\ndef make_gif(images, fname, duration=2, true_image=False):\n import moviepy.editor as mpy\n\n def make_frame(t):\n try:\n x = images[int(len(images)/duration*t)]\n except:\n x = images[-1]\n\n if true_image:\n return x.astype(np.uint8)\n else:\n return ((x+1)/2*255).astype(np.uint8)\n\n clip = mpy.VideoClip(make_frame, duration=duration)\n clip.write_gif(fname, fps = len(images) / duration)\n\ndef visualize(sess, dcgan, config, batch_size, option):\n print('dcgan.z_dim:', dcgan.z_dim)\n print('xrange(dcgan.z_dim):', xrange(dcgan.z_dim))\n print('config.generate_test_images:', config.generate_test_images)\n\n if option == 0:\n z_sample = np.random.uniform(-0.5, 0.5, size=(batch_size, dcgan.z_dim))\n samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})\n save_images(samples, [config.grid_height, config.grid_width], './samples/test_%s.png' % strftime(\"%Y-%m-%d-%H-%M-%S\", gmtime()))\n elif option == 1:\n values = np.arange(0, 1, 1./batch_size)\n for idx in xrange(config.generate_test_images):\n print(\" [*] %d\" % idx)\n z_sample = np.random.uniform(-1, 1, size=(batch_size, dcgan.z_dim))\n for kdx, z in enumerate(z_sample):\n z[idx] = values[kdx]\n\n if config.dataset == \"mnist\":\n y = np.random.choice(10, batch_size)\n y_one_hot = np.zeros((batch_size, 10))\n y_one_hot[np.arange(batch_size), y] = 1\n\n samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})\n else:\n samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})\n\n save_images(samples, [config.grid_height, config.grid_width], './samples/test_arange_%s.png' % (idx))\n elif option == 2:\n values = np.arange(0, 1, 1./batch_size)\n for idx in [random.randint(0, dcgan.z_dim - 1) for _ in xrange(dcgan.z_dim)]:\n print(\" [*] %d\" % idx)\n z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))\n z_sample = np.tile(z, (batch_size, 1))\n #z_sample = np.zeros([batch_size, dcgan.z_dim])\n for kdx, z in enumerate(z_sample):\n z[idx] = values[kdx]\n\n if config.dataset == \"mnist\":\n y = np.random.choice(10, batch_size)\n y_one_hot = np.zeros((batch_size, 10))\n y_one_hot[np.arange(batch_size), y] = 1\n\n samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})\n else:\n samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})\n\n try:\n make_gif(samples, './samples/test_gif_%s.gif' % (idx))\n except:\n save_images(samples, [config.grid_height, config.grid_width], './samples/test_%s.png' % strftime(\"%Y-%m-%d-%H-%M-%S\", gmtime()))\n elif option == 3:\n values = np.arange(0, 1, 1./batch_size)\n for idx in 
xrange(dcgan.z_dim):\n print(\" [*] %d\" % idx)\n z_sample = np.zeros([batch_size, dcgan.z_dim])\n for kdx, z in enumerate(z_sample):\n z[idx] = values[kdx]\n\n samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})\n make_gif(samples, './samples/test_gif_%s.gif' % (idx))\n elif option == 4:\n image_set = []\n values = np.arange(0, 1, 1./batch_size)\n\n for idx in xrange(dcgan.z_dim):\n print(\" [*] %d\" % idx)\n z_sample = np.zeros([batch_size, dcgan.z_dim])\n for kdx, z in enumerate(z_sample): z[idx] = values[kdx]\n\n image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))\n make_gif(image_set[-1], './samples/test_gif_%s.gif' % (idx))\n\n new_image_set = [merge(np.array([images[idx] for images in image_set]), [10, 10]) \\\n for idx in range(64) + range(63, -1, -1)]\n make_gif(new_image_set, './samples/test_gif_merged.gif', duration=8)\n"
] | [
[
"numpy.random.choice",
"numpy.arange",
"numpy.tile",
"tensorflow.trainable_variables",
"numpy.random.uniform",
"numpy.array",
"tensorflow.contrib.slim.model_analyzer.analyze_vars",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
scikit-spark/scikit-spark | [
"1b1291f14ce0c18d7ea358fe25687649a5b74ecd",
"1b1291f14ce0c18d7ea358fe25687649a5b74ecd"
] | [
"python/test/sklearn_version_specific_utils.py",
"python/test/test_parameterised_tests.py"
] | [
"import sklearn\n\n\ndef sklearn_version_is(version):\n if sklearn.__version__.startswith(version):\n return True\n return False\n\n\ndef sklearn_is_at_least(version):\n if sklearn.__version__ >= version:\n return True\n return False\n\n\ndef get_refactored_tests_to_skip():\n \"\"\"These tests have been edited in order to work with spark.\n They have been moved into this repo e.g. in resource_warning_tests.py\"\"\"\n if sklearn_version_is(\"0.19\"):\n return [\n \"test_return_train_score_warn\", # moved to resource_warning_tests.py\n ]\n elif sklearn_version_is(\"0.20\"):\n return [\n \"test_return_train_score_warn\", # moved to resource_warning_tests.py\n \"test_deprecated_grid_search_iid\", # moved to resource_warning_tests.py\n \"test_validate_parameter_grid_input\" # a function, not a test\n ]\n elif sklearn_version_is(\"0.21\"):\n return [\n \"test_refit_callable_out_bound\", # parameterized test, moved to test_parameterised_tests\n \"test_deprecated_grid_search_iid\", # moved to resource_warning_tests.py\n \"test_validate_parameter_grid_input\", # parameterized test, moved to test_parameterised_tests\n ]\n elif sklearn_version_is(\"0.22\"):\n return [\n \"test_refit_callable_out_bound\", # parameterized test, moved to test_parameterised_tests\n \"test_deprecated_grid_search_iid\", # moved to resource_warning_tests.py\n \"test_validate_parameter_grid_input\", # parameterized test, moved to test_parameterised_tests\n \"test_SearchCV_with_fit_params\", # moved to test_parameterised_tests\n \"test_scalar_fit_param\", # moved to test_parameterised_tests\n \"test_scalar_fit_param_compat\", # moved to test_parameterised_tests\n \"test_search_default_iid\", # moved to test_parameterised_tests\n \"test_validate_parameter_input\", # moved to test_parameterised_tests\n ]\n else:\n raise NotImplementedError(\n \"Unsupported sklearn version {}\".format(sklearn.__version__))\n",
"from functools import partial\nfrom unittest import skipIf\n\nimport pytest\nfrom scipy.stats import uniform\nfrom sklearn.datasets import make_classification\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.model_selection import ParameterGrid, ParameterSampler, train_test_split\n\nfrom skspark.model_selection import GridSearchCV, RandomizedSearchCV\nfrom sklearn.svm import LinearSVC\n\nfrom test.pyspark_test import PySparkTest\nfrom test.sklearn_version_specific_utils import sklearn_is_at_least\n\n\nclass _FitParamClassifier(SGDClassifier):\n\n def fit(self, X, y, sample_weight=None, tuple_of_arrays=None,\n scalar_param=None, callable_param=None):\n super().fit(X, y, sample_weight=sample_weight)\n assert scalar_param > 0\n assert callable(callable_param)\n\n # The tuple of arrays should be preserved as tuple.\n assert isinstance(tuple_of_arrays, tuple)\n assert tuple_of_arrays[0].ndim == 2\n assert tuple_of_arrays[1].ndim == 1\n return self\n\n\ndef _fit_param_callable():\n pass\n\n\nclass TestParameterisedTests(PySparkTest):\n @skipIf(not sklearn_is_at_least(\"0.21\"), \"test for sklearn 0.21 and above\")\n def test_refit_callable_out_bound(self):\n \"\"\"\n Test implementation catches the errors when 'best_index_' returns an\n out of bound result.\n \"\"\"\n out_bound_values = [-1, 2]\n search_cvs = [RandomizedSearchCV, GridSearchCV]\n\n for out_bound_value, search_cv in zip(out_bound_values, search_cvs):\n def refit_callable_out_bound(cv_results):\n \"\"\"\n A dummy function tests when returned 'best_index_' is out of bounds.\n \"\"\"\n return out_bound_value\n\n X, y = make_classification(n_samples=100, n_features=4,\n random_state=42)\n\n clf = search_cv(LinearSVC(random_state=42), {'C': [0.1, 1]},\n scoring='precision', refit=refit_callable_out_bound, cv=5)\n with pytest.raises(IndexError, match='best_index_ index out of range'):\n clf.fit(X, y)\n\n @skipIf(not sklearn_is_at_least(\"0.21\"), \"test for sklearn 0.21 and above\")\n def test_validate_parameter_grid_input_wrapper(self):\n def test_validate_parameter_grid_input(input, error_type, error_message):\n with pytest.raises(error_type, match=error_message):\n ParameterGrid(input)\n\n parameters = [\n (0, TypeError, r'Parameter grid is not a dict or a list \\(0\\)'),\n ([{'foo': [0]}, 0], TypeError, r'Parameter grid is not a dict \\(0\\)'),\n ({'foo': 0}, TypeError, \"Parameter grid value is not iterable \" \n r\"\\(key='foo', value=0\\)\")\n ]\n for input, error_type, error_message in parameters:\n test_validate_parameter_grid_input(input, error_type, error_message)\n\n @skipIf(not sklearn_is_at_least(\"0.22\"), \"test for sklearn 0.22 and above\")\n def test_validate_parameter_input_wrapper(self):\n from sklearn.model_selection.tests.test_search import test_validate_parameter_input\n\n parameters = [\n (0, TypeError, r'Parameter .* is not a dict or a list \\(0\\)'),\n ([{'foo': [0]}, 0], TypeError, r'Parameter .* is not a dict \\(0\\)'),\n ({'foo': 0}, TypeError, \"Parameter.* value is not iterable .*\" r\"\\(key='foo', value=0\\)\")\n ]\n\n for klass in [ParameterGrid, partial(ParameterSampler, n_iter=10)]:\n for input, error_type, error_message in parameters:\n test_validate_parameter_input(klass, input, error_type, error_message)\n\n @skipIf(not sklearn_is_at_least(\"0.22\"), \"test for sklearn 0.22 and above\")\n def test_search_default_iid_wrapper(self):\n from sklearn.model_selection.tests.test_search import test_search_default_iid\n\n parameters = [\n (GridSearchCV, {'param_grid': {'C': [1, 10]}}),\n 
(RandomizedSearchCV, {'param_distributions': {'C': [1, 10]}, 'n_iter': 2})\n ]\n\n for SearchCV, specialized_params in parameters:\n test_search_default_iid(SearchCV, specialized_params)\n\n @skipIf(not sklearn_is_at_least(\"0.22\"), \"test for sklearn 0.22 and above\")\n def test_SearchCV_with_fit_params_wrapper(self):\n from sklearn.model_selection.tests.test_search import test_SearchCV_with_fit_params\n\n for SearchCV in [GridSearchCV, RandomizedSearchCV]:\n test_SearchCV_with_fit_params(SearchCV)\n\n @skipIf(not sklearn_is_at_least(\"0.22\"), \"test for sklearn 0.22 and above\")\n def test_scalar_fit_param_wrapper(self):\n from sklearn.model_selection.tests.test_search import test_scalar_fit_param\n\n for SearchCV, param_search in [\n (GridSearchCV, {'a': [0.1, 0.01]}),\n (RandomizedSearchCV, {'a': uniform(1, 3)})\n ]:\n test_scalar_fit_param(SearchCV, param_search)\n\n @skipIf(not sklearn_is_at_least(\"0.22\"), \"test for sklearn 0.22 and above\")\n def test_scalar_fit_param_compat_wrapper(self):\n\n def test_scalar_fit_param_compat(SearchCV, param_search):\n \"\"\"\n The other test can't be pickled as the _FitParamClassifier is not accessible globally.\n So the code has to be duplicated here rather than imported\n \"\"\"\n X_train, X_valid, y_train, y_valid = train_test_split(\n *make_classification(random_state=42), random_state=42\n )\n\n model = SearchCV(\n _FitParamClassifier(), param_search\n )\n\n fit_params = {\n 'tuple_of_arrays': (X_valid, y_valid),\n 'callable_param': _fit_param_callable,\n 'scalar_param': 42,\n }\n model.fit(X_train, y_train, **fit_params)\n\n for SearchCV, param_search in [\n (GridSearchCV, {'alpha': [0.1, 0.01]}),\n (RandomizedSearchCV, {'alpha': uniform(0.01, 0.1)})\n ]:\n test_scalar_fit_param_compat(SearchCV, param_search)\n"
] | [
[
"sklearn.__version__.startswith"
],
[
"sklearn.datasets.make_classification",
"sklearn.model_selection.tests.test_search.test_validate_parameter_input",
"sklearn.model_selection.tests.test_search.test_SearchCV_with_fit_params",
"sklearn.model_selection.tests.test_search.test_search_default_iid",
"sklearn.model_selection.ParameterGrid",
"scipy.stats.uniform",
"sklearn.svm.LinearSVC",
"sklearn.model_selection.tests.test_search.test_scalar_fit_param"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SIOS-Svalbard/darwinsheet | [
"7ac85861156ca195c8a3563df0f08a141d805384"
] | [
"scripts/get_niskin_data.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 21 08:24:22 2021\n\n@author: lukem\n\"\"\"\n\nimport pandas as pd\nimport os\nimport sys\nimport re\nimport uuid\nimport requests\n\nimport os.path\naen_config_dir = (os.path.abspath(\n os.path.join(os.path.dirname(__file__), '..')))\n\nsys.path.append(aen_config_dir+'/scripts')\nimport toktlogger_json_to_df as tl\n\nbtl_files_folder = '/home/pal/kph-ctd/' \n\ncolumns = [\n 'eventID',\n 'parentEventID',\n 'bottleNumber',\n 'sampleDepthInMeters',\n 'eventDate',\n 'eventTime',\n 'decimalLatitude',\n 'decimalLongitude',\n 'bottomDepthInMeters',\n 'sampleType',\n 'gearType',\n 'eventRemarks',\n 'stationName',\n 'statID',\n 'samplingProtocol',\n 'recordedBy',\n 'pi_name',\n 'pi_institution',\n 'pi_email',\n 'sampleLocation',\n 'dataFilename'\n ]\n\ndef get_cruise_number():\n '''\n Getting the cruise number from the toklogger\n\n Returns\n -------\n cruiseNum: Integer of cruise number\n\n '''\n \n toktlogger = 'toktlogger-khaakon.hi.no'\n url = \"http://\"+toktlogger+\"/api/cruises/current?format=json\"\n response = requests.get(url)\n json_cruise = response.json()\n cruisenum = int(json_cruise['cruiseNumber'])\n return cruisenum\n\ndef create_dataframe():\n '''\n Create empty dataframe to append data from each file to\n\n Returns\n -------\n df : pandas dataframe\n '''\n \n df = pd.DataFrame(columns=columns)\n \n return df\n\ndef generate_UUID(id_url):\n '''\n Generates a v5 UUID. This can be repeatedly generated from the same string.\n\n Parameters\n ----------\n id_url : string, text to be used to generate UUID\n\n Returns\n -------\n Version 5 UUID, string\n\n '''\n return str(uuid.uuid5(uuid.NAMESPACE_URL,id_url))\n\ndef pull_columns(df_ctd,ctd_file):\n '''\n Pull columns from .btl file to a pandas dataframe\n '''\n # Creating a new temporary file to read from as .btl file needs cleaning to be understood by Pandas.\n # Note that some columns that I am not interested in are still merged together.\n with open(ctd_file, 'r') as f:\n n = 0 # counter of lines in new temporary file\n try:\n os.remove('/tmp/'+ctd_file)\n except OSError:\n pass\n with open('/tmp/'+ctd_file, 'a') as tmpfile:\n for line in f: # Iterate over lines\n if not line.startswith('*') and not line.startswith('#'): # Ignore header rows\n if 'sdev' not in line and 'Position' not in line:\n line = line.replace('(avg)','') # Removing (avg) from end of line - not a column value\n line = re.sub(r\"^\\s+\", \"\", line) # Removing whitespace at beginning of line\n if n == 0: # For header line only\n line = re.sub(\"\\s+\", \",\", line)\n line = re.sub(\"\\s\\s+\" , \",\", line)\n tmpfile.write(line+'\\n')\n n += 1\n \n data = pd.read_csv('/tmp/'+ctd_file, delimiter=',', usecols=['Bottle', 'PrDM'])\n \n df_ctd['bottleNumber'] = data['Bottle']\n df_ctd['sampleDepthInMeters'] = data['PrDM']\n \n cruisenum = get_cruise_number()\n \n data['eventID'] = ''\n for index, row in data.iterrows():\n id_url = f'File {ctd_file} niskin bottle {row[\"Bottle\"]} cruise {cruisenum}' \n eventID = generate_UUID(id_url)\n df_ctd['eventID'].iloc[index] = eventID\n #df_ctd['sampleDepthInMeters'].iloc[index] = row['sampleDepthInMeters']\n\n return df_ctd\n\n\ndef pull_from_toktlogger():\n '''\n Pull data from toktlogger to a dataframe that can be used to generate attributes consistent for each activity.\n '''\n df_tl = tl.json_to_df('toktlogger-khaakon.hi.no')\n \n return df_tl\n\n\ndef pull_global_attributes(df_ctd,ctd_file, df_tl):\n '''\n Add global attributes that are constant 
for each individual .btl file (corresponding to one CTD cast)\n\n Parameters\n ----------\n df_ctd : pandas dataframe\n Dataframe to be written to niskin log, for a single CTD deployment \n ctd_file : string\n Name of .btl file\n df_tl : pandas dataframe\n Data from toktlogger\n\n Returns\n -------\n df_ctd : pandas dataframe\n Dataframe to be written to niskin log, for a single CTD deployment \n\n '''\n df_ctd['dataFilename'] = ctd_file\n \n localStationNumber = int(ctd_file.split('.')[0].split('sta')[1])\n #with open(ctd_file, \"rt\") as f:\n # for line in f:\n # print(line)\n \n df_tmp = df_tl.loc[df_tl['statID'] == localStationNumber]\n \n df_ctd['statID'] = localStationNumber\n df_ctd['eventDate'] = df_tmp.loc[df_tmp['gearType'] == 'CTD w/bottles', 'eventDate'].item()\n df_ctd['parentEventID'] = df_tmp.loc[df_tmp['gearType'] == 'CTD w/bottles', 'eventID'].item()\n df_ctd['eventTime'] = df_tmp.loc[df_tmp['gearType'] == 'CTD w/bottles', 'eventTime'].item()\n df_ctd['decimalLatitude'] = df_tmp.loc[df_tmp['gearType'] == 'CTD w/bottles', 'decimalLatitude'].item()\n df_ctd['decimalLongitude'] = df_tmp.loc[df_tmp['gearType'] == 'CTD w/bottles', 'decimalLongitude'].item()\n df_ctd['bottomDepthInMeters'] = df_tmp.loc[df_tmp['gearType'] == 'CTD w/bottles', 'bottomDepthInMeters'].item()\n \n return df_ctd\n\n\ndef get_niskin_data():\n '''\n Read data from Niskin files into a single pandas dataframe\n This dataframe can be used to create a sample log.\n\n Returns\n -------\n df_cruise : pandas dataframe\n Dataframe to be written to niskin log \n\n '''\n df_cruise = create_dataframe()\n df_tl = pull_from_toktlogger()\n \n for ctd_file in sorted(os.listdir(btl_files_folder)):\n if ctd_file.endswith('.btl'):\n df_ctd = create_dataframe()\n df_ctd = pull_columns(df_ctd, ctd_file)\n df_ctd = pull_global_attributes(df_ctd, ctd_file, df_tl)\n df_cruise = df_cruise.append(df_ctd, ignore_index=True)\n \n df_cruise['stationName'] = ''\n df_cruise['sampleLocation'] = 'Water distributed around children of this event'\n df_cruise['eventRemarks'] = ''\n df_cruise['recordedBy'] = ''\n df_cruise['pi_name'] = ''\n df_cruise['pi_institution'] = ''\n df_cruise['pi_email'] = ''\n df_cruise['sampleType'] = 'Niskin Bottle'\n df_cruise['gearType'] = 'Niskin'\n \n return df_cruise\n\n\n#df_cruise = get_niskin_data()\n\n#df_cruise.to_csv('niskin_test.csv')\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
NihalHarish/datasets | [
"67574a8d74796bc065a8b9b49ec02f7b1200c172"
] | [
"src/datasets/utils/py_utils.py"
] | [
"# coding=utf-8\n# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Some python utils function and classes.\n\n\"\"\"\n\nimport contextlib\nimport functools\nimport itertools\nimport os\nimport pickle\nimport sys\nimport types\nfrom io import BytesIO as StringIO\nfrom multiprocessing import Pool, RLock\nfrom shutil import disk_usage\nfrom types import CodeType, FunctionType\nfrom typing import Callable, ClassVar, Generic, Optional, Tuple, Union\n\nimport dill\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom .logging import INFO, WARNING, get_logger, get_verbosity, set_verbosity_warning\n\n\ntry: # pragma: no branch\n import typing_extensions as _typing_extensions\n from typing_extensions import Final, Literal\nexcept ImportError:\n _typing_extensions = Literal = Final = None\n\n\nlogger = get_logger(__name__)\n\n\n# NOTE: When used on an instance method, the cache is shared across all\n# instances and IS NOT per-instance.\n# See\n# https://stackoverflow.com/questions/14946264/python-lru-cache-decorator-per-instance\n# For @property methods, use @memoized_property below.\nmemoize = functools.lru_cache\n\n\ndef size_str(size_in_bytes):\n \"\"\"Returns a human readable size string.\n\n If size_in_bytes is None, then returns \"Unknown size\".\n\n For example `size_str(1.5 * datasets.units.GiB) == \"1.50 GiB\"`.\n\n Args:\n size_in_bytes: `int` or `None`, the size, in bytes, that we want to\n format as a human-readable size string.\n \"\"\"\n if not size_in_bytes:\n return \"Unknown size\"\n\n _NAME_LIST = [(\"PiB\", 2 ** 50), (\"TiB\", 2 ** 40), (\"GiB\", 2 ** 30), (\"MiB\", 2 ** 20), (\"KiB\", 2 ** 10)]\n\n size_in_bytes = float(size_in_bytes)\n for (name, size_bytes) in _NAME_LIST:\n value = size_in_bytes / size_bytes\n if value >= 1.0:\n return \"{:.2f} {}\".format(value, name)\n return \"{} {}\".format(int(size_in_bytes), \"bytes\")\n\n\[email protected]\ndef temporary_assignment(obj, attr, value):\n \"\"\"Temporarily assign obj.attr to value.\"\"\"\n original = getattr(obj, attr, None)\n setattr(obj, attr, value)\n try:\n yield\n finally:\n setattr(obj, attr, original)\n\n\ndef zip_dict(*dicts):\n \"\"\"Iterate over items of dictionaries grouped by their keys.\"\"\"\n for key in set(itertools.chain(*dicts)): # set merge all keys\n # Will raise KeyError if the dict don't have the same keys\n yield key, tuple(d[key] for d in dicts)\n\n\nclass NonMutableDict(dict):\n \"\"\"Dict where keys can only be added but not modified.\n\n Will raise an error if the user try to overwrite one key. The error message\n can be customized during construction. 
It will be formatted using {key} for\n the overwritten key.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self._error_msg = kwargs.pop(\n \"error_msg\",\n \"Try to overwrite existing key: {key}\",\n )\n if kwargs:\n raise ValueError(\"NonMutableDict cannot be initialized with kwargs.\")\n super(NonMutableDict, self).__init__(*args, **kwargs)\n\n def __setitem__(self, key, value):\n if key in self:\n raise ValueError(self._error_msg.format(key=key))\n return super(NonMutableDict, self).__setitem__(key, value)\n\n def update(self, other):\n if any(k in self for k in other):\n raise ValueError(self._error_msg.format(key=set(self) & set(other)))\n return super(NonMutableDict, self).update(other)\n\n\nclass classproperty(property): # pylint: disable=invalid-name\n \"\"\"Descriptor to be used as decorator for @classmethods.\"\"\"\n\n def __get__(self, obj, objtype=None):\n return self.fget.__get__(None, objtype)()\n\n\ndef _single_map_nested(args):\n \"\"\"Apply a function recursively to each element of a nested data struct.\"\"\"\n function, data_struct, types, rank, disable_tqdm = args\n\n # Singleton first to spare some computation\n if not isinstance(data_struct, dict) and not isinstance(data_struct, types):\n return function(data_struct)\n\n # Reduce logging to keep things readable in multiprocessing with tqdm\n if rank is not None and get_verbosity() < WARNING:\n set_verbosity_warning()\n # Print at least one thing to fix tqdm in notebooks in multiprocessing\n # see https://github.com/tqdm/tqdm/issues/485#issuecomment-473338308\n if rank is not None and \"notebook\" in tqdm.__name__:\n print(\" \", end=\"\", flush=True)\n\n # Loop over single examples or batches and write to buffer/file if examples are to be updated\n pbar_iterable = data_struct.items() if isinstance(data_struct, dict) else data_struct\n pbar_desc = \"#\" + str(rank) if rank is not None else None\n pbar = tqdm(pbar_iterable, disable=disable_tqdm, position=rank, unit=\"obj\", desc=pbar_desc)\n\n if isinstance(data_struct, dict):\n return {k: _single_map_nested((function, v, types, None, True)) for k, v in pbar}\n else:\n mapped = [_single_map_nested((function, v, types, None, True)) for v in pbar]\n if isinstance(data_struct, list):\n return mapped\n elif isinstance(data_struct, tuple):\n return tuple(mapped)\n else:\n return np.array(mapped)\n\n\ndef map_nested(\n function,\n data_struct,\n dict_only: bool = False,\n map_list: bool = True,\n map_tuple: bool = False,\n map_numpy: bool = False,\n num_proc: Optional[int] = None,\n types=None,\n):\n \"\"\"Apply a function recursively to each element of a nested data struct.\n If num_proc > 1 and the length of data_struct is longer than num_proc: use multi-processing\n \"\"\"\n if types is None:\n types = []\n if not dict_only:\n if map_list:\n types.append(list)\n if map_tuple:\n types.append(tuple)\n if map_numpy:\n types.append(np.ndarray)\n types = tuple(types)\n\n # Singleton\n if not isinstance(data_struct, dict) and not isinstance(data_struct, types):\n return function(data_struct)\n\n disable_tqdm = bool(logger.getEffectiveLevel() > INFO)\n iterable = list(data_struct.values()) if isinstance(data_struct, dict) else data_struct\n\n if num_proc is None:\n num_proc = 1\n if num_proc <= 1 or len(iterable) <= num_proc:\n mapped = [\n _single_map_nested((function, obj, types, None, True)) for obj in tqdm(iterable, disable=disable_tqdm)\n ]\n else:\n split_kwds = [] # We organize the splits ourselve (contiguous splits)\n for index in range(num_proc):\n div = 
len(iterable) // num_proc\n mod = len(iterable) % num_proc\n start = div * index + min(index, mod)\n end = start + div + (1 if index < mod else 0)\n split_kwds.append((function, iterable[start:end], types, index, disable_tqdm))\n assert len(iterable) == sum(len(i[1]) for i in split_kwds), (\n f\"Error dividing inputs iterable among processes. \"\n f\"Total number of objects {len(iterable)}, \"\n f\"length: {sum(len(i[1]) for i in split_kwds)}\"\n )\n logger.info(\n \"Spawning {} processes for {} objects in slices of {}\".format(\n num_proc, len(iterable), [len(i[1]) for i in split_kwds]\n )\n )\n with Pool(num_proc, initargs=(RLock(),), initializer=tqdm.set_lock) as pool:\n mapped = pool.map(_single_map_nested, split_kwds)\n logger.info(\"Finished {} processes\".format(num_proc))\n mapped = [obj for proc_res in mapped for obj in proc_res]\n logger.info(\"Unpacked {} objects\".format(len(mapped)))\n\n if isinstance(data_struct, dict):\n return dict(zip(data_struct.keys(), mapped))\n else:\n if isinstance(data_struct, list):\n return mapped\n elif isinstance(data_struct, tuple):\n return tuple(mapped)\n else:\n return np.array(mapped)\n\n\ndef zip_nested(arg0, *args, **kwargs):\n \"\"\"Zip data struct together and return a data struct with the same shape.\"\"\"\n # Python 2 do not support kwargs only arguments\n dict_only = kwargs.pop(\"dict_only\", False)\n assert not kwargs\n\n # Could add support for more exotic data_struct, like OrderedDict\n if isinstance(arg0, dict):\n return {k: zip_nested(*a, dict_only=dict_only) for k, a in zip_dict(arg0, *args)}\n elif not dict_only:\n if isinstance(arg0, list):\n return [zip_nested(*a, dict_only=dict_only) for a in zip(arg0, *args)]\n # Singleton\n return (arg0,) + args\n\n\ndef flatten_nest_dict(d):\n \"\"\"Return the dict with all nested keys flattened joined with '/'.\"\"\"\n # Use NonMutableDict to ensure there is no collision between features keys\n flat_dict = NonMutableDict()\n for k, v in d.items():\n if isinstance(v, dict):\n flat_dict.update({\"{}/{}\".format(k, k2): v2 for k2, v2 in flatten_nest_dict(v).items()})\n else:\n flat_dict[k] = v\n return flat_dict\n\n\nclass NestedDataStructure:\n def __init__(self, data=None):\n self.data = data if data is not None else []\n\n def flatten(self, data=None):\n data = data if data is not None else self.data\n if isinstance(data, dict):\n return self.flatten(list(data.values()))\n elif isinstance(data, (list, tuple)):\n return [flattened for item in data for flattened in self.flatten(item)]\n else:\n return [data]\n\n\ndef has_sufficient_disk_space(needed_bytes, directory=\".\"):\n try:\n free_bytes = disk_usage(os.path.abspath(directory)).free\n except OSError:\n return True\n return needed_bytes < free_bytes\n\n\nclass Pickler(dill.Pickler):\n \"\"\"Same Pickler as the one from dill, but improved for notebooks and shells\"\"\"\n\n dispatch = dill._dill.MetaCatchingDict(dill.Pickler.dispatch.copy())\n\n def save_global(self, obj, name=None):\n if sys.version_info[:2] < (3, 7) and _CloudPickleTypeHintFix._is_parametrized_type_hint(\n obj\n ): # noqa # pragma: no branch\n # Parametrized typing constructs in Python < 3.7 are not compatible\n # with type checks and ``isinstance`` semantics. 
For this reason,\n # it is easier to detect them using a duck-typing-based check\n # (``_is_parametrized_type_hint``) than to populate the Pickler's\n # dispatch with type-specific savers.\n _CloudPickleTypeHintFix._save_parametrized_type_hint(self, obj)\n else:\n dill.Pickler.save_global(self, obj, name=name)\n\n\ndef dump(obj, file):\n \"\"\"pickle an object to a file\"\"\"\n Pickler(file, recurse=True).dump(obj)\n return\n\n\[email protected]\ndef _no_cache_fields(obj):\n try:\n import transformers as tr\n\n if (\n hasattr(tr, \"PreTrainedTokenizerBase\")\n and isinstance(obj, tr.PreTrainedTokenizerBase)\n and hasattr(obj, \"cache\")\n and isinstance(obj.cache, dict)\n ):\n with temporary_assignment(obj, \"cache\", {}):\n yield\n else:\n yield\n\n except ImportError:\n yield\n\n\ndef dumps(obj):\n \"\"\"pickle an object to a string\"\"\"\n file = StringIO()\n with _no_cache_fields(obj):\n dump(obj, file)\n return file.getvalue()\n\n\ndef pklregister(t):\n def proxy(func):\n Pickler.dispatch[t] = func\n return func\n\n return proxy\n\n\nclass _CloudPickleTypeHintFix:\n \"\"\"\n Type hints can't be properly pickled in python < 3.7\n CloudPickle provided a way to make it work in older versions.\n This class provide utilities to fix pickling of type hints in older versions.\n from https://github.com/cloudpipe/cloudpickle/pull/318/files\n \"\"\"\n\n def _is_parametrized_type_hint(obj):\n # This is very cheap but might generate false positives.\n origin = getattr(obj, \"__origin__\", None) # typing Constructs\n values = getattr(obj, \"__values__\", None) # typing_extensions.Literal\n type_ = getattr(obj, \"__type__\", None) # typing_extensions.Final\n return origin is not None or values is not None or type_ is not None\n\n def _create_parametrized_type_hint(origin, args):\n return origin[args]\n\n def _save_parametrized_type_hint(pickler, obj):\n # The distorted type check sematic for typing construct becomes:\n # ``type(obj) is type(TypeHint)``, which means \"obj is a\n # parametrized TypeHint\"\n if type(obj) is type(Literal): # pragma: no branch\n initargs = (Literal, obj.__values__)\n elif type(obj) is type(Final): # pragma: no branch\n initargs = (Final, obj.__type__)\n elif type(obj) is type(ClassVar):\n initargs = (ClassVar, obj.__type__)\n elif type(obj) in [type(Union), type(Tuple), type(Generic)]:\n initargs = (obj.__origin__, obj.__args__)\n elif type(obj) is type(Callable):\n args = obj.__args__\n if args[0] is Ellipsis:\n initargs = (obj.__origin__, args)\n else:\n initargs = (obj.__origin__, (list(args[:-1]), args[-1]))\n else: # pragma: no cover\n raise pickle.PicklingError(\"Datasets pickle Error: Unknown type {}\".format(type(obj)))\n pickler.save_reduce(_CloudPickleTypeHintFix._create_parametrized_type_hint, initargs, obj=obj)\n\n\n@pklregister(CodeType)\ndef _save_code(pickler, obj):\n \"\"\"\n From dill._dill.save_code\n This is a modified version that removes the origin (filename + line no.)\n of functions created in notebooks or shells for example.\n \"\"\"\n dill._dill.log.info(\"Co: %s\" % obj)\n # The filename of a function is the .py file where it is defined.\n # Filenames of functions created in notebooks or shells start with '<'\n # ex: <ipython-input-13-9ed2afe61d25> for ipython, and <stdin> for shell\n # Moreover lambda functions have a special name: '<lambda>'\n # ex: (lambda x: x).__code__.co_name == \"<lambda>\" # True\n # For the hashing mechanism we ignore where the function has been defined\n # More specifically:\n # - we ignore the filename of special 
functions (filename starts with '<')\n # - we always ignore the line number\n # Only those two lines are different from the original implementation:\n co_filename = \"\" if obj.co_filename.startswith(\"<\") or obj.co_name == \"<lambda>\" else obj.co_filename\n co_firstlineno = 1\n # The rest is the same as in the original dill implementation\n if dill._dill.PY3:\n if hasattr(obj, \"co_posonlyargcount\"):\n args = (\n obj.co_argcount,\n obj.co_posonlyargcount,\n obj.co_kwonlyargcount,\n obj.co_nlocals,\n obj.co_stacksize,\n obj.co_flags,\n obj.co_code,\n obj.co_consts,\n obj.co_names,\n obj.co_varnames,\n co_filename,\n obj.co_name,\n co_firstlineno,\n obj.co_lnotab,\n obj.co_freevars,\n obj.co_cellvars,\n )\n else:\n args = (\n obj.co_argcount,\n obj.co_kwonlyargcount,\n obj.co_nlocals,\n obj.co_stacksize,\n obj.co_flags,\n obj.co_code,\n obj.co_consts,\n obj.co_names,\n obj.co_varnames,\n co_filename,\n obj.co_name,\n co_firstlineno,\n obj.co_lnotab,\n obj.co_freevars,\n obj.co_cellvars,\n )\n else:\n args = (\n obj.co_argcount,\n obj.co_nlocals,\n obj.co_stacksize,\n obj.co_flags,\n obj.co_code,\n obj.co_consts,\n obj.co_names,\n obj.co_varnames,\n co_filename,\n obj.co_name,\n co_firstlineno,\n obj.co_lnotab,\n obj.co_freevars,\n obj.co_cellvars,\n )\n pickler.save_reduce(CodeType, args, obj=obj)\n dill._dill.log.info(\"# Co\")\n return\n\n\n@pklregister(FunctionType)\ndef save_function(pickler, obj):\n \"\"\"\n From dill._dill.save_function\n This is a modified version that make globs deterministic since the order of\n the keys in the output dictionary of globalvars can change.\n \"\"\"\n if not dill._dill._locate_function(obj):\n dill._dill.log.info(\"F1: %s\" % obj)\n if getattr(pickler, \"_recurse\", False):\n # recurse to get all globals referred to by obj\n globalvars = dill.detect.globalvars\n globs = globalvars(obj, recurse=True, builtin=True)\n if id(obj) in dill._dill.stack:\n globs = obj.__globals__ if dill._dill.PY3 else obj.func_globals\n else:\n globs = obj.__globals__ if dill._dill.PY3 else obj.func_globals\n # globs is a dictionary with keys = var names (str) and values = python objects\n # however the dictionary is not always loaded in the same order\n # therefore we have to sort the keys to make deterministic.\n # This is important to make `dump` deterministic.\n # Only this line is different from the original implementation:\n globs = {k: globs[k] for k in sorted(globs.keys())}\n # The rest is the same as in the original dill implementation\n _byref = getattr(pickler, \"_byref\", None)\n _recurse = getattr(pickler, \"_recurse\", None)\n _memo = (id(obj) in dill._dill.stack) and (_recurse is not None)\n dill._dill.stack[id(obj)] = len(dill._dill.stack), obj\n if dill._dill.PY3:\n _super = (\"super\" in getattr(obj.__code__, \"co_names\", ())) and (_byref is not None)\n if _super:\n pickler._byref = True\n if _memo:\n pickler._recurse = False\n fkwdefaults = getattr(obj, \"__kwdefaults__\", None)\n pickler.save_reduce(\n dill._dill._create_function,\n (obj.__code__, globs, obj.__name__, obj.__defaults__, obj.__closure__, obj.__dict__, fkwdefaults),\n obj=obj,\n )\n else:\n _super = (\n (\"super\" in getattr(obj.func_code, \"co_names\", ()))\n and (_byref is not None)\n and getattr(pickler, \"_recurse\", False)\n )\n if _super:\n pickler._byref = True\n if _memo:\n pickler._recurse = False\n pickler.save_reduce(\n dill._dill._create_function,\n (obj.func_code, globs, obj.func_name, obj.func_defaults, obj.func_closure, obj.__dict__),\n obj=obj,\n )\n if _super:\n 
pickler._byref = _byref\n if _memo:\n pickler._recurse = _recurse\n if (\n dill._dill.OLDER\n and not _byref\n and (_super or (not _super and _memo) or (not _super and not _memo and _recurse))\n ):\n pickler.clear_memo()\n dill._dill.log.info(\"# F1\")\n else:\n dill._dill.log.info(\"F2: %s\" % obj)\n name = getattr(obj, \"__qualname__\", getattr(obj, \"__name__\", None))\n dill._dill.StockPickler.save_global(pickler, obj, name=name)\n dill._dill.log.info(\"# F2\")\n return\n\n\ndef copyfunc(func):\n result = types.FunctionType(func.__code__, func.__globals__, func.__name__, func.__defaults__, func.__closure__)\n result.__kwdefaults__ = func.__kwdefaults__\n return result\n\n\ntry:\n import regex\n\n @pklregister(type(regex.Regex(\"\", 0)))\n def _save_regex(pickler, obj):\n dill._dill.log.info(\"Re: %s\" % obj)\n args = (\n obj.pattern,\n obj.flags,\n )\n pickler.save_reduce(regex.compile, args, obj=obj)\n dill._dill.log.info(\"# Re\")\n return\n\n\nexcept ImportError:\n pass\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zhaogev5/BBAVectors-Oriented-Object-Detection | [
"b9e86404082761dd49a652670898f6d3a98c30aa"
] | [
"DOTA_devkit/DOTA.py"
] | [
"#The code is used for visulization, inspired from cocoapi\n# Licensed under the Simplified BSD License [see bsd.txt]\n\nimport os\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.patches import Polygon, Circle\nimport numpy as np\nimport dota_utils as util\nfrom collections import defaultdict\nimport cv2\n\ndef _isArrayLike(obj):\n if type(obj) == str:\n return False\n return hasattr(obj, '__iter__') and hasattr(obj, '__len__')\n\nclass DOTA:\n def __init__(self, basepath):\n self.basepath = basepath\n self.labelpath = os.path.join(basepath, 'labelTxt')\n self.imagepath = os.path.join(basepath, 'images')\n self.imgpaths = util.GetFileFromThisRootDir(self.labelpath)\n self.imglist = [util.custombasename(x) for x in self.imgpaths]\n self.catToImgs = defaultdict(list)\n self.ImgToAnns = defaultdict(list)\n self.createIndex()\n\n def createIndex(self):\n for filename in self.imgpaths:\n objects = util.parse_dota_poly(filename)\n imgid = util.custombasename(filename)\n self.ImgToAnns[imgid] = objects\n for obj in objects:\n cat = obj['name']\n self.catToImgs[cat].append(imgid)\n\n def getImgIds(self, catNms=[]):\n \"\"\"\n :param catNms: category names\n :return: all the image ids contain the categories\n \"\"\"\n catNms = catNms if _isArrayLike(catNms) else [catNms]\n if len(catNms) == 0:\n return self.imglist\n else:\n imgids = []\n for i, cat in enumerate(catNms):\n if i == 0:\n imgids = set(self.catToImgs[cat])\n else:\n imgids &= set(self.catToImgs[cat])\n return list(imgids)\n\n def loadAnns(self, catNms=[], imgId = None, difficult=None):\n \"\"\"\n :param catNms: category names\n :param imgId: the img to load anns\n :return: objects\n \"\"\"\n catNms = catNms if _isArrayLike(catNms) else [catNms]\n objects = self.ImgToAnns[imgId]\n if len(catNms) == 0:\n return objects\n outobjects = [obj for obj in objects if (obj['name'] in catNms)]\n return outobjects\n def showAnns(self, objects, imgId, range, out_dir):\n \"\"\"\n :param catNms: category names\n :param objects: objects to show\n :param imgId: img to show\n :param range: display range in the img\n :return:\n \"\"\"\n plt.cla()\n img = self.loadImgs(imgId)[0]\n ypixels, xpixels, bands = img.shape\n dpi = 72.\n xinch = xpixels / dpi\n yinch = ypixels / dpi\n plt.figure(figsize=(xinch,yinch))\n plt.imshow(img)\n plt.axis('off')\n\n ax = plt.gca()\n ax.set_autoscale_on(False)\n polygons = []\n color = []\n circles = []\n r = 5\n for obj in objects:\n c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]\n poly = obj['poly']\n polygons.append(Polygon(poly))\n color.append(c)\n point = poly[0]\n circle = Circle((point[0], point[1]), r)\n circles.append(circle)\n p = PatchCollection(polygons, facecolors=color, linewidths=0, alpha=0.4)\n ax.add_collection(p)\n p = PatchCollection(polygons, facecolors='none', edgecolors=color, linewidths=2)\n ax.add_collection(p)\n p = PatchCollection(circles, facecolors='red')\n ax.add_collection(p)\n out_path = os.path.join(out_dir,imgId+'.jpg')\n plt.savefig(out_path,dpi=dpi,transparent=True)\n def loadImgs(self, imgids=[]):\n \"\"\"\n :param imgids: integer ids specifying img\n :return: loaded img objects\n \"\"\"\n print('isarralike:', _isArrayLike(imgids))\n imgids = imgids if _isArrayLike(imgids) else [imgids]\n print('imgids:', imgids)\n imgs = []\n for imgid in imgids:\n filename = os.path.join(self.imagepath, imgid + '.jpg')\n print('filename:', filename)\n img = cv2.imread(filename)\n imgs.append(img)\n return imgs\n\n# if __name__ == 
'__main__':\n# examplesplit = DOTA('data/tianzhi_demo_data')\n# imgids = examplesplit.getImgIds(catNms=['obj'])\n# img = examplesplit.loadImgs(imgids)\n# for imgid in imgids:\n# anns = examplesplit.loadAnns(imgId=imgid)\n# examplesplit.showAnns(anns, imgid, 2)"
] | [
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.imshow",
"matplotlib.collections.PatchCollection",
"numpy.random.random",
"matplotlib.pyplot.cla",
"matplotlib.patches.Circle",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.axis",
"matplotlib.patches.Polygon",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mojones/pandas | [
"3d4f9dc19d784526f71a197bfb6e36b0409e0760",
"3d4f9dc19d784526f71a197bfb6e36b0409e0760"
] | [
"pandas/core/array_algos/transforms.py",
"pandas/tests/resample/test_resampler_grouper.py"
] | [
"\"\"\"\ntransforms.py is for shape-preserving functions.\n\"\"\"\n\nimport numpy as np\n\nfrom pandas.core.dtypes.common import ensure_platform_int\n\n\ndef shift(values: np.ndarray, periods: int, axis: int, fill_value) -> np.ndarray:\n new_values = values\n\n # make sure array sent to np.roll is c_contiguous\n f_ordered = values.flags.f_contiguous\n if f_ordered:\n new_values = new_values.T\n axis = new_values.ndim - axis - 1\n\n if np.prod(new_values.shape):\n new_values = np.roll(new_values, ensure_platform_int(periods), axis=axis)\n\n axis_indexer = [slice(None)] * values.ndim\n if periods > 0:\n axis_indexer[axis] = slice(None, periods)\n else:\n axis_indexer[axis] = slice(periods, None)\n new_values[tuple(axis_indexer)] = fill_value\n\n # restore original order\n if f_ordered:\n new_values = new_values.T\n\n return new_values\n",
"from textwrap import dedent\n\nimport numpy as np\n\nfrom pandas.util._test_decorators import async_mark\n\nimport pandas as pd\nfrom pandas import DataFrame, Series, Timestamp\nimport pandas._testing as tm\nfrom pandas.core.indexes.datetimes import date_range\n\ntest_frame = DataFrame(\n {\"A\": [1] * 20 + [2] * 12 + [3] * 8, \"B\": np.arange(40)},\n index=date_range(\"1/1/2000\", freq=\"s\", periods=40),\n)\n\n\n@async_mark()\nasync def test_tab_complete_ipython6_warning(ip):\n from IPython.core.completer import provisionalcompleter\n\n code = dedent(\n \"\"\"\\\n import pandas._testing as tm\n s = tm.makeTimeSeries()\n rs = s.resample(\"D\")\n \"\"\"\n )\n await ip.run_code(code)\n\n # TODO: remove it when Ipython updates\n # GH 33567, jedi version raises Deprecation warning in Ipython\n import jedi\n\n if jedi.__version__ < \"0.17.0\":\n warning = tm.assert_produces_warning(None)\n else:\n warning = tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False)\n with warning:\n with provisionalcompleter(\"ignore\"):\n list(ip.Completer.completions(\"rs.\", 1))\n\n\ndef test_deferred_with_groupby():\n\n # GH 12486\n # support deferred resample ops with groupby\n data = [\n [\"2010-01-01\", \"A\", 2],\n [\"2010-01-02\", \"A\", 3],\n [\"2010-01-05\", \"A\", 8],\n [\"2010-01-10\", \"A\", 7],\n [\"2010-01-13\", \"A\", 3],\n [\"2010-01-01\", \"B\", 5],\n [\"2010-01-03\", \"B\", 2],\n [\"2010-01-04\", \"B\", 1],\n [\"2010-01-11\", \"B\", 7],\n [\"2010-01-14\", \"B\", 3],\n ]\n\n df = DataFrame(data, columns=[\"date\", \"id\", \"score\"])\n df.date = pd.to_datetime(df.date)\n\n def f(x):\n return x.set_index(\"date\").resample(\"D\").asfreq()\n\n expected = df.groupby(\"id\").apply(f)\n result = df.set_index(\"date\").groupby(\"id\").resample(\"D\").asfreq()\n tm.assert_frame_equal(result, expected)\n\n df = DataFrame(\n {\n \"date\": pd.date_range(start=\"2016-01-01\", periods=4, freq=\"W\"),\n \"group\": [1, 1, 2, 2],\n \"val\": [5, 6, 7, 8],\n }\n ).set_index(\"date\")\n\n def f(x):\n return x.resample(\"1D\").ffill()\n\n expected = df.groupby(\"group\").apply(f)\n result = df.groupby(\"group\").resample(\"1D\").ffill()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_getitem():\n g = test_frame.groupby(\"A\")\n\n expected = g.B.apply(lambda x: x.resample(\"2s\").mean())\n\n result = g.resample(\"2s\").B.mean()\n tm.assert_series_equal(result, expected)\n\n result = g.B.resample(\"2s\").mean()\n tm.assert_series_equal(result, expected)\n\n result = g.resample(\"2s\").mean().B\n tm.assert_series_equal(result, expected)\n\n\ndef test_getitem_multiple():\n\n # GH 13174\n # multiple calls after selection causing an issue with aliasing\n data = [{\"id\": 1, \"buyer\": \"A\"}, {\"id\": 2, \"buyer\": \"B\"}]\n df = DataFrame(data, index=pd.date_range(\"2016-01-01\", periods=2))\n r = df.groupby(\"id\").resample(\"1D\")\n result = r[\"buyer\"].count()\n expected = Series(\n [1, 1],\n index=pd.MultiIndex.from_tuples(\n [(1, Timestamp(\"2016-01-01\")), (2, Timestamp(\"2016-01-02\"))],\n names=[\"id\", None],\n ),\n name=\"buyer\",\n )\n tm.assert_series_equal(result, expected)\n\n result = r[\"buyer\"].count()\n tm.assert_series_equal(result, expected)\n\n\ndef test_groupby_resample_on_api_with_getitem():\n # GH 17813\n df = pd.DataFrame(\n {\"id\": list(\"aabbb\"), \"date\": pd.date_range(\"1-1-2016\", periods=5), \"data\": 1}\n )\n exp = df.set_index(\"date\").groupby(\"id\").resample(\"2D\")[\"data\"].sum()\n result = df.groupby(\"id\").resample(\"2D\", 
on=\"date\")[\"data\"].sum()\n tm.assert_series_equal(result, exp)\n\n\ndef test_nearest():\n\n # GH 17496\n # Resample nearest\n index = pd.date_range(\"1/1/2000\", periods=3, freq=\"T\")\n result = Series(range(3), index=index).resample(\"20s\").nearest()\n\n expected = Series(\n [0, 0, 1, 1, 1, 2, 2],\n index=pd.DatetimeIndex(\n [\n \"2000-01-01 00:00:00\",\n \"2000-01-01 00:00:20\",\n \"2000-01-01 00:00:40\",\n \"2000-01-01 00:01:00\",\n \"2000-01-01 00:01:20\",\n \"2000-01-01 00:01:40\",\n \"2000-01-01 00:02:00\",\n ],\n dtype=\"datetime64[ns]\",\n freq=\"20S\",\n ),\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_methods():\n g = test_frame.groupby(\"A\")\n r = g.resample(\"2s\")\n\n for f in [\"first\", \"last\", \"median\", \"sem\", \"sum\", \"mean\", \"min\", \"max\"]:\n result = getattr(r, f)()\n expected = g.apply(lambda x: getattr(x.resample(\"2s\"), f)())\n tm.assert_frame_equal(result, expected)\n\n for f in [\"size\"]:\n result = getattr(r, f)()\n expected = g.apply(lambda x: getattr(x.resample(\"2s\"), f)())\n tm.assert_series_equal(result, expected)\n\n for f in [\"count\"]:\n result = getattr(r, f)()\n expected = g.apply(lambda x: getattr(x.resample(\"2s\"), f)())\n tm.assert_frame_equal(result, expected)\n\n # series only\n for f in [\"nunique\"]:\n result = getattr(r.B, f)()\n expected = g.B.apply(lambda x: getattr(x.resample(\"2s\"), f)())\n tm.assert_series_equal(result, expected)\n\n for f in [\"nearest\", \"backfill\", \"ffill\", \"asfreq\"]:\n result = getattr(r, f)()\n expected = g.apply(lambda x: getattr(x.resample(\"2s\"), f)())\n tm.assert_frame_equal(result, expected)\n\n result = r.ohlc()\n expected = g.apply(lambda x: x.resample(\"2s\").ohlc())\n tm.assert_frame_equal(result, expected)\n\n for f in [\"std\", \"var\"]:\n result = getattr(r, f)(ddof=1)\n expected = g.apply(lambda x: getattr(x.resample(\"2s\"), f)(ddof=1))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply():\n\n g = test_frame.groupby(\"A\")\n r = g.resample(\"2s\")\n\n # reduction\n expected = g.resample(\"2s\").sum()\n\n def f(x):\n return x.resample(\"2s\").sum()\n\n result = r.apply(f)\n tm.assert_frame_equal(result, expected)\n\n def f(x):\n return x.resample(\"2s\").apply(lambda y: y.sum())\n\n result = g.apply(f)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_apply_with_mutated_index():\n # GH 15169\n index = pd.date_range(\"1-1-2015\", \"12-31-15\", freq=\"D\")\n df = DataFrame(data={\"col1\": np.random.rand(len(index))}, index=index)\n\n def f(x):\n s = Series([1, 2], index=[\"a\", \"b\"])\n return s\n\n expected = df.groupby(pd.Grouper(freq=\"M\")).apply(f)\n\n result = df.resample(\"M\").apply(f)\n tm.assert_frame_equal(result, expected)\n\n # A case for series\n expected = df[\"col1\"].groupby(pd.Grouper(freq=\"M\")).apply(f)\n result = df[\"col1\"].resample(\"M\").apply(f)\n tm.assert_series_equal(result, expected)\n\n\ndef test_apply_columns_multilevel():\n # GH 16231\n cols = pd.MultiIndex.from_tuples([(\"A\", \"a\", \"\", \"one\"), (\"B\", \"b\", \"i\", \"two\")])\n ind = date_range(start=\"2017-01-01\", freq=\"15Min\", periods=8)\n df = DataFrame(np.array([0] * 16).reshape(8, 2), index=ind, columns=cols)\n agg_dict = {col: (np.sum if col[3] == \"one\" else np.mean) for col in df.columns}\n result = df.resample(\"H\").apply(lambda x: agg_dict[x.name](x))\n expected = DataFrame(\n np.array([0] * 4).reshape(2, 2),\n index=date_range(start=\"2017-01-01\", freq=\"1H\", periods=2),\n columns=pd.MultiIndex.from_tuples(\n [(\"A\", \"a\", \"\", 
\"one\"), (\"B\", \"b\", \"i\", \"two\")]\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_resample_groupby_with_label():\n # GH 13235\n index = date_range(\"2000-01-01\", freq=\"2D\", periods=5)\n df = DataFrame(index=index, data={\"col0\": [0, 0, 1, 1, 2], \"col1\": [1, 1, 1, 1, 1]})\n result = df.groupby(\"col0\").resample(\"1W\", label=\"left\").sum()\n\n mi = [\n np.array([0, 0, 1, 2]),\n pd.to_datetime(\n np.array([\"1999-12-26\", \"2000-01-02\", \"2000-01-02\", \"2000-01-02\"])\n ),\n ]\n mindex = pd.MultiIndex.from_arrays(mi, names=[\"col0\", None])\n expected = DataFrame(\n data={\"col0\": [0, 0, 2, 2], \"col1\": [1, 1, 2, 1]}, index=mindex\n )\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_consistency_with_window():\n\n # consistent return values with window\n df = test_frame\n expected = pd.Int64Index([1, 2, 3], name=\"A\")\n result = df.groupby(\"A\").resample(\"2s\").mean()\n assert result.index.nlevels == 2\n tm.assert_index_equal(result.index.levels[0], expected)\n\n result = df.groupby(\"A\").rolling(20).mean()\n assert result.index.nlevels == 2\n tm.assert_index_equal(result.index.levels[0], expected)\n\n\ndef test_median_duplicate_columns():\n # GH 14233\n\n df = DataFrame(\n np.random.randn(20, 3),\n columns=list(\"aaa\"),\n index=pd.date_range(\"2012-01-01\", periods=20, freq=\"s\"),\n )\n df2 = df.copy()\n df2.columns = [\"a\", \"b\", \"c\"]\n expected = df2.resample(\"5s\").median()\n result = df.resample(\"5s\").median()\n expected.columns = result.columns\n tm.assert_frame_equal(result, expected)\n"
] | [
[
"pandas.core.dtypes.common.ensure_platform_int",
"numpy.prod"
],
[
"pandas.to_datetime",
"pandas.Series",
"pandas.util._test_decorators.async_mark",
"pandas.DataFrame",
"pandas.MultiIndex.from_tuples",
"pandas.core.indexes.datetimes.date_range",
"numpy.random.randn",
"pandas._testing.assert_frame_equal",
"numpy.arange",
"pandas.DatetimeIndex",
"pandas.Int64Index",
"pandas._testing.assert_series_equal",
"pandas._testing.assert_index_equal",
"pandas._testing.assert_produces_warning",
"pandas.date_range",
"numpy.array",
"pandas.Grouper",
"pandas.MultiIndex.from_arrays",
"pandas.Timestamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sandialabs/Spitfire | [
"65670e3ba5d1ccb4ac72524b77957706345c5bf6",
"65670e3ba5d1ccb4ac72524b77957706345c5bf6"
] | [
"tests/tabulation/adiabatic_slfm/rebless.py",
"docs/source/demo/flamelet2d/igniter.py"
] | [
"from os.path import abspath, join\n\n\ndef run():\n from spitfire.chemistry.mechanism import ChemicalMechanismSpec\n from spitfire.chemistry.tabulation import build_adiabatic_slfm_library\n import spitfire.chemistry.analysis as sca\n import numpy as np\n\n test_xml = abspath(join('tests', 'test_mechanisms', 'h2-burke.xml'))\n m = ChemicalMechanismSpec(cantera_xml=test_xml, group_name='h2-burke')\n pressure = 101325.\n air = m.stream(stp_air=True)\n air.TP = 1200., pressure\n fuel = m.stream('TPY', (300., pressure, 'H2:1'))\n\n flamelet_specs = {'mech_spec': m, 'oxy_stream': air, 'fuel_stream': fuel, 'grid_points': 34}\n\n l = build_adiabatic_slfm_library(flamelet_specs, verbose=False, diss_rate_values=np.logspace(0, 1, 8), diss_rate_log_scaled=True)\n l = sca.compute_specific_enthalpy(m, l)\n l = sca.compute_isochoric_specific_heat(m, l)\n l = sca.compute_isobaric_specific_heat(m, l)\n l = sca.compute_density(m, l)\n l = sca.compute_pressure(m, l)\n l = sca.compute_viscosity(m, l)\n\n return l\n\n\nif __name__ == '__main__':\n gold_pkl = abspath(join('tests', 'tabulation', 'adiabatic_slfm', 'gold.pkl'))\n output_library = run()\n output_library.save_to_file(gold_pkl)\n",
"from spitfire.chemistry.flamelet2d import _Flamelet2D\nfrom spitfire.chemistry.flamelet import Flamelet\nfrom spitfire.chemistry.mechanism import ChemicalMechanismSpec\nfrom spitfire.time.integrator import Governor, NumberOfTimeSteps, FinalTime, Steady, SaveAllDataToList\nfrom spitfire.time.methods import AdaptiveERK54CashKarp, ESDIRK64, BackwardEulerWithError\nfrom spitfire.time.nonlinear import SimpleNewtonSolver\nfrom spitfire.time.stepcontrol import PIController\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import Normalize\n\nm = ChemicalMechanismSpec(cantera_xml='coh2-hawkes.xml', group_name='coh2-hawkes')\nprint(m.n_species, m.n_reactions)\n\npressure = 101325.\n\nair = m.stream(stp_air=True)\nair.TP = 300., pressure\nsynthesis_gas = m.stream('X', 'H2:1, CO:1')\nexhaust_gas = m.stream('X', 'CO2:1, H2O:1, CO:0.5, H2:0.001')\n\nfuel1 = m.copy_stream(synthesis_gas)\nfuel1.TP = 1000., pressure\nfuel2 = m.copy_stream(exhaust_gas)\nfuel2.TP = 400., pressure\n\nfuel1_name = 'SG'\nfuel2_name = 'EG'\n\nx_cp = m.stoich_mixture_fraction(fuel2, air)\ny_cp = m.stoich_mixture_fraction(fuel1, air)\nx_cp = x_cp if x_cp < 0.5 else 1. - x_cp\ny_cp = y_cp if y_cp < 0.5 else 1. - y_cp\n\nx_cc = 6.\ny_cc = 6.\n\n\ndef make_clustered_grid(nx, ny, x_cp, y_cp, x_cc, y_cc):\n x_half1 = Flamelet._clustered_grid(nx // 2, x_cp * 2., x_cc)[0] * 0.5\n y_half1 = Flamelet._clustered_grid(nx // 2, y_cp * 2., y_cc)[0] * 0.5\n x_half2 = (0.5 - x_half1)[::-1]\n y_half2 = (0.5 - y_half1)[::-1]\n x_range = np.hstack((x_half1, 0.5 + x_half2))\n y_range = np.hstack((y_half1, 0.5 + y_half2))\n dx_mid = x_range[nx // 2 - 1] - x_range[nx // 2 - 2]\n x_range[nx // 2 - 1] -= dx_mid / 3.\n x_range[nx // 2 + 0] += dx_mid / 3.\n dy_mid = y_range[ny // 2 - 1] - y_range[ny // 2 - 2]\n y_range[ny // 2 - 1] -= dy_mid / 3.\n y_range[ny // 2 + 0] += dy_mid / 3.\n return x_range, y_range\n\n\nnx = 32\nny = nx\nx_range, y_range = make_clustered_grid(nx, ny, x_cp, y_cp, x_cc, y_cc)\nx_grid, y_grid = np.meshgrid(x_range, y_range)\n\nchi11_max = 1.\nchi22_max = 1.\n\nf = _Flamelet2D(m, 'unreacted', pressure, air, fuel1, fuel2, chi11_max, chi22_max, grid_1=x_range, grid_2=y_range)\nnq = f._n_equations\n\nphi0 = np.copy(f._initial_state)\n\n\ndef plot_contours(phi, variable, i):\n fig = plt.figure()\n iq = 0 if variable == 'T' else m.species_index(variable) + 1\n phi2d = phi[iq::nq].reshape((ny, nx), order='F')\n phi02d = phi0[iq::nq].reshape((ny, nx), order='F')\n ax = plt.subplot2grid((3, 4), (2, 1), rowspan=1, colspan=2)\n ax.cla()\n ax.plot(x_range, phi02d[0, :], 'b--', label='EQ')\n ax.plot(x_range, phi2d[0, :], 'g-', label='SLFM')\n Tmin = 200.\n Tmax = int(np.max(phi) // 100 + 1) * 100\n if variable == 'T':\n ax.set_ylim([Tmin, Tmax])\n ax.yaxis.tick_right()\n ax.set_xlabel('$Z_1$')\n ax.set_xlim([0, 1])\n ax.grid(True)\n # ax.legend(loc='best')\n ax.legend(loc='center left', bbox_to_anchor=(-0.4, 0.5), ncol=1, borderaxespad=0, frameon=False)\n ax = plt.subplot2grid((3, 4), (0, 0), rowspan=2, colspan=1)\n ax.cla()\n ax.plot(phi02d[:, 0], y_range, 'b--', label='EQ')\n ax.plot(phi2d[:, 0], y_range, 'g-', label='SLFM')\n if variable == 'T':\n ax.set_xlim([Tmin, Tmax])\n ax.set_ylabel('$Z_2$')\n ax.set_ylim([0, 1])\n ax.grid(True)\n # ax.legend(loc='best')\n ax = plt.subplot2grid((3, 4), (0, 1), rowspan=2, colspan=2)\n cax = plt.subplot2grid((3, 4), (0, 3), rowspan=2, colspan=1)\n ax.cla()\n if variable == 'T':\n contour = ax.contourf(x_grid, y_grid, phi2d, cmap=plt.get_cmap('magma'),\n norm=Normalize(Tmin, 
Tmax), levels=np.linspace(Tmin, Tmax, 20))\n else:\n contour = ax.contourf(x_grid, y_grid, phi2d, cmap=plt.get_cmap('magma'),\n levels=np.linspace(np.min(phi2d), np.max(phi2d), 20))\n plt.colorbar(contour, cax=cax)\n ax.plot([0, 0, 1, 0], [0, 1, 0, 0], 'k-', linewidth=0.5, zorder=4)\n # ax.contour(x_grid, y_grid, phi2d, cmap=plt.get_cmap('rainbow'),\n # norm=Normalize(Tmin, Tmax), levels=np.linspace(Tmin, Tmax, 20))\n t1 = plt.Polygon(np.array([[1, 0], [1, 1], [0, 1]]), color='w', zorder=3)\n ax.add_patch(t1)\n ax.text(-0.1, 1.04, fuel1_name, fontdict={'fontweight': 'bold'},\n bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.7), zorder=10)\n ax.text(0.95, -0.05, fuel2_name, fontdict={'fontweight': 'bold'},\n bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.7), zorder=11)\n ax.text(-0.08, -0.05, 'air', fontdict={'fontweight': 'bold'},\n bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.7), zorder=12)\n # ax.set_xlabel('$Z_1$')\n # ax.set_ylabel('$Z_2$', rotation=0)\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_xlim([0, 1])\n ax.set_ylim([0, 1])\n ax.grid(True)\n cax.set_title('SLFM T' if variable == 'T' else 'SLFM ' + variable)\n plt.tight_layout()\n plt.savefig(f'img_{variable}_{i}.png')\n\n\ng = Governor()\ng.log_rate = 10\ng.clip_to_positive = True\ng.norm_weighting = 1. / f._variable_scales\ng.projector_setup_rate = 20\ng.time_step_increase_factor_to_force_jacobian = 1.1\ng.time_step_decrease_factor_to_force_jacobian = 0.8\ndata = SaveAllDataToList(initial_solution=phi0,\n save_frequency=100,\n file_prefix='ip',\n file_first_and_last_only=True,\n save_first_and_last_only=True)\ng.custom_post_process_step = data.save_data\nnewton = SimpleNewtonSolver(evaluate_jacobian_every_iter=False,\n norm_weighting=g.norm_weighting,\n tolerance=1.e-12,\n max_nonlinear_iter=8)\nesdirk = ESDIRK64(norm_weighting=g.norm_weighting, nonlinear_solver=newton)\npi = PIController(first_step=1.e-8, target_error=1.e-10, max_step=1.e0)\n\nviz_dt = 1.e-3\nviz_nt = 100\n\nplot_contours(phi0, 'T', 'ic')\nphi = np.copy(phi0)\ndt = 1.e-8\nfor i in range(viz_nt):\n g.termination_criteria = FinalTime((i + 1) * viz_dt)\n pi._first_step = dt\n _, phi, _, dt = g.integrate(right_hand_side=f.rhs,\n linear_setup=f.block_Jacobi_setup,\n linear_solve=f.block_Jacobi_solve,\n initial_condition=phi,\n method=esdirk,\n controller=pi,\n initial_time=i * viz_dt)\n plot_contours(phi, 'T', i)\n"
] | [
[
"numpy.logspace"
],
[
"numpy.hstack",
"matplotlib.pyplot.tight_layout",
"numpy.linspace",
"numpy.min",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.savefig",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.colorbar",
"numpy.copy",
"numpy.max",
"numpy.array",
"numpy.meshgrid",
"matplotlib.pyplot.subplot2grid",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
me-grimjoww/Covid-Sutra | [
"ef07bf61ae3b1adc19affe5e040a9ba2f06fb5a8"
] | [
"Django_mask_attendance/main_base/face_verification.py"
] | [
" \r\nimport os\r\nfrom django.urls import path, include\r\nimport face_recognition\r\nimport cv2\r\nfrom imutils.video import VideoStream\r\nimport imutils\r\nimport numpy as np\r\nfrom tensorflow.keras.models import load_model\r\nfrom tensorflow.keras.applications.mobilenet_v2 import preprocess_input\r\nfrom tensorflow.keras.preprocessing.image import img_to_array\r\n\r\n\r\n\r\n# load our serialized face detector model from disk\r\nprototxtPath = r\"face_detector\\deploy.prototxt\"\r\nweightsPath = r\"face_detector\\res10_300x300_ssd_iter_140000.caffemodel\"\r\nfaceNet = cv2.dnn.readNet(prototxtPath, weightsPath)\r\n\r\n# load the face mask detector model from disk\r\nmaskNet = load_model(r\"C:\\Users\\mkjsr\\OneDrive\\Desktop\\Django_mask_attendance\\main_base\\mask_detector.model\")\r\n\r\n\r\ndef detect_faces(frame,email):\r\n # grab the dimensions of the frame and then construct a blob\r\n # from it\r\n (h, w) = frame.shape[:2]\r\n blob = cv2.dnn.blobFromImage(frame, 1.0, (224, 224),\r\n (104.0, 177.0, 123.0))\r\n\r\n # pass the blob through the network and obtain the face detections\r\n faceNet.setInput(blob)\r\n detections = faceNet.forward()\r\n print(detections.shape)\r\n\r\n # initialize our list of faces, their corresponding locations,\r\n # and the list of predictions from our face mask network\r\n faces = []\r\n locs = []\r\n lable = \"Not Verified\"\r\n\r\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\r\n MEDIA_ROOT = os.path.join(BASE_DIR,'face_dataset')\r\n loc=(str(MEDIA_ROOT)+'\\\\'+str(email)+'.jpg')\r\n face_1_image = face_recognition.load_image_file(loc)\r\n small_frame_1 = cv2.resize(face_1_image, (0, 0), fx=0.25, fy=0.25)\r\n rgb_small_frame_1 = small_frame_1[:, :, ::-1]\r\n face_1_face_encoding = face_recognition.face_encodings(rgb_small_frame_1)[0]\r\n\r\n # loop over the detections\r\n for i in range(0, detections.shape[2]):\r\n # extract the confidence (i.e., probability) associated with\r\n # the detection\r\n confidence = detections[0, 0, i, 2]\r\n\r\n # filter out weak detections by ensuring the confidence is\r\n # greater than the minimum confidence\r\n if confidence > 0.5:\r\n # compute the (x, y)-coordinates of the bounding box for\r\n # the object\r\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\r\n (startX, startY, endX, endY) = box.astype(\"int\")\r\n\r\n # ensure the bounding boxes fall within the dimensions of\r\n # the frame\r\n (startX, startY) = (max(0, startX), max(0, startY))\r\n (endX, endY) = (min(w - 1, endX), min(h - 1, endY))\r\n\r\n # extract the face ROI, convert it from BGR to RGB channel\r\n # ordering, resize it to 224x224, and preprocess it\r\n face = frame[startY:endY, startX:endX]\r\n face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)\r\n face = cv2.resize(face, (224, 224))\r\n face = img_to_array(face)\r\n face = preprocess_input(face)\r\n\r\n # add the face and bounding boxes to their respective\r\n # lists\r\n faces.append(face)\r\n locs.append((startX, startY, endX, endY))\r\n\r\n if len(faces) > 0:\r\n\t\t# for faster inference we'll make batch predictions on *all*\r\n\t\t# faces at the same time rather than one-by-one predictions\r\n # in the above `for` loop\r\n faces = np.array(faces, dtype=\"float32\")\r\n rgb_small_frame = frame[:, :, ::-1]\r\n face_locations = face_recognition.face_locations(rgb_small_frame)\r\n face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)\r\n if len(face_encodings):\r\n\r\n check = face_recognition.compare_faces(face_1_face_encoding, face_encodings)\r\n 
if check[0]:\r\n lable = 'Verified'\r\n print(lable)\r\n\r\n else :\r\n lable = 'Not Verified'\r\n print(lable)\r\n\r\n\r\n return (locs,lable)\r\n\r\n# initialize the camera\r\ndef facedect(email):\r\n\r\n cam = VideoStream(src=0).start() # 0 -> index of camera\r\n lab = 'Not Verified'\r\n while True:\r\n img = cam.read()\r\n small_frame = imutils.resize(img, width=400)\r\n # rgb_small_frame = small_frame[:, :, ::-1]\r\n # face_locations = face_recognition.face_locations(rgb_small_frame)\r\n # face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)\r\n # check=face_recognition.compare_faces(face_1_face_encoding, face_encodings)\r\n\r\n \r\n # if check[0]:\r\n # label = 'Verified'\r\n # print(label)\r\n\r\n # else :\r\n # label = 'Verified'\r\n # print(label)\r\n\r\n \r\n (locs,lable) = detect_faces(small_frame,email)\r\n\r\n # loop over the detected face locations and their corresponding\r\n # locations\r\n for box in locs:\r\n # unpack the bounding box and predictions\r\n (startX, startY, endX, endY) = box\r\n\r\n # determine the class label and color we'll use to draw\r\n # the bounding box and text\r\n # display the label and bounding box rectangle on the output\r\n # frame\r\n color = (0, 255, 0) if lable == \"Verified\" else (0, 0, 255)\r\n\r\n cv2.putText(small_frame, lable, (startX, startY - 10),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)\r\n cv2.rectangle(small_frame, (startX, startY), (endX, endY), color, 2)\r\n\r\n cv2.imshow(\"Frame\", small_frame)\r\n key = cv2.waitKey(2) & 0xFF\r\n\r\n # if the `q` key was pressed, break from the loop\r\n if key == ord(\"q\"):\r\n lab = lable\r\n break\r\n cv2.destroyAllWindows()\r\n cam.stop()\r\n return lab\r\n\r\n"
] | [
[
"tensorflow.keras.models.load_model",
"tensorflow.keras.applications.mobilenet_v2.preprocess_input",
"numpy.array",
"tensorflow.keras.preprocessing.image.img_to_array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
fgulan/masters-seminar | [
"cd14b305170fa619dc6e6cc9661fa213822e4faa"
] | [
"source/clcd.py"
] | [
"import sys\nimport cro_mapper\nimport os\nimport unicodedata\nimport numpy as np\nfrom scipy import misc\n\ndef _get_all_file_paths(path):\n file_paths = []\n for root, dirs, files in os.walk(path):\n for file_ in files:\n full_path = os.path.join(root, file_)\n if os.path.isfile(full_path) and full_path.endswith(\".png\"):\n file_paths.append(unicodedata.normalize('NFC', full_path))\n return file_paths\n \ndef load_dataset(path):\n print(\"Loading dataset at path:\", path)\n files = _get_all_file_paths(path)\n X = []\n y = []\n for file in files:\n image = misc.imread(file, mode='F')\n X.append(image)\n folder_path = os.path.dirname(file)\n letter = os.path.basename(folder_path)\n letter_int = cro_mapper.map_letter_to_int(letter)\n y.append(letter_int)\n return np.asarray(X), np.asarray(y)\n "
] | [
[
"numpy.asarray",
"scipy.misc.imread"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"1.0",
"0.19",
"0.18",
"1.2",
"0.12",
"0.10",
"0.17",
"0.16"
],
"tensorflow": []
}
] |
Praveenstein/bigGanMicro | [
"d669874c0226907fa41b2140cdc8c46bdef2a283"
] | [
"app/gan_app.py"
] | [
"import numpy as np\nimport os\nimport json\nfrom PIL import Image\nimport pickle\nimport streamlit as st\nfrom streamlit.hashing import _CodeHasher\nfrom streamlit.report_thread import get_report_ctx\nfrom streamlit.server.server import Server\nimport sys\nimport urllib\nimport torch\nimport random\nimport biggan\nfrom torchvision.utils import make_grid\nfrom io import BytesIO\nimport base64\n\nclass NumpyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n return json.JSONEncoder.default(self, obj)\n\n\ndef main():\n first_run = not os.path.exists('state.json') \n state = {}\n st.title(\"Microstructure GAN demo\")\n \"\"\"This is a demonstration of conditional image generation of micrographs using [BigGAN-deep architecture](https://arxiv.org/abs/1809.11096)\n The images generated are using three conditional inputs Annealing Temperature, Annealing Time and the type of cooling used.\n GAN is trained using [Omni Loss](https://arxiv.org/abs/2011.13074) on [UHCSDB](http://uhcsdb.materials.cmu.edu/) images\"\"\"\n \n st.sidebar.title('Processing Conditions',)\n state['anneal_temp'] = st.sidebar.selectbox('Annealing Temperature °C',[700,750,800,900,970,1000,1100])\n state['anneal_time'] = st.sidebar.selectbox('Annealing Time (M: Minutes, H: Hours)',['5M','90M','1H','3H','8H','24H','48H','85H'])\n state['cooling'] = st.sidebar.selectbox('Cooling Type',['Quench','Furnace Cool','Air Cool','650C-1H'])\n temp_dict = {970: 0, 800: 1, 900: 2, 1100: 3, 1000: 4, 700: 5, 750: 6}\n time_dict = {'90M': 0, '24H': 1, '3H': 2, '5M': 3, '8H': 4, '85H': 5, '1H': 6, '48H': 7}\n cool_dict = {'Quench': 0, 'Air Cool': 1, 'Furnace Cool': 2, '650C-1H': 3}\n model = load_gan()\n st.sidebar.subheader('Generate a new latent Vector')\n state['seed'] = 7\n if st.sidebar.button('New z'):\n state['seed'] = random.randint(0,1000)\n rng = np.random.RandomState(state['seed'])\n noise = torch.tensor(rng.normal(0, 1, (1, 384))).float()\n state['noise'] = noise.numpy()\n y_temp = temp_dict[state['anneal_temp']]\n y_time = time_dict[state['anneal_time']]\n y_cool = cool_dict[state['cooling']]\n\n state['image_out'] = generate_img(model, noise, y_temp, y_time, y_cool)\n st.subheader('Generated Microstructure for the given processing conditions')\n st.text(\"\")\n st.text(f\"Random seed: {state['seed']}\")\n st.image(np.array(state['image_out']), use_column_width=False)\n\n save_bool = st.button('Save Image')\n if save_bool:\n with open('state.json', 'r') as fp:\n state_old = json.load(fp)\n st.text(f\"The following image was saved. 
It was generated using a random seed: {state_old['seed']}\")\n st.image(np.array(state_old['image_out']), use_column_width=False)\n if not os.path.exists('Generated Images'):\n os.makedirs('Generated Images')\n im = Image.fromarray((np.array(state_old['image_out']).reshape(256,256) * 255).astype(np.uint8))\n im.save(f\"./Generated Images/{state_old['anneal_temp']}-{state_old['anneal_time']}-{state_old['cooling']}-{state_old['seed']}.png\")\n \n state['save_bool'] = save_bool\n with open('state.json', 'w') as fp:\n json.dump(state, fp, cls=NumpyEncoder)\n \[email protected](suppress_st_warning=True)\ndef load_gan():\n model = biggan.Generator()\n model.load_state_dict(torch.load('BigGAN-deep.pth', map_location=torch.device('cpu')))\n return model\n\[email protected](suppress_st_warning=True)\ndef generate_img(model,noise, y_temp, y_time, y_cool):\n\ty_temp = torch.tensor([y_temp])\n\ty_time = torch.tensor([y_time])\n\ty_cool = torch.tensor([y_cool])\n\twith torch.no_grad():\n\t\tsynthetic = model(noise, y_temp, y_time, y_cool)[0]\n\t\tsynthetic = 0.5 * synthetic + 0.5\n\t#synthetic = make_grid(synthetic, normalize=True)\n\treturn np.transpose(synthetic.numpy() ,(1,2,0))\n\n\nmain()"
] | [
[
"torch.tensor",
"torch.no_grad",
"torch.device",
"numpy.array",
"numpy.random.RandomState"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
shivachawala/PumpItUp | [
"41c8f3be0808009dbd13fda7a6f6f1ebfd916646",
"41c8f3be0808009dbd13fda7a6f6f1ebfd916646",
"41c8f3be0808009dbd13fda7a6f6f1ebfd916646"
] | [
"Code/Final/GridSearch/SVM.py",
"Code/Final/BestModel/DeepLearningBest.py",
"Code/Final/GridSearch/AdaBoost.py"
] | [
"\n# coding: utf-8\n\n# In[ ]:\n\n\n#[GridSearch] SVM Learning Classification\nimport pandas as pd\nimport numpy as np\nimport sys\n# Read dataset\ndata_values = pd.read_csv(\"../../../Datasets/train_values_processed.csv\")\ndata_labels = data_values[\"status_group\"]\ndata_values.drop(['status_group'], axis=1, inplace=True)\n\n\n# In[ ]:\n\n\nfrom sklearn import metrics\nfrom sklearn.metrics import classification_report\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.svm import SVC\n\n#Splitting the dataset in to train and test, splitting percentage taken is 25%\nX_train, X_test, y_train, y_test = train_test_split(data_values, data_labels, test_size=0.25, random_state=42)\n\n\n# In[ ]:\n\n\n#[Model]: SVM \nmodels ={\"SVM\":SVC()}\n#[Grid Search]: Combination of features based on our trails which are best suit for this model \nparameters = {\"SVM\":{\"C\":[1,10,100],\n \"kernel\":('sigmoid', 'rbf'),\n \"gamma\":(0.01,0.1,0.5,1),\n \"max_iter\":[2000,5000,10000],\n \"random_state\":[10]}}\nclassifier = [\"SVM\"]\n#Running Grid Search on the parameters mentioned above\nfor c in classifier:\n SvmClf = GridSearchCV(models[c],parameters[c],cv=5)\n SvmClf = SvmClf.fit(X_train,y_train)\n score = SvmClf.score(X_test,y_test)\n prediction = SvmClf.predict(X_test)\n print(\"Accuracy using \",c,\" classifier is: \",score)\n print(\"-------------------------------------------\")\n print(\"Below is the confusion Matrix for \",c )\n print(metrics.confusion_matrix(y_test,prediction))\n print(\"-------------------------------------------\")\n print(\"Classification Report for \",c,\" is below\")\n print(classification_report(prediction, y_test))\n print(\"-------------------------------------------\")\n\n\n# In[ ]:\n\n\nSvmClf.best_params_\nSvmClf.best_estimator_\nSvmClf.best_score_\n\n\n# In[ ]:\n\n\nscore = SvmClf.score(X_test, y_test)\nprint(score)\n\n",
"\n# coding: utf-8\n\n# In[1]:\n\n\n# Deep Learning Classification\nimport pandas as pd \nfrom sklearn import model_selection\nfrom sklearn.neural_network import MLPClassifier\nimport numpy as np\nfrom sklearn.metrics import classification_report\nfrom sklearn.model_selection import train_test_split\n\n# Read dataset\ndata_values = pd.read_csv(\"../../../Datasets/train_values_processed.csv\")\ndata_labels = data_values[\"status_group\"]\ndata_values.drop(['status_group'], axis=1, inplace=True)\ndata_values.head()\n#Train-Test split: 75%-25%\nX_train, X_test, y_train, y_test = train_test_split(data_values, data_labels, test_size=0.25, random_state=42)\n\n\n# In[2]:\n\n\nMlpClf = MLPClassifier(solver='adam',activation='relu',learning_rate='constant',learning_rate_init=0.01,alpha=0.0001,hidden_layer_sizes=(100))\nMlpClf.fit(X_train, y_train)\nprint(\"Accuracy:\",MlpClf.score(X_test, y_test))\n\n\n# In[4]:\n\n\nfrom sklearn.metrics import confusion_matrix\nMlpClfPred = MlpClf.predict(X_test)\ntestConfusionMtrx = confusion_matrix(y_test, MlpClfPred)\nprint(\"Confusion Matrix: \\n\",testConfusionMtrx)\n\n\n# In[5]:\n\n\n#Classification report\nprint(\"Classification Report:\\n\",classification_report(y_test, MlpClfPred))\n\n\n# In[6]:\n\n\n#To avoid overfitting use kfold cross validation\nfrom sklearn import model_selection\nk = 10\n\nkFold = model_selection.KFold(n_splits=k, random_state=7)\nMlpClf = MLPClassifier(solver='adam',activation='relu',learning_rate='constant',learning_rate_init=0.01,alpha=0.0001,\n hidden_layer_sizes=(100))\naccuracy = model_selection.cross_val_score(MlpClf, data_values, data_labels, cv=kFold)\nprint(\"Accuracy with 10fold Cross Valid:\",accuracy.mean())\n\n",
"\n# coding: utf-8\n\n# In[1]:\n\n\n#[GridSearch] AdaBoost Classification\nimport pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.metrics import classification_report\nfrom sklearn.model_selection import train_test_split\n\n# read the training_data set\ndata_values = pd.read_csv(\"../../../Datasets/train_values_processed.csv\")\ndata_labels = data_values[\"status_group\"]\ndata_values.drop(['status_group'], axis=1, inplace=True)\n#Train-Test Split : 75%-25%\nX_train, X_test, y_train, y_test = train_test_split(data_values, data_labels, test_size=0.25, random_state=42)\n\n\n# In[ ]:\n\n\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn import metrics\nfrom sklearn.tree import DecisionTreeClassifier,ExtraTreeClassifier\nparameters={'base_estimator': [DecisionTreeClassifier(max_depth=3), DecisionTreeClassifier(max_depth=4),\n ExtraTreeClassifier(max_depth=4)],\n 'learning_rate': [0.01, 0.1, 0.5, 1.],\n 'n_estimators': [5, 10, 15, 20, 30, 40, 50, 75, 100, 125],\n 'algorithm': ['SAMME', 'SAMME.R']}\nmodel = AdaBoostClassifier()\n\nAdaBoostClf = GridSearchCV(model,param_grid=parameters)\nAdaBoostClf.fit(X_train, y_train)\nscore = AdaBoostClf.score(X_test,y_test)\nprediction = AdaBoostClf.predict(X_test)\nprint(\"Accuracy using \",AdaBoostClf,\" classifier is: \",score)\nprint(\"-------------------------------------------\")\nprint(\"Below is the confusion Matrix for \",AdaBoostClf )\nprint(metrics.confusion_matrix(y_test,prediction))\nprint(\"-------------------------------------------\")\nprint(\"Classification Report for \",c,\" is below\")\nprint(classification_report(prediction, y_test))\nprint(\"-------------------------------------------\")\n\n\n# In[ ]:\n\n\nAdaBoostClf.best_params_\nAdaBoostClf.best_estimator_\nAdaBoostClf.best_score_\n\n\n# In[ ]:\n\n\n#Accuracy \nscore = AdaBoostClf.score(X_test, y_test)\nprint(score)\n\n"
] | [
[
"sklearn.model_selection.GridSearchCV",
"pandas.read_csv",
"sklearn.metrics.confusion_matrix",
"sklearn.model_selection.train_test_split",
"sklearn.svm.SVC",
"sklearn.metrics.classification_report"
],
[
"sklearn.neural_network.MLPClassifier",
"pandas.read_csv",
"sklearn.model_selection.cross_val_score",
"sklearn.metrics.confusion_matrix",
"sklearn.model_selection.train_test_split",
"sklearn.model_selection.KFold",
"sklearn.metrics.classification_report"
],
[
"pandas.read_csv",
"sklearn.metrics.confusion_matrix",
"sklearn.model_selection.train_test_split",
"sklearn.grid_search.GridSearchCV",
"sklearn.tree.ExtraTreeClassifier",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.ensemble.AdaBoostClassifier",
"sklearn.metrics.classification_report"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
maskjp/mmdetection3d | [
"98f332372b1a4c82bc2d57588a5d764f4176c869",
"98f332372b1a4c82bc2d57588a5d764f4176c869",
"98f332372b1a4c82bc2d57588a5d764f4176c869",
"98f332372b1a4c82bc2d57588a5d764f4176c869",
"98f332372b1a4c82bc2d57588a5d764f4176c869",
"98f332372b1a4c82bc2d57588a5d764f4176c869",
"98f332372b1a4c82bc2d57588a5d764f4176c869",
"98f332372b1a4c82bc2d57588a5d764f4176c869"
] | [
"mmdet3d/datasets/lyft_dataset.py",
"mmdet3d/models/dense_heads/anchor3d_head.py",
"mmdet3d/models/dense_heads/anchor_free_mono3d_head.py",
"mmdet3d/apis/train.py",
"tests/test_models/test_common_modules/test_dgcnn_modules.py",
"mmdet3d/ops/voxel/scatter_points.py",
"mmdet3d/core/bbox/coders/groupfree3d_bbox_coder.py",
"tests/test_utils/test_box_np_ops.py"
] | [
"# Copyright (c) OpenMMLab. All rights reserved.\nimport os\nimport tempfile\nfrom os import path as osp\n\nimport mmcv\nimport numpy as np\nimport pandas as pd\nfrom lyft_dataset_sdk.lyftdataset import LyftDataset as Lyft\nfrom lyft_dataset_sdk.utils.data_classes import Box as LyftBox\nfrom pyquaternion import Quaternion\n\nfrom mmdet3d.core.evaluation.lyft_eval import lyft_eval\nfrom mmdet.datasets import DATASETS\nfrom ..core import show_result\nfrom ..core.bbox import Box3DMode, Coord3DMode, LiDARInstance3DBoxes\nfrom .custom_3d import Custom3DDataset\nfrom .pipelines import Compose\n\n\[email protected]_module()\nclass LyftDataset(Custom3DDataset):\n r\"\"\"Lyft Dataset.\n\n This class serves as the API for experiments on the Lyft Dataset.\n\n Please refer to\n `<https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/data>`_\n for data downloading.\n\n Args:\n ann_file (str): Path of annotation file.\n pipeline (list[dict], optional): Pipeline used for data processing.\n Defaults to None.\n data_root (str): Path of dataset root.\n classes (tuple[str], optional): Classes used in the dataset.\n Defaults to None.\n load_interval (int, optional): Interval of loading the dataset. It is\n used to uniformly sample the dataset. Defaults to 1.\n modality (dict, optional): Modality to specify the sensor data used\n as input. Defaults to None.\n box_type_3d (str, optional): Type of 3D box of this dataset.\n Based on the `box_type_3d`, the dataset will encapsulate the box\n to its original format then converted them to `box_type_3d`.\n Defaults to 'LiDAR' in this dataset. Available options includes\n\n - 'LiDAR': Box in LiDAR coordinates.\n - 'Depth': Box in depth coordinates, usually for indoor dataset.\n - 'Camera': Box in camera coordinates.\n filter_empty_gt (bool, optional): Whether to filter empty GT.\n Defaults to True.\n test_mode (bool, optional): Whether the dataset is in test mode.\n Defaults to False.\n \"\"\" # noqa: E501\n NameMapping = {\n 'bicycle': 'bicycle',\n 'bus': 'bus',\n 'car': 'car',\n 'emergency_vehicle': 'emergency_vehicle',\n 'motorcycle': 'motorcycle',\n 'other_vehicle': 'other_vehicle',\n 'pedestrian': 'pedestrian',\n 'truck': 'truck',\n 'animal': 'animal'\n }\n DefaultAttribute = {\n 'car': 'is_stationary',\n 'truck': 'is_stationary',\n 'bus': 'is_stationary',\n 'emergency_vehicle': 'is_stationary',\n 'other_vehicle': 'is_stationary',\n 'motorcycle': 'is_stationary',\n 'bicycle': 'is_stationary',\n 'pedestrian': 'is_stationary',\n 'animal': 'is_stationary'\n }\n CLASSES = ('car', 'truck', 'bus', 'emergency_vehicle', 'other_vehicle',\n 'motorcycle', 'bicycle', 'pedestrian', 'animal')\n\n def __init__(self,\n ann_file,\n pipeline=None,\n data_root=None,\n classes=None,\n load_interval=1,\n modality=None,\n box_type_3d='LiDAR',\n filter_empty_gt=True,\n test_mode=False):\n self.load_interval = load_interval\n super().__init__(\n data_root=data_root,\n ann_file=ann_file,\n pipeline=pipeline,\n classes=classes,\n modality=modality,\n box_type_3d=box_type_3d,\n filter_empty_gt=filter_empty_gt,\n test_mode=test_mode)\n\n if self.modality is None:\n self.modality = dict(\n use_camera=False,\n use_lidar=True,\n use_radar=False,\n use_map=False,\n use_external=False,\n )\n\n def load_annotations(self, ann_file):\n \"\"\"Load annotations from ann_file.\n\n Args:\n ann_file (str): Path of the annotation file.\n\n Returns:\n list[dict]: List of annotations sorted by timestamps.\n \"\"\"\n data = mmcv.load(ann_file)\n data_infos = list(sorted(data['infos'], 
key=lambda e: e['timestamp']))\n data_infos = data_infos[::self.load_interval]\n self.metadata = data['metadata']\n self.version = self.metadata['version']\n return data_infos\n\n def get_data_info(self, index):\n \"\"\"Get data info according to the given index.\n\n Args:\n index (int): Index of the sample data to get.\n\n Returns:\n dict: Data information that will be passed to the data\n preprocessing pipelines. It includes the following keys:\n\n - sample_idx (str): sample index\n - pts_filename (str): filename of point clouds\n - sweeps (list[dict]): infos of sweeps\n - timestamp (float): sample timestamp\n - img_filename (str, optional): image filename\n - lidar2img (list[np.ndarray], optional): transformations\n from lidar to different cameras\n - ann_info (dict): annotation info\n \"\"\"\n info = self.data_infos[index]\n\n # standard protocol modified from SECOND.Pytorch\n input_dict = dict(\n sample_idx=info['token'],\n pts_filename=info['lidar_path'],\n sweeps=info['sweeps'],\n timestamp=info['timestamp'] / 1e6,\n )\n\n if self.modality['use_camera']:\n image_paths = []\n lidar2img_rts = []\n for cam_type, cam_info in info['cams'].items():\n image_paths.append(cam_info['data_path'])\n # obtain lidar to image transformation matrix\n lidar2cam_r = np.linalg.inv(cam_info['sensor2lidar_rotation'])\n lidar2cam_t = cam_info[\n 'sensor2lidar_translation'] @ lidar2cam_r.T\n lidar2cam_rt = np.eye(4)\n lidar2cam_rt[:3, :3] = lidar2cam_r.T\n lidar2cam_rt[3, :3] = -lidar2cam_t\n intrinsic = cam_info['cam_intrinsic']\n viewpad = np.eye(4)\n viewpad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic\n lidar2img_rt = (viewpad @ lidar2cam_rt.T)\n lidar2img_rts.append(lidar2img_rt)\n\n input_dict.update(\n dict(\n img_filename=image_paths,\n lidar2img=lidar2img_rts,\n ))\n\n if not self.test_mode:\n annos = self.get_ann_info(index)\n input_dict['ann_info'] = annos\n\n return input_dict\n\n def get_ann_info(self, index):\n \"\"\"Get annotation info according to the given index.\n\n Args:\n index (int): Index of the annotation data to get.\n\n Returns:\n dict: Annotation information consists of the following keys:\n\n - gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`):\n 3D ground truth bboxes.\n - gt_labels_3d (np.ndarray): Labels of ground truths.\n - gt_names (list[str]): Class names of ground truths.\n \"\"\"\n info = self.data_infos[index]\n gt_bboxes_3d = info['gt_boxes']\n gt_names_3d = info['gt_names']\n gt_labels_3d = []\n for cat in gt_names_3d:\n if cat in self.CLASSES:\n gt_labels_3d.append(self.CLASSES.index(cat))\n else:\n gt_labels_3d.append(-1)\n gt_labels_3d = np.array(gt_labels_3d)\n\n if 'gt_shape' in info:\n gt_shape = info['gt_shape']\n gt_bboxes_3d = np.concatenate([gt_bboxes_3d, gt_shape], axis=-1)\n\n # the lyft box center is [0.5, 0.5, 0.5], we change it to be\n # the same as KITTI (0.5, 0.5, 0)\n gt_bboxes_3d = LiDARInstance3DBoxes(\n gt_bboxes_3d,\n box_dim=gt_bboxes_3d.shape[-1],\n origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d)\n\n anns_results = dict(\n gt_bboxes_3d=gt_bboxes_3d,\n gt_labels_3d=gt_labels_3d,\n )\n return anns_results\n\n def _format_bbox(self, results, jsonfile_prefix=None):\n \"\"\"Convert the results to the standard format.\n\n Args:\n results (list[dict]): Testing results of the dataset.\n jsonfile_prefix (str): The prefix of the output jsonfile.\n You can specify the output directory/filename by\n modifying the jsonfile_prefix. 
Default: None.\n\n Returns:\n str: Path of the output json file.\n \"\"\"\n lyft_annos = {}\n mapped_class_names = self.CLASSES\n\n print('Start to convert detection format...')\n for sample_id, det in enumerate(mmcv.track_iter_progress(results)):\n annos = []\n boxes = output_to_lyft_box(det)\n sample_token = self.data_infos[sample_id]['token']\n boxes = lidar_lyft_box_to_global(self.data_infos[sample_id], boxes)\n for i, box in enumerate(boxes):\n name = mapped_class_names[box.label]\n lyft_anno = dict(\n sample_token=sample_token,\n translation=box.center.tolist(),\n size=box.wlh.tolist(),\n rotation=box.orientation.elements.tolist(),\n name=name,\n score=box.score)\n annos.append(lyft_anno)\n lyft_annos[sample_token] = annos\n lyft_submissions = {\n 'meta': self.modality,\n 'results': lyft_annos,\n }\n\n mmcv.mkdir_or_exist(jsonfile_prefix)\n res_path = osp.join(jsonfile_prefix, 'results_lyft.json')\n print('Results writes to', res_path)\n mmcv.dump(lyft_submissions, res_path)\n return res_path\n\n def _evaluate_single(self,\n result_path,\n logger=None,\n metric='bbox',\n result_name='pts_bbox'):\n \"\"\"Evaluation for a single model in Lyft protocol.\n\n Args:\n result_path (str): Path of the result file.\n logger (logging.Logger | str, optional): Logger used for printing\n related information during evaluation. Default: None.\n metric (str, optional): Metric name used for evaluation.\n Default: 'bbox'.\n result_name (str, optional): Result name in the metric prefix.\n Default: 'pts_bbox'.\n\n Returns:\n dict: Dictionary of evaluation details.\n \"\"\"\n\n output_dir = osp.join(*osp.split(result_path)[:-1])\n lyft = Lyft(\n data_path=osp.join(self.data_root, self.version),\n json_path=osp.join(self.data_root, self.version, self.version),\n verbose=True)\n eval_set_map = {\n 'v1.01-train': 'val',\n }\n metrics = lyft_eval(lyft, self.data_root, result_path,\n eval_set_map[self.version], output_dir, logger)\n\n # record metrics\n detail = dict()\n metric_prefix = f'{result_name}_Lyft'\n\n for i, name in enumerate(metrics['class_names']):\n AP = float(metrics['mAPs_cate'][i])\n detail[f'{metric_prefix}/{name}_AP'] = AP\n\n detail[f'{metric_prefix}/mAP'] = metrics['Final mAP']\n return detail\n\n def format_results(self, results, jsonfile_prefix=None, csv_savepath=None):\n \"\"\"Format the results to json (standard format for COCO evaluation).\n\n Args:\n results (list[dict]): Testing results of the dataset.\n jsonfile_prefix (str): The prefix of json files. It includes\n the file path and the prefix of filename, e.g., \"a/b/prefix\".\n If not specified, a temp file will be created. Default: None.\n csv_savepath (str): The path for saving csv files.\n It includes the file path and the csv filename,\n e.g., \"a/b/filename.csv\". If not specified,\n the result will not be converted to csv file.\n\n Returns:\n tuple: Returns (result_files, tmp_dir), where `result_files` is a\n dict containing the json filepaths, `tmp_dir` is the temporal\n directory created for saving json files when\n `jsonfile_prefix` is not specified.\n \"\"\"\n assert isinstance(results, list), 'results must be a list'\n assert len(results) == len(self), (\n 'The length of results is not equal to the dataset len: {} != {}'.\n format(len(results), len(self)))\n\n if jsonfile_prefix is None:\n tmp_dir = tempfile.TemporaryDirectory()\n jsonfile_prefix = osp.join(tmp_dir.name, 'results')\n else:\n tmp_dir = None\n\n # currently the output prediction results could be in two formats\n # 1. 
list of dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...)\n # 2. list of dict('pts_bbox' or 'img_bbox':\n # dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...))\n # this is a workaround to enable evaluation of both formats on Lyft\n # refer to https://github.com/open-mmlab/mmdetection3d/issues/449\n if not ('pts_bbox' in results[0] or 'img_bbox' in results[0]):\n result_files = self._format_bbox(results, jsonfile_prefix)\n else:\n # should take the inner dict out of 'pts_bbox' or 'img_bbox' dict\n result_files = dict()\n for name in results[0]:\n print(f'\\nFormating bboxes of {name}')\n results_ = [out[name] for out in results]\n tmp_file_ = osp.join(jsonfile_prefix, name)\n result_files.update(\n {name: self._format_bbox(results_, tmp_file_)})\n if csv_savepath is not None:\n self.json2csv(result_files['pts_bbox'], csv_savepath)\n return result_files, tmp_dir\n\n def evaluate(self,\n results,\n metric='bbox',\n logger=None,\n jsonfile_prefix=None,\n csv_savepath=None,\n result_names=['pts_bbox'],\n show=False,\n out_dir=None,\n pipeline=None):\n \"\"\"Evaluation in Lyft protocol.\n\n Args:\n results (list[dict]): Testing results of the dataset.\n metric (str | list[str], optional): Metrics to be evaluated.\n Default: 'bbox'.\n logger (logging.Logger | str, optional): Logger used for printing\n related information during evaluation. Default: None.\n jsonfile_prefix (str, optional): The prefix of json files including\n the file path and the prefix of filename, e.g., \"a/b/prefix\".\n If not specified, a temp file will be created. Default: None.\n csv_savepath (str, optional): The path for saving csv files.\n It includes the file path and the csv filename,\n e.g., \"a/b/filename.csv\". If not specified,\n the result will not be converted to csv file.\n result_names (list[str], optional): Result names in the\n metric prefix. 
Default: ['pts_bbox'].\n show (bool, optional): Whether to visualize.\n Default: False.\n out_dir (str, optional): Path to save the visualization results.\n Default: None.\n pipeline (list[dict], optional): raw data loading for showing.\n Default: None.\n\n Returns:\n dict[str, float]: Evaluation results.\n \"\"\"\n result_files, tmp_dir = self.format_results(results, jsonfile_prefix,\n csv_savepath)\n\n if isinstance(result_files, dict):\n results_dict = dict()\n for name in result_names:\n print(f'Evaluating bboxes of {name}')\n ret_dict = self._evaluate_single(result_files[name])\n results_dict.update(ret_dict)\n elif isinstance(result_files, str):\n results_dict = self._evaluate_single(result_files)\n\n if tmp_dir is not None:\n tmp_dir.cleanup()\n\n if show or out_dir:\n self.show(results, out_dir, show=show, pipeline=pipeline)\n return results_dict\n\n def _build_default_pipeline(self):\n \"\"\"Build the default pipeline for this dataset.\"\"\"\n pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='LIDAR',\n load_dim=5,\n use_dim=5,\n file_client_args=dict(backend='disk')),\n dict(\n type='LoadPointsFromMultiSweeps',\n sweeps_num=10,\n file_client_args=dict(backend='disk')),\n dict(\n type='DefaultFormatBundle3D',\n class_names=self.CLASSES,\n with_label=False),\n dict(type='Collect3D', keys=['points'])\n ]\n return Compose(pipeline)\n\n def show(self, results, out_dir, show=False, pipeline=None):\n \"\"\"Results visualization.\n\n Args:\n results (list[dict]): List of bounding boxes results.\n out_dir (str): Output directory of visualization result.\n show (bool): Whether to visualize the results online.\n Default: False.\n pipeline (list[dict], optional): raw data loading for showing.\n Default: None.\n \"\"\"\n assert out_dir is not None, 'Expect out_dir, got none.'\n pipeline = self._get_pipeline(pipeline)\n for i, result in enumerate(results):\n if 'pts_bbox' in result.keys():\n result = result['pts_bbox']\n data_info = self.data_infos[i]\n pts_path = data_info['lidar_path']\n file_name = osp.split(pts_path)[-1].split('.')[0]\n points = self._extract_data(i, pipeline, 'points').numpy()\n points = Coord3DMode.convert_point(points, Coord3DMode.LIDAR,\n Coord3DMode.DEPTH)\n inds = result['scores_3d'] > 0.1\n gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d'].tensor.numpy()\n show_gt_bboxes = Box3DMode.convert(gt_bboxes, Box3DMode.LIDAR,\n Box3DMode.DEPTH)\n pred_bboxes = result['boxes_3d'][inds].tensor.numpy()\n show_pred_bboxes = Box3DMode.convert(pred_bboxes, Box3DMode.LIDAR,\n Box3DMode.DEPTH)\n show_result(points, show_gt_bboxes, show_pred_bboxes, out_dir,\n file_name, show)\n\n def json2csv(self, json_path, csv_savepath):\n \"\"\"Convert the json file to csv format for submission.\n\n Args:\n json_path (str): Path of the result json file.\n csv_savepath (str): Path to save the csv file.\n \"\"\"\n results = mmcv.load(json_path)['results']\n sample_list_path = osp.join(self.data_root, 'sample_submission.csv')\n data = pd.read_csv(sample_list_path)\n Id_list = list(data['Id'])\n pred_list = list(data['PredictionString'])\n cnt = 0\n print('Converting the json to csv...')\n for token in results.keys():\n cnt += 1\n predictions = results[token]\n prediction_str = ''\n for i in range(len(predictions)):\n prediction_str += \\\n str(predictions[i]['score']) + ' ' + \\\n str(predictions[i]['translation'][0]) + ' ' + \\\n str(predictions[i]['translation'][1]) + ' ' + \\\n str(predictions[i]['translation'][2]) + ' ' + \\\n str(predictions[i]['size'][0]) + ' ' + \\\n 
str(predictions[i]['size'][1]) + ' ' + \\\n str(predictions[i]['size'][2]) + ' ' + \\\n str(Quaternion(list(predictions[i]['rotation']))\n .yaw_pitch_roll[0]) + ' ' + \\\n predictions[i]['name'] + ' '\n prediction_str = prediction_str[:-1]\n idx = Id_list.index(token)\n pred_list[idx] = prediction_str\n df = pd.DataFrame({'Id': Id_list, 'PredictionString': pred_list})\n mmcv.mkdir_or_exist(os.path.dirname(csv_savepath))\n df.to_csv(csv_savepath, index=False)\n\n\ndef output_to_lyft_box(detection):\n \"\"\"Convert the output to the box class in the Lyft.\n\n Args:\n detection (dict): Detection results.\n\n Returns:\n list[:obj:`LyftBox`]: List of standard LyftBoxes.\n \"\"\"\n box3d = detection['boxes_3d']\n scores = detection['scores_3d'].numpy()\n labels = detection['labels_3d'].numpy()\n\n box_gravity_center = box3d.gravity_center.numpy()\n box_dims = box3d.dims.numpy()\n box_yaw = box3d.yaw.numpy()\n\n # our LiDAR coordinate system -> Lyft box coordinate system\n lyft_box_dims = box_dims[:, [1, 0, 2]]\n\n box_list = []\n for i in range(len(box3d)):\n quat = Quaternion(axis=[0, 0, 1], radians=box_yaw[i])\n box = LyftBox(\n box_gravity_center[i],\n lyft_box_dims[i],\n quat,\n label=labels[i],\n score=scores[i])\n box_list.append(box)\n return box_list\n\n\ndef lidar_lyft_box_to_global(info, boxes):\n \"\"\"Convert the box from ego to global coordinate.\n\n Args:\n info (dict): Info for a specific sample data, including the\n calibration information.\n boxes (list[:obj:`LyftBox`]): List of predicted LyftBoxes.\n\n Returns:\n list: List of standard LyftBoxes in the global\n coordinate.\n \"\"\"\n box_list = []\n for box in boxes:\n # Move box to ego vehicle coord system\n box.rotate(Quaternion(info['lidar2ego_rotation']))\n box.translate(np.array(info['lidar2ego_translation']))\n # Move box to global coord system\n box.rotate(Quaternion(info['ego2global_rotation']))\n box.translate(np.array(info['ego2global_translation']))\n box_list.append(box)\n return box_list\n",
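The json2csv method in the entry above flattens each prediction into Lyft's submission "PredictionString" (score, global center, size, yaw, class name). A minimal standalone sketch, not part of the dataset entry, of how one such string is assembled from a prediction dict shaped like the ones written by _format_bbox; all numeric values below are made up for illustration.

# Sketch only: assemble one Lyft PredictionString from a prediction dict
# shaped like the entries produced by LyftDataset._format_bbox above.
from pyquaternion import Quaternion

pred = {
    'score': 0.87,
    'translation': [1015.3, 610.2, -18.6],   # global x, y, z (illustrative)
    'size': [1.9, 4.6, 1.7],                 # w, l, h
    'rotation': [0.707, 0.0, 0.0, 0.707],    # quaternion (w, x, y, z)
    'name': 'car',
}

yaw = Quaternion(list(pred['rotation'])).yaw_pitch_roll[0]
prediction_str = ' '.join(
    str(v) for v in
    [pred['score'], *pred['translation'], *pred['size'], yaw, pred['name']])
print(prediction_str)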
"# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nfrom mmcv.runner import BaseModule, force_fp32\nfrom torch import nn as nn\n\nfrom mmdet3d.core import (PseudoSampler, box3d_multiclass_nms, limit_period,\n xywhr2xyxyr)\nfrom mmdet.core import (build_assigner, build_bbox_coder,\n build_prior_generator, build_sampler, multi_apply)\nfrom mmdet.models import HEADS\nfrom ..builder import build_loss\nfrom .train_mixins import AnchorTrainMixin\n\n\[email protected]_module()\nclass Anchor3DHead(BaseModule, AnchorTrainMixin):\n \"\"\"Anchor head for SECOND/PointPillars/MVXNet/PartA2.\n\n Args:\n num_classes (int): Number of classes.\n in_channels (int): Number of channels in the input feature map.\n train_cfg (dict): Train configs.\n test_cfg (dict): Test configs.\n feat_channels (int): Number of channels of the feature map.\n use_direction_classifier (bool): Whether to add a direction classifier.\n anchor_generator(dict): Config dict of anchor generator.\n assigner_per_size (bool): Whether to do assignment for each separate\n anchor size.\n assign_per_class (bool): Whether to do assignment for each class.\n diff_rad_by_sin (bool): Whether to change the difference into sin\n difference for box regression loss.\n dir_offset (float | int): The offset of BEV rotation angles.\n (TODO: may be moved into box coder)\n dir_limit_offset (float | int): The limited range of BEV\n rotation angles. (TODO: may be moved into box coder)\n bbox_coder (dict): Config dict of box coders.\n loss_cls (dict): Config of classification loss.\n loss_bbox (dict): Config of localization loss.\n loss_dir (dict): Config of direction classifier loss.\n \"\"\"\n\n def __init__(self,\n num_classes,\n in_channels,\n train_cfg,\n test_cfg,\n feat_channels=256,\n use_direction_classifier=True,\n anchor_generator=dict(\n type='Anchor3DRangeGenerator',\n range=[0, -39.68, -1.78, 69.12, 39.68, -1.78],\n strides=[2],\n sizes=[[3.9, 1.6, 1.56]],\n rotations=[0, 1.57],\n custom_values=[],\n reshape_out=False),\n assigner_per_size=False,\n assign_per_class=False,\n diff_rad_by_sin=True,\n dir_offset=-np.pi / 2,\n dir_limit_offset=0,\n bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'),\n loss_cls=dict(\n type='CrossEntropyLoss',\n use_sigmoid=True,\n loss_weight=1.0),\n loss_bbox=dict(\n type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0),\n loss_dir=dict(type='CrossEntropyLoss', loss_weight=0.2),\n init_cfg=None):\n super().__init__(init_cfg=init_cfg)\n self.in_channels = in_channels\n self.num_classes = num_classes\n self.feat_channels = feat_channels\n self.diff_rad_by_sin = diff_rad_by_sin\n self.use_direction_classifier = use_direction_classifier\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n self.assigner_per_size = assigner_per_size\n self.assign_per_class = assign_per_class\n self.dir_offset = dir_offset\n self.dir_limit_offset = dir_limit_offset\n import warnings\n warnings.warn(\n 'dir_offset and dir_limit_offset will be depressed and be '\n 'incorporated into box coder in the future')\n self.fp16_enabled = False\n\n # build anchor generator\n self.anchor_generator = build_prior_generator(anchor_generator)\n # In 3D detection, the anchor stride is connected with anchor size\n self.num_anchors = self.anchor_generator.num_base_anchors\n # build box coder\n self.bbox_coder = build_bbox_coder(bbox_coder)\n self.box_code_size = self.bbox_coder.code_size\n\n # build loss function\n self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)\n self.sampling = loss_cls['type'] not in ['FocalLoss', 
'GHMC']\n if not self.use_sigmoid_cls:\n self.num_classes += 1\n self.loss_cls = build_loss(loss_cls)\n self.loss_bbox = build_loss(loss_bbox)\n self.loss_dir = build_loss(loss_dir)\n self.fp16_enabled = False\n\n self._init_layers()\n self._init_assigner_sampler()\n\n if init_cfg is None:\n self.init_cfg = dict(\n type='Normal',\n layer='Conv2d',\n std=0.01,\n override=dict(\n type='Normal', name='conv_cls', std=0.01, bias_prob=0.01))\n\n def _init_assigner_sampler(self):\n \"\"\"Initialize the target assigner and sampler of the head.\"\"\"\n if self.train_cfg is None:\n return\n\n if self.sampling:\n self.bbox_sampler = build_sampler(self.train_cfg.sampler)\n else:\n self.bbox_sampler = PseudoSampler()\n if isinstance(self.train_cfg.assigner, dict):\n self.bbox_assigner = build_assigner(self.train_cfg.assigner)\n elif isinstance(self.train_cfg.assigner, list):\n self.bbox_assigner = [\n build_assigner(res) for res in self.train_cfg.assigner\n ]\n\n def _init_layers(self):\n \"\"\"Initialize neural network layers of the head.\"\"\"\n self.cls_out_channels = self.num_anchors * self.num_classes\n self.conv_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 1)\n self.conv_reg = nn.Conv2d(self.feat_channels,\n self.num_anchors * self.box_code_size, 1)\n if self.use_direction_classifier:\n self.conv_dir_cls = nn.Conv2d(self.feat_channels,\n self.num_anchors * 2, 1)\n\n def forward_single(self, x):\n \"\"\"Forward function on a single-scale feature map.\n\n Args:\n x (torch.Tensor): Input features.\n\n Returns:\n tuple[torch.Tensor]: Contain score of each class, bbox\n regression and direction classification predictions.\n \"\"\"\n cls_score = self.conv_cls(x)\n bbox_pred = self.conv_reg(x)\n dir_cls_preds = None\n if self.use_direction_classifier:\n dir_cls_preds = self.conv_dir_cls(x)\n return cls_score, bbox_pred, dir_cls_preds\n\n def forward(self, feats):\n \"\"\"Forward pass.\n\n Args:\n feats (list[torch.Tensor]): Multi-level features, e.g.,\n features produced by FPN.\n\n Returns:\n tuple[list[torch.Tensor]]: Multi-level class score, bbox\n and direction predictions.\n \"\"\"\n return multi_apply(self.forward_single, feats)\n\n def get_anchors(self, featmap_sizes, input_metas, device='cuda'):\n \"\"\"Get anchors according to feature map sizes.\n\n Args:\n featmap_sizes (list[tuple]): Multi-level feature map sizes.\n input_metas (list[dict]): contain pcd and img's meta info.\n device (str): device of current module.\n\n Returns:\n list[list[torch.Tensor]]: Anchors of each image, valid flags\n of each image.\n \"\"\"\n num_imgs = len(input_metas)\n # since feature map sizes of all images are the same, we only compute\n # anchors for one time\n multi_level_anchors = self.anchor_generator.grid_anchors(\n featmap_sizes, device=device)\n anchor_list = [multi_level_anchors for _ in range(num_imgs)]\n return anchor_list\n\n def loss_single(self, cls_score, bbox_pred, dir_cls_preds, labels,\n label_weights, bbox_targets, bbox_weights, dir_targets,\n dir_weights, num_total_samples):\n \"\"\"Calculate loss of Single-level results.\n\n Args:\n cls_score (torch.Tensor): Class score in single-level.\n bbox_pred (torch.Tensor): Bbox prediction in single-level.\n dir_cls_preds (torch.Tensor): Predictions of direction class\n in single-level.\n labels (torch.Tensor): Labels of class.\n label_weights (torch.Tensor): Weights of class loss.\n bbox_targets (torch.Tensor): Targets of bbox predictions.\n bbox_weights (torch.Tensor): Weights of bbox loss.\n dir_targets (torch.Tensor): Targets of 
direction predictions.\n dir_weights (torch.Tensor): Weights of direction loss.\n num_total_samples (int): The number of valid samples.\n\n Returns:\n tuple[torch.Tensor]: Losses of class, bbox\n and direction, respectively.\n \"\"\"\n # classification loss\n if num_total_samples is None:\n num_total_samples = int(cls_score.shape[0])\n labels = labels.reshape(-1)\n label_weights = label_weights.reshape(-1)\n cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.num_classes)\n assert labels.max().item() <= self.num_classes\n loss_cls = self.loss_cls(\n cls_score, labels, label_weights, avg_factor=num_total_samples)\n\n # regression loss\n bbox_pred = bbox_pred.permute(0, 2, 3,\n 1).reshape(-1, self.box_code_size)\n bbox_targets = bbox_targets.reshape(-1, self.box_code_size)\n bbox_weights = bbox_weights.reshape(-1, self.box_code_size)\n\n bg_class_ind = self.num_classes\n pos_inds = ((labels >= 0)\n & (labels < bg_class_ind)).nonzero(\n as_tuple=False).reshape(-1)\n num_pos = len(pos_inds)\n\n pos_bbox_pred = bbox_pred[pos_inds]\n pos_bbox_targets = bbox_targets[pos_inds]\n pos_bbox_weights = bbox_weights[pos_inds]\n\n # dir loss\n if self.use_direction_classifier:\n dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).reshape(-1, 2)\n dir_targets = dir_targets.reshape(-1)\n dir_weights = dir_weights.reshape(-1)\n pos_dir_cls_preds = dir_cls_preds[pos_inds]\n pos_dir_targets = dir_targets[pos_inds]\n pos_dir_weights = dir_weights[pos_inds]\n\n if num_pos > 0:\n code_weight = self.train_cfg.get('code_weight', None)\n if code_weight:\n pos_bbox_weights = pos_bbox_weights * bbox_weights.new_tensor(\n code_weight)\n if self.diff_rad_by_sin:\n pos_bbox_pred, pos_bbox_targets = self.add_sin_difference(\n pos_bbox_pred, pos_bbox_targets)\n loss_bbox = self.loss_bbox(\n pos_bbox_pred,\n pos_bbox_targets,\n pos_bbox_weights,\n avg_factor=num_total_samples)\n\n # direction classification loss\n loss_dir = None\n if self.use_direction_classifier:\n loss_dir = self.loss_dir(\n pos_dir_cls_preds,\n pos_dir_targets,\n pos_dir_weights,\n avg_factor=num_total_samples)\n else:\n loss_bbox = pos_bbox_pred.sum()\n if self.use_direction_classifier:\n loss_dir = pos_dir_cls_preds.sum()\n\n return loss_cls, loss_bbox, loss_dir\n\n @staticmethod\n def add_sin_difference(boxes1, boxes2):\n \"\"\"Convert the rotation difference to difference in sine function.\n\n Args:\n boxes1 (torch.Tensor): Original Boxes in shape (NxC), where C>=7\n and the 7th dimension is rotation dimension.\n boxes2 (torch.Tensor): Target boxes in shape (NxC), where C>=7 and\n the 7th dimension is rotation dimension.\n\n Returns:\n tuple[torch.Tensor]: ``boxes1`` and ``boxes2`` whose 7th\n dimensions are changed.\n \"\"\"\n rad_pred_encoding = torch.sin(boxes1[..., 6:7]) * torch.cos(\n boxes2[..., 6:7])\n rad_tg_encoding = torch.cos(boxes1[..., 6:7]) * torch.sin(boxes2[...,\n 6:7])\n boxes1 = torch.cat(\n [boxes1[..., :6], rad_pred_encoding, boxes1[..., 7:]], dim=-1)\n boxes2 = torch.cat([boxes2[..., :6], rad_tg_encoding, boxes2[..., 7:]],\n dim=-1)\n return boxes1, boxes2\n\n @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'dir_cls_preds'))\n def loss(self,\n cls_scores,\n bbox_preds,\n dir_cls_preds,\n gt_bboxes,\n gt_labels,\n input_metas,\n gt_bboxes_ignore=None):\n \"\"\"Calculate losses.\n\n Args:\n cls_scores (list[torch.Tensor]): Multi-level class scores.\n bbox_preds (list[torch.Tensor]): Multi-level bbox predictions.\n dir_cls_preds (list[torch.Tensor]): Multi-level direction\n class predictions.\n gt_bboxes 
(list[:obj:`BaseInstance3DBoxes`]): Gt bboxes\n of each sample.\n gt_labels (list[torch.Tensor]): Gt labels of each sample.\n input_metas (list[dict]): Contain pcd and img's meta info.\n gt_bboxes_ignore (list[torch.Tensor]): Specify\n which bounding boxes to ignore.\n\n Returns:\n dict[str, list[torch.Tensor]]: Classification, bbox, and\n direction losses of each level.\n\n - loss_cls (list[torch.Tensor]): Classification losses.\n - loss_bbox (list[torch.Tensor]): Box regression losses.\n - loss_dir (list[torch.Tensor]): Direction classification\n losses.\n \"\"\"\n featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n assert len(featmap_sizes) == self.anchor_generator.num_levels\n device = cls_scores[0].device\n anchor_list = self.get_anchors(\n featmap_sizes, input_metas, device=device)\n label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1\n cls_reg_targets = self.anchor_target_3d(\n anchor_list,\n gt_bboxes,\n input_metas,\n gt_bboxes_ignore_list=gt_bboxes_ignore,\n gt_labels_list=gt_labels,\n num_classes=self.num_classes,\n label_channels=label_channels,\n sampling=self.sampling)\n\n if cls_reg_targets is None:\n return None\n (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n dir_targets_list, dir_weights_list, num_total_pos,\n num_total_neg) = cls_reg_targets\n num_total_samples = (\n num_total_pos + num_total_neg if self.sampling else num_total_pos)\n\n # num_total_samples = None\n losses_cls, losses_bbox, losses_dir = multi_apply(\n self.loss_single,\n cls_scores,\n bbox_preds,\n dir_cls_preds,\n labels_list,\n label_weights_list,\n bbox_targets_list,\n bbox_weights_list,\n dir_targets_list,\n dir_weights_list,\n num_total_samples=num_total_samples)\n return dict(\n loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dir=losses_dir)\n\n def get_bboxes(self,\n cls_scores,\n bbox_preds,\n dir_cls_preds,\n input_metas,\n cfg=None,\n rescale=False):\n \"\"\"Get bboxes of anchor head.\n\n Args:\n cls_scores (list[torch.Tensor]): Multi-level class scores.\n bbox_preds (list[torch.Tensor]): Multi-level bbox predictions.\n dir_cls_preds (list[torch.Tensor]): Multi-level direction\n class predictions.\n input_metas (list[dict]): Contain pcd and img's meta info.\n cfg (:obj:`ConfigDict`): Training or testing config.\n rescale (list[torch.Tensor]): Whether th rescale bbox.\n\n Returns:\n list[tuple]: Prediction resultes of batches.\n \"\"\"\n assert len(cls_scores) == len(bbox_preds)\n assert len(cls_scores) == len(dir_cls_preds)\n num_levels = len(cls_scores)\n featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]\n device = cls_scores[0].device\n mlvl_anchors = self.anchor_generator.grid_anchors(\n featmap_sizes, device=device)\n mlvl_anchors = [\n anchor.reshape(-1, self.box_code_size) for anchor in mlvl_anchors\n ]\n\n result_list = []\n for img_id in range(len(input_metas)):\n cls_score_list = [\n cls_scores[i][img_id].detach() for i in range(num_levels)\n ]\n bbox_pred_list = [\n bbox_preds[i][img_id].detach() for i in range(num_levels)\n ]\n dir_cls_pred_list = [\n dir_cls_preds[i][img_id].detach() for i in range(num_levels)\n ]\n\n input_meta = input_metas[img_id]\n proposals = self.get_bboxes_single(cls_score_list, bbox_pred_list,\n dir_cls_pred_list, mlvl_anchors,\n input_meta, cfg, rescale)\n result_list.append(proposals)\n return result_list\n\n def get_bboxes_single(self,\n cls_scores,\n bbox_preds,\n dir_cls_preds,\n mlvl_anchors,\n input_meta,\n cfg=None,\n rescale=False):\n \"\"\"Get bboxes of single 
branch.\n\n Args:\n cls_scores (torch.Tensor): Class score in single batch.\n bbox_preds (torch.Tensor): Bbox prediction in single batch.\n dir_cls_preds (torch.Tensor): Predictions of direction class\n in single batch.\n mlvl_anchors (List[torch.Tensor]): Multi-level anchors\n in single batch.\n input_meta (list[dict]): Contain pcd and img's meta info.\n cfg (:obj:`ConfigDict`): Training or testing config.\n rescale (list[torch.Tensor]): whether th rescale bbox.\n\n Returns:\n tuple: Contain predictions of single batch.\n\n - bboxes (:obj:`BaseInstance3DBoxes`): Predicted 3d bboxes.\n - scores (torch.Tensor): Class score of each bbox.\n - labels (torch.Tensor): Label of each bbox.\n \"\"\"\n cfg = self.test_cfg if cfg is None else cfg\n assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)\n mlvl_bboxes = []\n mlvl_scores = []\n mlvl_dir_scores = []\n for cls_score, bbox_pred, dir_cls_pred, anchors in zip(\n cls_scores, bbox_preds, dir_cls_preds, mlvl_anchors):\n assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n assert cls_score.size()[-2:] == dir_cls_pred.size()[-2:]\n dir_cls_pred = dir_cls_pred.permute(1, 2, 0).reshape(-1, 2)\n dir_cls_score = torch.max(dir_cls_pred, dim=-1)[1]\n\n cls_score = cls_score.permute(1, 2,\n 0).reshape(-1, self.num_classes)\n if self.use_sigmoid_cls:\n scores = cls_score.sigmoid()\n else:\n scores = cls_score.softmax(-1)\n bbox_pred = bbox_pred.permute(1, 2,\n 0).reshape(-1, self.box_code_size)\n\n nms_pre = cfg.get('nms_pre', -1)\n if nms_pre > 0 and scores.shape[0] > nms_pre:\n if self.use_sigmoid_cls:\n max_scores, _ = scores.max(dim=1)\n else:\n max_scores, _ = scores[:, :-1].max(dim=1)\n _, topk_inds = max_scores.topk(nms_pre)\n anchors = anchors[topk_inds, :]\n bbox_pred = bbox_pred[topk_inds, :]\n scores = scores[topk_inds, :]\n dir_cls_score = dir_cls_score[topk_inds]\n\n bboxes = self.bbox_coder.decode(anchors, bbox_pred)\n mlvl_bboxes.append(bboxes)\n mlvl_scores.append(scores)\n mlvl_dir_scores.append(dir_cls_score)\n\n mlvl_bboxes = torch.cat(mlvl_bboxes)\n mlvl_bboxes_for_nms = xywhr2xyxyr(input_meta['box_type_3d'](\n mlvl_bboxes, box_dim=self.box_code_size).bev)\n mlvl_scores = torch.cat(mlvl_scores)\n mlvl_dir_scores = torch.cat(mlvl_dir_scores)\n\n if self.use_sigmoid_cls:\n # Add a dummy background class to the front when using sigmoid\n padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)\n mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)\n\n score_thr = cfg.get('score_thr', 0)\n results = box3d_multiclass_nms(mlvl_bboxes, mlvl_bboxes_for_nms,\n mlvl_scores, score_thr, cfg.max_num,\n cfg, mlvl_dir_scores)\n bboxes, scores, labels, dir_scores = results\n if bboxes.shape[0] > 0:\n dir_rot = limit_period(bboxes[..., 6] - self.dir_offset,\n self.dir_limit_offset, np.pi)\n bboxes[..., 6] = (\n dir_rot + self.dir_offset +\n np.pi * dir_scores.to(bboxes.dtype))\n bboxes = input_meta['box_type_3d'](bboxes, box_dim=self.box_code_size)\n return bboxes, scores, labels\n",
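Anchor3DHead.add_sin_difference in the entry above uses the identity sin(a - b) = sin(a)cos(b) - cos(a)sin(b): the yaw dimension of predictions and targets is replaced by sin(pred)*cos(target) and cos(pred)*sin(target), so an elementwise regression loss on the pair effectively penalizes sin(yaw_pred - yaw_target). A standalone sketch of that encoding on dummy 7-dim boxes (yaw stored in dimension 6, as in the head's box layout):

# Sketch of the sin-difference encoding from Anchor3DHead.add_sin_difference.
import torch

boxes1 = torch.tensor([[0., 0., 0., 1.6, 3.9, 1.56, 0.30]])  # predictions
boxes2 = torch.tensor([[0., 0., 0., 1.6, 3.9, 1.56, 0.10]])  # targets

rad_pred_encoding = torch.sin(boxes1[..., 6:7]) * torch.cos(boxes2[..., 6:7])
rad_tg_encoding = torch.cos(boxes1[..., 6:7]) * torch.sin(boxes2[..., 6:7])

# The difference of the two encodings equals sin(yaw_pred - yaw_target).
print(rad_pred_encoding - rad_tg_encoding)             # ~sin(0.2)
print(torch.sin(boxes1[..., 6:7] - boxes2[..., 6:7]))  # same value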
"# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import abstractmethod\n\nimport torch\nfrom mmcv.cnn import ConvModule, bias_init_with_prob, normal_init\nfrom mmcv.runner import force_fp32\nfrom torch import nn as nn\n\nfrom mmdet.core import multi_apply\nfrom mmdet.models.builder import HEADS, build_loss\nfrom .base_mono3d_dense_head import BaseMono3DDenseHead\n\n\[email protected]_module()\nclass AnchorFreeMono3DHead(BaseMono3DDenseHead):\n \"\"\"Anchor-free head for monocular 3D object detection.\n\n Args:\n num_classes (int): Number of categories excluding the background\n category.\n in_channels (int): Number of channels in the input feature map.\n feat_channels (int, optional): Number of hidden channels.\n Used in child classes. Defaults to 256.\n stacked_convs (int, optional): Number of stacking convs of the head.\n strides (tuple, optional): Downsample factor of each feature map.\n dcn_on_last_conv (bool, optional): If true, use dcn in the last\n layer of towers. Default: False.\n conv_bias (bool | str, optional): If specified as `auto`, it will be\n decided by the norm_cfg. Bias of conv will be set as True\n if `norm_cfg` is None, otherwise False. Default: 'auto'.\n background_label (int, optional): Label ID of background,\n set as 0 for RPN and num_classes for other heads.\n It will automatically set as `num_classes` if None is given.\n use_direction_classifier (bool, optional):\n Whether to add a direction classifier.\n diff_rad_by_sin (bool, optional): Whether to change the difference\n into sin difference for box regression loss. Defaults to True.\n dir_offset (float, optional): Parameter used in direction\n classification. Defaults to 0.\n dir_limit_offset (float, optional): Parameter used in direction\n classification. Defaults to 0.\n loss_cls (dict, optional): Config of classification loss.\n loss_bbox (dict, optional): Config of localization loss.\n loss_dir (dict, optional): Config of direction classifier loss.\n loss_attr (dict, optional): Config of attribute classifier loss,\n which is only active when `pred_attrs=True`.\n bbox_code_size (int, optional): Dimensions of predicted bounding boxes.\n pred_attrs (bool, optional): Whether to predict attributes.\n Defaults to False.\n num_attrs (int, optional): The number of attributes to be predicted.\n Default: 9.\n pred_velo (bool, optional): Whether to predict velocity.\n Defaults to False.\n pred_bbox2d (bool, optional): Whether to predict 2D boxes.\n Defaults to False.\n group_reg_dims (tuple[int], optional): The dimension of each regression\n target group. Default: (2, 1, 3, 1, 2).\n cls_branch (tuple[int], optional): Channels for classification branch.\n Default: (128, 64).\n reg_branch (tuple[tuple], optional): Channels for regression branch.\n Default: (\n (128, 64), # offset\n (128, 64), # depth\n (64, ), # size\n (64, ), # rot\n () # velo\n ),\n dir_branch (tuple[int], optional): Channels for direction\n classification branch. 
Default: (64, ).\n attr_branch (tuple[int], optional): Channels for classification branch.\n Default: (64, ).\n conv_cfg (dict, optional): Config dict for convolution layer.\n Default: None.\n norm_cfg (dict, optional): Config dict for normalization layer.\n Default: None.\n train_cfg (dict, optional): Training config of anchor head.\n test_cfg (dict, optional): Testing config of anchor head.\n \"\"\" # noqa: W605\n\n _version = 1\n\n def __init__(\n self,\n num_classes,\n in_channels,\n feat_channels=256,\n stacked_convs=4,\n strides=(4, 8, 16, 32, 64),\n dcn_on_last_conv=False,\n conv_bias='auto',\n background_label=None,\n use_direction_classifier=True,\n diff_rad_by_sin=True,\n dir_offset=0,\n dir_limit_offset=0,\n loss_cls=dict(\n type='FocalLoss',\n use_sigmoid=True,\n gamma=2.0,\n alpha=0.25,\n loss_weight=1.0),\n loss_bbox=dict(\n type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),\n loss_dir=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n loss_attr=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n bbox_code_size=9, # For nuscenes\n pred_attrs=False,\n num_attrs=9, # For nuscenes\n pred_velo=False,\n pred_bbox2d=False,\n group_reg_dims=(2, 1, 3, 1, 2), # offset, depth, size, rot, velo,\n cls_branch=(128, 64),\n reg_branch=(\n (128, 64), # offset\n (128, 64), # depth\n (64, ), # size\n (64, ), # rot\n () # velo\n ),\n dir_branch=(64, ),\n attr_branch=(64, ),\n conv_cfg=None,\n norm_cfg=None,\n train_cfg=None,\n test_cfg=None,\n init_cfg=None):\n super(AnchorFreeMono3DHead, self).__init__(init_cfg=init_cfg)\n self.num_classes = num_classes\n self.cls_out_channels = num_classes\n self.in_channels = in_channels\n self.feat_channels = feat_channels\n self.stacked_convs = stacked_convs\n self.strides = strides\n self.dcn_on_last_conv = dcn_on_last_conv\n assert conv_bias == 'auto' or isinstance(conv_bias, bool)\n self.conv_bias = conv_bias\n self.use_direction_classifier = use_direction_classifier\n self.diff_rad_by_sin = diff_rad_by_sin\n self.dir_offset = dir_offset\n self.dir_limit_offset = dir_limit_offset\n self.loss_cls = build_loss(loss_cls)\n self.loss_bbox = build_loss(loss_bbox)\n self.loss_dir = build_loss(loss_dir)\n self.bbox_code_size = bbox_code_size\n self.group_reg_dims = list(group_reg_dims)\n self.cls_branch = cls_branch\n self.reg_branch = reg_branch\n assert len(reg_branch) == len(group_reg_dims), 'The number of '\\\n 'element in reg_branch and group_reg_dims should be the same.'\n self.pred_velo = pred_velo\n self.pred_bbox2d = pred_bbox2d\n self.out_channels = []\n for reg_branch_channels in reg_branch:\n if len(reg_branch_channels) > 0:\n self.out_channels.append(reg_branch_channels[-1])\n else:\n self.out_channels.append(-1)\n self.dir_branch = dir_branch\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.fp16_enabled = False\n self.background_label = (\n num_classes if background_label is None else background_label)\n # background_label should be either 0 or num_classes\n assert (self.background_label == 0\n or self.background_label == num_classes)\n self.pred_attrs = pred_attrs\n self.attr_background_label = -1\n self.num_attrs = num_attrs\n if self.pred_attrs:\n self.attr_background_label = num_attrs\n self.loss_attr = build_loss(loss_attr)\n self.attr_branch = attr_branch\n\n self._init_layers()\n\n def _init_layers(self):\n \"\"\"Initialize layers of the head.\"\"\"\n self._init_cls_convs()\n self._init_reg_convs()\n self._init_predictor()\n\n 
def _init_cls_convs(self):\n \"\"\"Initialize classification conv layers of the head.\"\"\"\n self.cls_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n if self.dcn_on_last_conv and i == self.stacked_convs - 1:\n conv_cfg = dict(type='DCNv2')\n else:\n conv_cfg = self.conv_cfg\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=self.norm_cfg,\n bias=self.conv_bias))\n\n def _init_reg_convs(self):\n \"\"\"Initialize bbox regression conv layers of the head.\"\"\"\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n if self.dcn_on_last_conv and i == self.stacked_convs - 1:\n conv_cfg = dict(type='DCNv2')\n else:\n conv_cfg = self.conv_cfg\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=self.norm_cfg,\n bias=self.conv_bias))\n\n def _init_branch(self, conv_channels=(64), conv_strides=(1)):\n \"\"\"Initialize conv layers as a prediction branch.\"\"\"\n conv_before_pred = nn.ModuleList()\n if isinstance(conv_channels, int):\n conv_channels = [self.feat_channels] + [conv_channels]\n conv_strides = [conv_strides]\n else:\n conv_channels = [self.feat_channels] + list(conv_channels)\n conv_strides = list(conv_strides)\n for i in range(len(conv_strides)):\n conv_before_pred.append(\n ConvModule(\n conv_channels[i],\n conv_channels[i + 1],\n 3,\n stride=conv_strides[i],\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n bias=self.conv_bias))\n\n return conv_before_pred\n\n def _init_predictor(self):\n \"\"\"Initialize predictor layers of the head.\"\"\"\n self.conv_cls_prev = self._init_branch(\n conv_channels=self.cls_branch,\n conv_strides=(1, ) * len(self.cls_branch))\n self.conv_cls = nn.Conv2d(self.cls_branch[-1], self.cls_out_channels,\n 1)\n self.conv_reg_prevs = nn.ModuleList()\n self.conv_regs = nn.ModuleList()\n for i in range(len(self.group_reg_dims)):\n reg_dim = self.group_reg_dims[i]\n reg_branch_channels = self.reg_branch[i]\n out_channel = self.out_channels[i]\n if len(reg_branch_channels) > 0:\n self.conv_reg_prevs.append(\n self._init_branch(\n conv_channels=reg_branch_channels,\n conv_strides=(1, ) * len(reg_branch_channels)))\n self.conv_regs.append(nn.Conv2d(out_channel, reg_dim, 1))\n else:\n self.conv_reg_prevs.append(None)\n self.conv_regs.append(\n nn.Conv2d(self.feat_channels, reg_dim, 1))\n if self.use_direction_classifier:\n self.conv_dir_cls_prev = self._init_branch(\n conv_channels=self.dir_branch,\n conv_strides=(1, ) * len(self.dir_branch))\n self.conv_dir_cls = nn.Conv2d(self.dir_branch[-1], 2, 1)\n if self.pred_attrs:\n self.conv_attr_prev = self._init_branch(\n conv_channels=self.attr_branch,\n conv_strides=(1, ) * len(self.attr_branch))\n self.conv_attr = nn.Conv2d(self.attr_branch[-1], self.num_attrs, 1)\n\n def init_weights(self):\n \"\"\"Initialize weights of the head.\n\n We currently still use the customized defined init_weights because the\n default init of DCN triggered by the init_cfg will init\n conv_offset.weight, which mistakenly affects the training stability.\n \"\"\"\n for modules in [self.cls_convs, self.reg_convs, self.conv_cls_prev]:\n for m in modules:\n if isinstance(m.conv, nn.Conv2d):\n normal_init(m.conv, std=0.01)\n for conv_reg_prev in self.conv_reg_prevs:\n if conv_reg_prev is None:\n continue\n for m in 
conv_reg_prev:\n if isinstance(m.conv, nn.Conv2d):\n normal_init(m.conv, std=0.01)\n if self.use_direction_classifier:\n for m in self.conv_dir_cls_prev:\n if isinstance(m.conv, nn.Conv2d):\n normal_init(m.conv, std=0.01)\n if self.pred_attrs:\n for m in self.conv_attr_prev:\n if isinstance(m.conv, nn.Conv2d):\n normal_init(m.conv, std=0.01)\n bias_cls = bias_init_with_prob(0.01)\n normal_init(self.conv_cls, std=0.01, bias=bias_cls)\n for conv_reg in self.conv_regs:\n normal_init(conv_reg, std=0.01)\n if self.use_direction_classifier:\n normal_init(self.conv_dir_cls, std=0.01, bias=bias_cls)\n if self.pred_attrs:\n normal_init(self.conv_attr, std=0.01, bias=bias_cls)\n\n def forward(self, feats):\n \"\"\"Forward features from the upstream network.\n\n Args:\n feats (tuple[Tensor]): Features from the upstream network, each is\n a 4D-tensor.\n\n Returns:\n tuple: Usually contain classification scores, bbox predictions,\n and direction class predictions.\n cls_scores (list[Tensor]): Box scores for each scale level,\n each is a 4D-tensor, the channel number is\n num_points * num_classes.\n bbox_preds (list[Tensor]): Box energies / deltas for each scale\n level, each is a 4D-tensor, the channel number is\n num_points * bbox_code_size.\n dir_cls_preds (list[Tensor]): Box scores for direction class\n predictions on each scale level, each is a 4D-tensor,\n the channel number is num_points * 2. (bin = 2)\n attr_preds (list[Tensor]): Attribute scores for each scale\n level, each is a 4D-tensor, the channel number is\n num_points * num_attrs.\n \"\"\"\n return multi_apply(self.forward_single, feats)[:5]\n\n def forward_single(self, x):\n \"\"\"Forward features of a single scale level.\n\n Args:\n x (Tensor): FPN feature maps of the specified stride.\n\n Returns:\n tuple: Scores for each class, bbox predictions, direction class,\n and attributes, features after classification and regression\n conv layers, some models needs these features like FCOS.\n \"\"\"\n cls_feat = x\n reg_feat = x\n\n for cls_layer in self.cls_convs:\n cls_feat = cls_layer(cls_feat)\n # clone the cls_feat for reusing the feature map afterwards\n clone_cls_feat = cls_feat.clone()\n for conv_cls_prev_layer in self.conv_cls_prev:\n clone_cls_feat = conv_cls_prev_layer(clone_cls_feat)\n cls_score = self.conv_cls(clone_cls_feat)\n\n for reg_layer in self.reg_convs:\n reg_feat = reg_layer(reg_feat)\n bbox_pred = []\n for i in range(len(self.group_reg_dims)):\n # clone the reg_feat for reusing the feature map afterwards\n clone_reg_feat = reg_feat.clone()\n if len(self.reg_branch[i]) > 0:\n for conv_reg_prev_layer in self.conv_reg_prevs[i]:\n clone_reg_feat = conv_reg_prev_layer(clone_reg_feat)\n bbox_pred.append(self.conv_regs[i](clone_reg_feat))\n bbox_pred = torch.cat(bbox_pred, dim=1)\n\n dir_cls_pred = None\n if self.use_direction_classifier:\n clone_reg_feat = reg_feat.clone()\n for conv_dir_cls_prev_layer in self.conv_dir_cls_prev:\n clone_reg_feat = conv_dir_cls_prev_layer(clone_reg_feat)\n dir_cls_pred = self.conv_dir_cls(clone_reg_feat)\n\n attr_pred = None\n if self.pred_attrs:\n # clone the cls_feat for reusing the feature map afterwards\n clone_cls_feat = cls_feat.clone()\n for conv_attr_prev_layer in self.conv_attr_prev:\n clone_cls_feat = conv_attr_prev_layer(clone_cls_feat)\n attr_pred = self.conv_attr(clone_cls_feat)\n\n return cls_score, bbox_pred, dir_cls_pred, attr_pred, cls_feat, \\\n reg_feat\n\n @abstractmethod\n @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'dir_cls_preds'))\n def loss(self,\n cls_scores,\n 
bbox_preds,\n dir_cls_preds,\n attr_preds,\n gt_bboxes,\n gt_labels,\n gt_bboxes_3d,\n gt_labels_3d,\n centers2d,\n depths,\n attr_labels,\n img_metas,\n gt_bboxes_ignore=None):\n \"\"\"Compute loss of the head.\n\n Args:\n cls_scores (list[Tensor]): Box scores for each scale level,\n each is a 4D-tensor, the channel number is\n num_points * num_classes.\n bbox_preds (list[Tensor]): Box energies / deltas for each scale\n level, each is a 4D-tensor, the channel number is\n num_points * bbox_code_size.\n dir_cls_preds (list[Tensor]): Box scores for direction class\n predictions on each scale level, each is a 4D-tensor,\n the channel number is num_points * 2. (bin = 2)\n attr_preds (list[Tensor]): Box scores for each scale level,\n each is a 4D-tensor, the channel number is\n num_points * num_attrs.\n gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n gt_labels (list[Tensor]): class indices corresponding to each box\n gt_bboxes_3d (list[Tensor]): 3D Ground truth bboxes for each\n image with shape (num_gts, bbox_code_size).\n gt_labels_3d (list[Tensor]): 3D class indices of each box.\n centers2d (list[Tensor]): Projected 3D centers onto 2D images.\n depths (list[Tensor]): Depth of projected centers on 2D images.\n attr_labels (list[Tensor], optional): Attribute indices\n corresponding to each box\n img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n gt_bboxes_ignore (list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n \"\"\"\n\n raise NotImplementedError\n\n @abstractmethod\n @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'dir_cls_preds'))\n def get_bboxes(self,\n cls_scores,\n bbox_preds,\n dir_cls_preds,\n attr_preds,\n img_metas,\n cfg=None,\n rescale=None):\n \"\"\"Transform network output for a batch into bbox predictions.\n\n Args:\n cls_scores (list[Tensor]): Box scores for each scale level\n Has shape (N, num_points * num_classes, H, W)\n bbox_preds (list[Tensor]): Box energies / deltas for each scale\n level with shape (N, num_points * bbox_code_size, H, W)\n dir_cls_preds (list[Tensor]): Box scores for direction class\n predictions on each scale level, each is a 4D-tensor,\n the channel number is num_points * 2. 
(bin = 2)\n attr_preds (list[Tensor]): Attribute scores for each scale level\n Has shape (N, num_points * num_attrs, H, W)\n img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n cfg (mmcv.Config): Test / postprocessing configuration,\n if None, test_cfg would be used\n rescale (bool): If True, return boxes in original image space\n \"\"\"\n\n raise NotImplementedError\n\n @abstractmethod\n def get_targets(self, points, gt_bboxes_list, gt_labels_list,\n gt_bboxes_3d_list, gt_labels_3d_list, centers2d_list,\n depths_list, attr_labels_list):\n \"\"\"Compute regression, classification and centerss targets for points\n in multiple images.\n\n Args:\n points (list[Tensor]): Points of each fpn level, each has shape\n (num_points, 2).\n gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,\n each has shape (num_gt, 4).\n gt_labels_list (list[Tensor]): Ground truth labels of each box,\n each has shape (num_gt,).\n gt_bboxes_3d_list (list[Tensor]): 3D Ground truth bboxes of each\n image, each has shape (num_gt, bbox_code_size).\n gt_labels_3d_list (list[Tensor]): 3D Ground truth labels of each\n box, each has shape (num_gt,).\n centers2d_list (list[Tensor]): Projected 3D centers onto 2D image,\n each has shape (num_gt, 2).\n depths_list (list[Tensor]): Depth of projected 3D centers onto 2D\n image, each has shape (num_gt, 1).\n attr_labels_list (list[Tensor]): Attribute labels of each box,\n each has shape (num_gt,).\n \"\"\"\n raise NotImplementedError\n\n def _get_points_single(self,\n featmap_size,\n stride,\n dtype,\n device,\n flatten=False):\n \"\"\"Get points of a single scale level.\"\"\"\n h, w = featmap_size\n x_range = torch.arange(w, dtype=dtype, device=device)\n y_range = torch.arange(h, dtype=dtype, device=device)\n y, x = torch.meshgrid(y_range, x_range)\n if flatten:\n y = y.flatten()\n x = x.flatten()\n return y, x\n\n def get_points(self, featmap_sizes, dtype, device, flatten=False):\n \"\"\"Get points according to feature map sizes.\n\n Args:\n featmap_sizes (list[tuple]): Multi-level feature map sizes.\n dtype (torch.dtype): Type of points.\n device (torch.device): Device of points.\n\n Returns:\n tuple: points of each image.\n \"\"\"\n mlvl_points = []\n for i in range(len(featmap_sizes)):\n mlvl_points.append(\n self._get_points_single(featmap_sizes[i], self.strides[i],\n dtype, device, flatten))\n return mlvl_points\n",
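AnchorFreeMono3DHead._get_points_single in the entry above builds a per-pixel coordinate grid for one feature level with torch.meshgrid. A minimal sketch of the same grid construction for a hypothetical 2x3 feature map (the stride argument is unused by the base implementation shown above):

# Sketch of the coordinate grid built by _get_points_single for a 2x3 map.
import torch

h, w = 2, 3
x_range = torch.arange(w, dtype=torch.float32)
y_range = torch.arange(h, dtype=torch.float32)
y, x = torch.meshgrid(y_range, x_range)  # each of shape (h, w)
print(y.flatten())  # tensor([0., 0., 0., 1., 1., 1.])
print(x.flatten())  # tensor([0., 1., 2., 0., 1., 2.])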
"# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nfrom mmcv.runner import get_dist_info\nfrom torch import distributed as dist\n\nfrom mmdet.apis import train_detector\nfrom mmseg.apis import train_segmentor\n\n\ndef init_random_seed(seed=None, device='cuda'):\n \"\"\"Initialize random seed.\n\n If the seed is not set, the seed will be automatically randomized,\n and then broadcast to all processes to prevent some potential bugs.\n Args:\n seed (int, optional): The seed. Default to None.\n device (str, optional): The device where the seed will be put on.\n Default to 'cuda'.\n Returns:\n int: Seed to be used.\n \"\"\"\n if seed is not None:\n return seed\n\n # Make sure all ranks share the same random seed to prevent\n # some potential bugs. Please refer to\n # https://github.com/open-mmlab/mmdetection/issues/6339\n rank, world_size = get_dist_info()\n seed = np.random.randint(2**31)\n if world_size == 1:\n return seed\n\n if rank == 0:\n random_num = torch.tensor(seed, dtype=torch.int32, device=device)\n else:\n random_num = torch.tensor(0, dtype=torch.int32, device=device)\n dist.broadcast(random_num, src=0)\n return random_num.item()\n\n\ndef train_model(model,\n dataset,\n cfg,\n distributed=False,\n validate=False,\n timestamp=None,\n meta=None):\n \"\"\"A function wrapper for launching model training according to cfg.\n\n Because we need different eval_hook in runner. Should be deprecated in the\n future.\n \"\"\"\n if cfg.model.type in ['EncoderDecoder3D']:\n train_segmentor(\n model,\n dataset,\n cfg,\n distributed=distributed,\n validate=validate,\n timestamp=timestamp,\n meta=meta)\n else:\n train_detector(\n model,\n dataset,\n cfg,\n distributed=distributed,\n validate=validate,\n timestamp=timestamp,\n meta=meta)\n",
"# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport pytest\nimport torch\n\n\ndef test_dgcnn_gf_module():\n if not torch.cuda.is_available():\n pytest.skip()\n from mmdet3d.ops import DGCNNGFModule\n\n self = DGCNNGFModule(\n mlp_channels=[18, 64, 64],\n num_sample=20,\n knn_mod='D-KNN',\n radius=None,\n norm_cfg=dict(type='BN2d'),\n act_cfg=dict(type='ReLU'),\n pool_mod='max').cuda()\n\n assert self.mlps[0].layer0.conv.in_channels == 18\n assert self.mlps[0].layer0.conv.out_channels == 64\n\n xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', np.float32)\n\n # (B, N, C)\n xyz = torch.from_numpy(xyz).view(1, -1, 3).cuda()\n points = xyz.repeat([1, 1, 3])\n\n # test forward\n new_points = self(points)\n\n assert new_points.shape == torch.Size([1, 200, 64])\n\n # test F-KNN mod\n self = DGCNNGFModule(\n mlp_channels=[6, 64, 64],\n num_sample=20,\n knn_mod='F-KNN',\n radius=None,\n norm_cfg=dict(type='BN2d'),\n act_cfg=dict(type='ReLU'),\n pool_mod='max').cuda()\n\n # test forward\n new_points = self(xyz)\n assert new_points.shape == torch.Size([1, 200, 64])\n\n # test ball query\n self = DGCNNGFModule(\n mlp_channels=[6, 64, 64],\n num_sample=20,\n knn_mod='F-KNN',\n radius=0.2,\n norm_cfg=dict(type='BN2d'),\n act_cfg=dict(type='ReLU'),\n pool_mod='max').cuda()\n\n\ndef test_dgcnn_fa_module():\n if not torch.cuda.is_available():\n pytest.skip()\n from mmdet3d.ops import DGCNNFAModule\n\n self = DGCNNFAModule(mlp_channels=[24, 16]).cuda()\n assert self.mlps.layer0.conv.in_channels == 24\n assert self.mlps.layer0.conv.out_channels == 16\n\n points = [torch.rand(1, 200, 12).float().cuda() for _ in range(3)]\n\n fa_points = self(points)\n assert fa_points.shape == torch.Size([1, 200, 40])\n\n\ndef test_dgcnn_fp_module():\n if not torch.cuda.is_available():\n pytest.skip()\n from mmdet3d.ops import DGCNNFPModule\n\n self = DGCNNFPModule(mlp_channels=[24, 16]).cuda()\n assert self.mlps.layer0.conv.in_channels == 24\n assert self.mlps.layer0.conv.out_channels == 16\n\n xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin',\n np.float32).reshape((-1, 6))\n\n # (B, N, 3)\n xyz = torch.from_numpy(xyz).view(1, -1, 3).cuda()\n points = xyz.repeat([1, 1, 8]).cuda()\n\n fp_points = self(points)\n assert fp_points.shape == torch.Size([1, 200, 16])\n",
"# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom torch import nn\nfrom torch.autograd import Function\n\nfrom .voxel_layer import (dynamic_point_to_voxel_backward,\n dynamic_point_to_voxel_forward)\n\n\nclass _dynamic_scatter(Function):\n\n @staticmethod\n def forward(ctx, feats, coors, reduce_type='max'):\n \"\"\"convert kitti points(N, >=3) to voxels.\n\n Args:\n feats: [N, C] float tensor. points features to be reduced\n into voxels.\n coors: [N, ndim] int tensor. corresponding voxel coordinates\n (specifically multi-dim voxel index) of each points.\n reduce_type: str. reduce op. support 'max', 'sum' and 'mean'\n Returns:\n tuple\n voxel_feats: [M, C] float tensor. reduced features. input features\n that shares the same voxel coordinates are reduced to one row\n coordinates: [M, ndim] int tensor, voxel coordinates.\n \"\"\"\n results = dynamic_point_to_voxel_forward(feats, coors, reduce_type)\n (voxel_feats, voxel_coors, point2voxel_map,\n voxel_points_count) = results\n ctx.reduce_type = reduce_type\n ctx.save_for_backward(feats, voxel_feats, point2voxel_map,\n voxel_points_count)\n ctx.mark_non_differentiable(voxel_coors)\n return voxel_feats, voxel_coors\n\n @staticmethod\n def backward(ctx, grad_voxel_feats, grad_voxel_coors=None):\n (feats, voxel_feats, point2voxel_map,\n voxel_points_count) = ctx.saved_tensors\n grad_feats = torch.zeros_like(feats)\n # TODO: whether to use index put or use cuda_backward\n # To use index put, need point to voxel index\n dynamic_point_to_voxel_backward(grad_feats,\n grad_voxel_feats.contiguous(), feats,\n voxel_feats, point2voxel_map,\n voxel_points_count, ctx.reduce_type)\n return grad_feats, None, None\n\n\ndynamic_scatter = _dynamic_scatter.apply\n\n\nclass DynamicScatter(nn.Module):\n\n def __init__(self, voxel_size, point_cloud_range, average_points: bool):\n super(DynamicScatter, self).__init__()\n \"\"\"Scatters points into voxels, used in the voxel encoder with\n dynamic voxelization\n\n **Note**: The CPU and GPU implementation get the same output, but\n have numerical difference after summation and division (e.g., 5e-7).\n\n Args:\n average_points (bool): whether to use avg pooling to scatter\n points into voxel voxel_size (list): list [x, y, z] size\n of three dimension\n point_cloud_range (list):\n [x_min, y_min, z_min, x_max, y_max, z_max]\n \"\"\"\n self.voxel_size = voxel_size\n self.point_cloud_range = point_cloud_range\n self.average_points = average_points\n\n def forward_single(self, points, coors):\n reduce = 'mean' if self.average_points else 'max'\n return dynamic_scatter(points.contiguous(), coors.contiguous(), reduce)\n\n def forward(self, points, coors):\n \"\"\"\n Args:\n input: NC points\n \"\"\"\n if coors.size(-1) == 3:\n return self.forward_single(points, coors)\n else:\n batch_size = coors[-1, 0] + 1\n voxels, voxel_coors = [], []\n for i in range(batch_size):\n inds = torch.where(coors[:, 0] == i)\n voxel, voxel_coor = self.forward_single(\n points[inds], coors[inds][:, 1:])\n coor_pad = nn.functional.pad(\n voxel_coor, (1, 0), mode='constant', value=i)\n voxel_coors.append(coor_pad)\n voxels.append(voxel)\n features = torch.cat(voxels, dim=0)\n feature_coors = torch.cat(voxel_coors, dim=0)\n\n return features, feature_coors\n\n def __repr__(self):\n tmpstr = self.__class__.__name__ + '('\n tmpstr += 'voxel_size=' + str(self.voxel_size)\n tmpstr += ', point_cloud_range=' + str(self.point_cloud_range)\n tmpstr += ', average_points=' + str(self.average_points)\n tmpstr += ')'\n return tmpstr\n",
"# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\n\nfrom mmdet.core.bbox.builder import BBOX_CODERS\nfrom .partial_bin_based_bbox_coder import PartialBinBasedBBoxCoder\n\n\n@BBOX_CODERS.register_module()\nclass GroupFree3DBBoxCoder(PartialBinBasedBBoxCoder):\n \"\"\"Modified partial bin based bbox coder for GroupFree3D.\n\n Args:\n num_dir_bins (int): Number of bins to encode direction angle.\n num_sizes (int): Number of size clusters.\n mean_sizes (list[list[int]]): Mean size of bboxes in each class.\n with_rot (bool, optional): Whether the bbox is with rotation.\n Defaults to True.\n size_cls_agnostic (bool, optional): Whether the predicted size is\n class-agnostic. Defaults to True.\n \"\"\"\n\n def __init__(self,\n num_dir_bins,\n num_sizes,\n mean_sizes,\n with_rot=True,\n size_cls_agnostic=True):\n super(GroupFree3DBBoxCoder, self).__init__(\n num_dir_bins=num_dir_bins,\n num_sizes=num_sizes,\n mean_sizes=mean_sizes,\n with_rot=with_rot)\n self.size_cls_agnostic = size_cls_agnostic\n\n def encode(self, gt_bboxes_3d, gt_labels_3d):\n \"\"\"Encode ground truth to prediction targets.\n\n Args:\n gt_bboxes_3d (BaseInstance3DBoxes): Ground truth bboxes\n with shape (n, 7).\n gt_labels_3d (torch.Tensor): Ground truth classes.\n\n Returns:\n tuple: Targets of center, size and direction.\n \"\"\"\n # generate center target\n center_target = gt_bboxes_3d.gravity_center\n\n # generate bbox size target\n size_target = gt_bboxes_3d.dims\n size_class_target = gt_labels_3d\n size_res_target = gt_bboxes_3d.dims - gt_bboxes_3d.tensor.new_tensor(\n self.mean_sizes)[size_class_target]\n\n # generate dir target\n box_num = gt_labels_3d.shape[0]\n if self.with_rot:\n (dir_class_target,\n dir_res_target) = self.angle2class(gt_bboxes_3d.yaw)\n else:\n dir_class_target = gt_labels_3d.new_zeros(box_num)\n dir_res_target = gt_bboxes_3d.tensor.new_zeros(box_num)\n\n return (center_target, size_target, size_class_target, size_res_target,\n dir_class_target, dir_res_target)\n\n def decode(self, bbox_out, prefix=''):\n \"\"\"Decode predicted parts to bbox3d.\n\n Args:\n bbox_out (dict): Predictions from model, should contain keys below.\n\n - center: predicted bottom center of bboxes.\n - dir_class: predicted bbox direction class.\n - dir_res: predicted bbox direction residual.\n - size_class: predicted bbox size class.\n - size_res: predicted bbox size residual.\n - size: predicted class-agnostic bbox size\n prefix (str, optional): Decode predictions with specific prefix.\n Defaults to ''.\n\n Returns:\n torch.Tensor: Decoded bbox3d with shape (batch, n, 7).\n \"\"\"\n center = bbox_out[f'{prefix}center']\n batch_size, num_proposal = center.shape[:2]\n\n # decode heading angle\n if self.with_rot:\n dir_class = torch.argmax(bbox_out[f'{prefix}dir_class'], -1)\n dir_res = torch.gather(bbox_out[f'{prefix}dir_res'], 2,\n dir_class.unsqueeze(-1))\n dir_res.squeeze_(2)\n dir_angle = self.class2angle(dir_class, dir_res).reshape(\n batch_size, num_proposal, 1)\n else:\n dir_angle = center.new_zeros(batch_size, num_proposal, 1)\n\n # decode bbox size\n if self.size_cls_agnostic:\n bbox_size = bbox_out[f'{prefix}size'].reshape(\n batch_size, num_proposal, 3)\n else:\n size_class = torch.argmax(\n bbox_out[f'{prefix}size_class'], -1, keepdim=True)\n size_res = torch.gather(\n bbox_out[f'{prefix}size_res'], 2,\n size_class.unsqueeze(-1).repeat(1, 1, 1, 3))\n mean_sizes = center.new_tensor(self.mean_sizes)\n size_base = torch.index_select(mean_sizes, 0,\n size_class.reshape(-1))\n bbox_size 
= size_base.reshape(batch_size, num_proposal,\n -1) + size_res.squeeze(2)\n\n bbox3d = torch.cat([center, bbox_size, dir_angle], dim=-1)\n return bbox3d\n\n def split_pred(self, cls_preds, reg_preds, base_xyz, prefix=''):\n \"\"\"Split predicted features to specific parts.\n\n Args:\n cls_preds (torch.Tensor): Class predicted features to split.\n reg_preds (torch.Tensor): Regression predicted features to split.\n base_xyz (torch.Tensor): Coordinates of points.\n prefix (str, optional): Decode predictions with specific prefix.\n Defaults to ''.\n\n Returns:\n dict[str, torch.Tensor]: Split results.\n \"\"\"\n results = {}\n start, end = 0, 0\n\n cls_preds_trans = cls_preds.transpose(2, 1)\n reg_preds_trans = reg_preds.transpose(2, 1)\n\n # decode center\n end += 3\n # (batch_size, num_proposal, 3)\n results[f'{prefix}center_residual'] = \\\n reg_preds_trans[..., start:end].contiguous()\n results[f'{prefix}center'] = base_xyz + \\\n reg_preds_trans[..., start:end].contiguous()\n start = end\n\n # decode direction\n end += self.num_dir_bins\n results[f'{prefix}dir_class'] = \\\n reg_preds_trans[..., start:end].contiguous()\n start = end\n\n end += self.num_dir_bins\n dir_res_norm = reg_preds_trans[..., start:end].contiguous()\n start = end\n\n results[f'{prefix}dir_res_norm'] = dir_res_norm\n results[f'{prefix}dir_res'] = dir_res_norm * (\n np.pi / self.num_dir_bins)\n\n # decode size\n if self.size_cls_agnostic:\n end += 3\n results[f'{prefix}size'] = \\\n reg_preds_trans[..., start:end].contiguous()\n else:\n end += self.num_sizes\n results[f'{prefix}size_class'] = reg_preds_trans[\n ..., start:end].contiguous()\n start = end\n\n end += self.num_sizes * 3\n size_res_norm = reg_preds_trans[..., start:end]\n batch_size, num_proposal = reg_preds_trans.shape[:2]\n size_res_norm = size_res_norm.view(\n [batch_size, num_proposal, self.num_sizes, 3])\n start = end\n\n results[f'{prefix}size_res_norm'] = size_res_norm.contiguous()\n mean_sizes = reg_preds.new_tensor(self.mean_sizes)\n results[f'{prefix}size_res'] = (\n size_res_norm * mean_sizes.unsqueeze(0).unsqueeze(0))\n\n # decode objectness score\n # Group-Free-3D objectness output shape (batch, proposal, 1)\n results[f'{prefix}obj_scores'] = cls_preds_trans[..., :1].contiguous()\n\n # decode semantic score\n results[f'{prefix}sem_scores'] = cls_preds_trans[..., 1:].contiguous()\n\n return results\n",
"# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\n\n\ndef test_camera_to_lidar():\n from mmdet3d.core.bbox.box_np_ops import camera_to_lidar\n points = np.array([[1.84, 1.47, 8.41]])\n rect = np.array([[0.9999128, 0.01009263, -0.00851193, 0.],\n [-0.01012729, 0.9999406, -0.00403767, 0.],\n [0.00847068, 0.00412352, 0.9999556, 0.], [0., 0., 0.,\n 1.]])\n Trv2c = np.array([[0.00692796, -0.9999722, -0.00275783, -0.02457729],\n [-0.00116298, 0.00274984, -0.9999955, -0.06127237],\n [0.9999753, 0.00693114, -0.0011439, -0.3321029],\n [0., 0., 0., 1.]])\n points_lidar = camera_to_lidar(points, rect, Trv2c)\n expected_points = np.array([[8.73138192, -1.85591746, -1.59969933]])\n assert np.allclose(points_lidar, expected_points)\n\n\ndef test_box_camera_to_lidar():\n from mmdet3d.core.bbox.box_np_ops import box_camera_to_lidar\n box = np.array([[1.84, 1.47, 8.41, 1.2, 1.89, 0.48, -0.01]])\n rect = np.array([[0.9999128, 0.01009263, -0.00851193, 0.],\n [-0.01012729, 0.9999406, -0.00403767, 0.],\n [0.00847068, 0.00412352, 0.9999556, 0.], [0., 0., 0.,\n 1.]])\n Trv2c = np.array([[0.00692796, -0.9999722, -0.00275783, -0.02457729],\n [-0.00116298, 0.00274984, -0.9999955, -0.06127237],\n [0.9999753, 0.00693114, -0.0011439, -0.3321029],\n [0., 0., 0., 1.]])\n box_lidar = box_camera_to_lidar(box, rect, Trv2c)\n expected_box = np.array([[\n 8.73138192, -1.85591746, -1.59969933, 1.2, 0.48, 1.89, 0.01 - np.pi / 2\n ]])\n assert np.allclose(box_lidar, expected_box)\n\n\ndef test_corners_nd():\n from mmdet3d.core.bbox.box_np_ops import corners_nd\n dims = np.array([[0.47, 0.98]])\n corners = corners_nd(dims)\n expected_corners = np.array([[[-0.235, -0.49], [-0.235, 0.49],\n [0.235, 0.49], [0.235, -0.49]]])\n assert np.allclose(corners, expected_corners)\n\n\ndef test_center_to_corner_box2d():\n from mmdet3d.core.bbox.box_np_ops import center_to_corner_box2d\n center = np.array([[9.348705, -3.6271024]])\n dims = np.array([[0.47, 0.98]])\n angles = np.array([3.14])\n corner = center_to_corner_box2d(center, dims, angles)\n expected_corner = np.array([[[9.584485, -3.1374772], [9.582925, -4.117476],\n [9.112926, -4.1167274],\n [9.114486, -3.1367288]]])\n assert np.allclose(corner, expected_corner)\n\n center = np.array([[-0.0, 0.0]])\n dims = np.array([[4.0, 8.0]])\n angles = np.array([-0.785398]) # -45 degrees\n corner = center_to_corner_box2d(center, dims, angles)\n expected_corner = np.array([[[-4.24264, -1.41421], [1.41421, 4.24264],\n [4.24264, 1.41421], [-1.41421, -4.24264]]])\n assert np.allclose(corner, expected_corner)\n\n\ndef test_points_in_convex_polygon_jit():\n from mmdet3d.core.bbox.box_np_ops import points_in_convex_polygon_jit\n points = np.array([[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]])\n polygons = np.array([[[1.0, 0.0], [0.0, 1.0], [0.0, 0.5], [0.0, 0.0]],\n [[1.0, 0.0], [1.0, 1.0], [0.5, 1.0], [0.0, 1.0]],\n [[1.0, 0.0], [0.0, 1.0], [-1.0, 0.0], [0.0, -1.0]]])\n res = points_in_convex_polygon_jit(points, polygons)\n expected_res = np.array([[1, 0, 1], [0, 0, 0], [0, 1, 0]]).astype(np.bool)\n assert np.allclose(res, expected_res)\n\n polygons = np.array([[[0.0, 0.0], [0.0, 1.0], [0.5, 0.5], [1.0, 0.0]],\n [[0.0, 1.0], [1.0, 1.0], [1.0, 0.5], [1.0, 0.0]],\n [[1.0, 0.0], [0.0, -1.0], [-1.0, 0.0], [0.0, 1.1]]])\n res = points_in_convex_polygon_jit(points, polygons, clockwise=True)\n expected_res = np.array([[1, 0, 1], [0, 0, 1], [0, 1, 0]]).astype(np.bool)\n assert np.allclose(res, expected_res)\n"
] | [
[
"pandas.read_csv",
"numpy.linalg.inv",
"numpy.eye",
"pandas.DataFrame",
"numpy.concatenate",
"numpy.array"
],
[
"torch.max",
"torch.sin",
"torch.cat",
"torch.nn.Conv2d",
"torch.cos"
],
[
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.arange",
"torch.meshgrid"
],
[
"torch.tensor",
"torch.distributed.broadcast",
"numpy.random.randint"
],
[
"torch.Size",
"numpy.fromfile",
"torch.from_numpy",
"torch.rand",
"torch.cuda.is_available"
],
[
"torch.nn.functional.pad",
"torch.zeros_like",
"torch.where",
"torch.cat"
],
[
"torch.argmax",
"torch.cat"
],
[
"numpy.array",
"numpy.allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Martin36/FEVER2021_SharedTask | [
"4dd49e0ddf2909a93d44dab22eae988a067fc355",
"4dd49e0ddf2909a93d44dab22eae988a067fc355",
"4dd49e0ddf2909a93d44dab22eae988a067fc355"
] | [
"src/entailment/entailment_with_t5.py",
"src/table_retrieval/retrieve_table_cells.py",
"src/util/util_funcs.py"
] | [
"import argparse\nfrom collections import defaultdict\nimport torch\n\nfrom transformers import T5ForConditionalGeneration, T5Tokenizer\nfrom tqdm import tqdm\nfrom util.util_funcs import load_jsonl\n\nmodel = T5ForConditionalGeneration.from_pretrained(\"t5-small\")\ntokenizer = T5Tokenizer.from_pretrained(\"t5-small\")\n\nMNLI_TO_FEVER_MAP = {\n \"▁entailment\": \"SUPPORTS\",\n \"▁neutral\": \"NOT ENOUGH INFO\",\n \"▁contradiction\": \"REFUTES\",\n}\n\nstats = defaultdict(int)\n\n\ndef predict_veracity(claim, evidence):\n # task = \"rte\"\n task = \"mnli\"\n if task == \"mnli\":\n input_str = \"{} premise: {} hypothesis: {}\".format(task, evidence, claim)\n if task == \"rte\":\n input_str = \"{} sentence1: {} sentence2: {}\".format(task, claim, evidence)\n\n input_ids = tokenizer(input_str, return_tensors=\"pt\").input_ids\n\n result = model.generate(input_ids)\n result = torch.squeeze(result)\n target = tokenizer.convert_ids_to_tokens(result, skip_special_tokens=True)\n\n return target\n\n\ndef get_veracity_label(claim, evidence):\n predicted_label = predict_veracity(claim, evidence)\n predicted_label = \"\".join(predicted_label)\n if predicted_label not in MNLI_TO_FEVER_MAP.keys():\n return \"NOT ENOUGH INFO\"\n else:\n return MNLI_TO_FEVER_MAP[predicted_label]\n\n\ndef test_model(data):\n num_correct = 0\n counter = 0\n for d in tqdm(data):\n # if counter > 200: break\n claim = d[\"claim\"]\n evidence = d[\"evidence\"]\n label = d[\"label\"]\n stats[\"nr_of_{}_samples\".format(label)] += 1\n predicted_label = predict_veracity(claim, evidence)\n predicted_label = \"\".join(predicted_label)\n if predicted_label not in MNLI_TO_FEVER_MAP.keys():\n # Assume that all invalid predicted labels means not enough information\n if label == \"NOT ENOUGH INFO\":\n stats[\"nr_of_correct_{}_samples\".format(label)] += 1\n num_correct += 1\n else:\n if label == MNLI_TO_FEVER_MAP[predicted_label]:\n stats[\"nr_of_correct_{}_samples\".format(label)] += 1\n num_correct += 1\n counter += 1\n accuracy = num_correct / counter\n\n print(\"Accuracy for {} samples: {}\".format(len(data), accuracy))\n print()\n print(\"========== STATS ============\")\n for label in MNLI_TO_FEVER_MAP.values():\n print(\n \"Nr of {} samples: {}\".format(\n label, stats[\"nr_of_{}_samples\".format(label)]\n )\n )\n print(\n \"Nr of correct {} samples: {}\".format(\n label, stats[\"nr_of_correct_{}_samples\".format(label)]\n )\n )\n if stats[\"nr_of_{}_samples\".format(label)] > 0:\n amount_correct = (\n stats[\"nr_of_correct_{}_samples\".format(label)]\n / stats[\"nr_of_{}_samples\".format(label)]\n )\n else:\n amount_correct = 1.0\n print(\"Amount of correct {} samples: {}\".format(label, amount_correct))\n print()\n print(\"=============================\")\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Extracts the text from the feverous db and creates a corpus\"\n )\n parser.add_argument(\n \"--data_path\",\n default=None,\n type=str,\n help=\"Path to the file containing the training data\",\n )\n\n args = parser.parse_args()\n\n if not args.data_path:\n raise RuntimeError(\"Invalid train data path\")\n\n data = load_jsonl(args.data_path)\n test_model(data)\n\n\nif __name__ == \"__main__\":\n main()\n",
"import os, sys\nimport torch\nimport argparse\nimport shutil\nimport pandas as pd\n\nfrom tqdm import tqdm\nfrom transformers import TapasTokenizer\nfrom data_processing.create_tapas_tables import create_tables\nfrom collections import defaultdict\n\nfrom util.util_funcs import load_jsonl, get_tables_from_docs, store_jsonl\n\nDIR_PATH = os.path.abspath(os.getcwd())\n\nFEVEROUS_PATH = DIR_PATH + \"/FEVEROUS/src\"\nsys.path.insert(0, FEVEROUS_PATH)\n\nfrom database.feverous_db import FeverousDB\nfrom utils.wiki_page import WikiPage\n\nstats = defaultdict(int)\n\n\ndef predict(model, tokenizer, data_path, device):\n\n data = pd.read_csv(data_path)\n cell_classification_threshold = 0.1\n claim_to_cell_id_map = defaultdict(list)\n with torch.no_grad():\n for idx, item in tqdm(data.iterrows()):\n table = pd.read_csv(item.table_file).astype(str)\n try:\n batch = tokenizer(\n table=table,\n queries=item.question,\n truncation=True,\n answer_coordinates=[],\n answer_text=[],\n padding=\"max_length\",\n return_tensors=\"pt\",\n )\n batch = {key: val for key, val in batch.items()}\n if torch.gt(batch[\"numeric_values\"], 1e20).any():\n stats[\"tables_with_too_large_numbers\"] += 1\n continue\n batch[\"float_answer\"] = torch.tensor(0.0)\n except:\n e = sys.exc_info()[0]\n stats[\"tokenizing_errors\"] += 1\n continue\n\n input_ids = batch[\"input_ids\"].to(device)\n attention_mask = batch[\"attention_mask\"].to(device)\n token_type_ids = batch[\"token_type_ids\"].to(device)\n labels = batch[\"labels\"].to(device)\n numeric_values = batch[\"numeric_values\"].to(device)\n numeric_values_scale = batch[\"numeric_values_scale\"].to(device)\n float_answer = batch[\"float_answer\"].to(device)\n float_answer = torch.reshape(float_answer, (1, 1))\n\n outputs = model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n labels=labels,\n numeric_values=numeric_values,\n numeric_values_scale=numeric_values_scale,\n float_answer=float_answer,\n )\n\n logits = outputs.logits.cpu()\n logits_agg = outputs.logits_aggregation.cpu()\n output_labels = tokenizer.convert_logits_to_predictions(\n batch,\n logits,\n logits_agg,\n cell_classification_threshold=cell_classification_threshold,\n )\n\n output_cells = output_labels[0][0]\n\n # Keep only the top 5 cells,\n # assuming that they are ordered by score\n for output_cell in output_cells[:6]:\n table_id_split = item.table_id.split(\"_\")\n page_name = table_id_split[0]\n table_id = table_id_split[1]\n # Example format: 'Algebraic logic_cell_0_9_1'\n cell_id = \"{}_cell_{}_{}_{}\".format(\n page_name, table_id, output_cell[0], output_cell[1]\n )\n claim_to_cell_id_map[item.question].append(cell_id)\n\n return claim_to_cell_id_map\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Retrieves the top tables cells from the top tables\"\n )\n parser.add_argument(\n \"--db_path\", default=None, type=str, help=\"Path to the FEVEROUS database\"\n )\n parser.add_argument(\n \"--data_file\",\n default=None,\n type=str,\n help=\"Path to the csv file containing the evaluation examples\",\n )\n parser.add_argument(\n \"--model_file\",\n default=None,\n type=str,\n help=\"Path to the trained veracity prediction model\",\n )\n parser.add_argument(\n \"--tapas_model_name\",\n default=\"google/tapas-tiny\",\n type=str,\n help=\"Name of the pretrained tapas model\",\n )\n parser.add_argument(\n \"--batch_size\",\n default=1,\n type=int,\n help=\"The size of each training batch. 
Reduce this is you run out of memory\",\n )\n parser.add_argument(\n \"--out_dir\",\n default=None,\n type=str,\n help=\"Path to the csv file containing the evaluation examples\",\n )\n parser.add_argument(\n \"--out_file\",\n default=None,\n type=str,\n help=\"Path to the csv file containing the evaluation examples\",\n )\n\n args = parser.parse_args()\n\n if not args.db_path:\n raise RuntimeError(\"Invalid database path\")\n if \".db\" not in args.db_path:\n raise RuntimeError(\"The database path should include the name of the db file\")\n if not args.data_file:\n raise RuntimeError(\"Invalid in file path\")\n if \".jsonl\" not in args.data_file:\n raise RuntimeError(\n \"The train csv path should include the name of the .csv file\"\n )\n if not args.model_file:\n raise RuntimeError(\"Invalid model path\")\n if \".pth\" not in args.model_file:\n raise RuntimeError(\"The model path should include the name of the .pth file\")\n if not args.out_dir:\n raise RuntimeError(\"Invalid out file path\")\n if not args.out_file:\n raise RuntimeError(\"Invalid out file path\")\n if \".jsonl\" not in args.out_file:\n raise RuntimeError(\n \"The train csv path should include the name of the .jsonl file\"\n )\n\n db = FeverousDB(args.db_path)\n\n device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n model = torch.load(args.model_file, map_location=device)\n tokenizer = TapasTokenizer.from_pretrained(args.tapas_model_name)\n model.eval()\n\n tapas_tables_folder = args.out_dir + \"torch_tables/\"\n tapas_tables_folder = os.path.dirname(tapas_tables_folder)\n if not os.path.exists(tapas_tables_folder):\n print(\"Output directory doesn't exist. Creating {}\".format(tapas_tables_folder))\n os.makedirs(tapas_tables_folder)\n\n top_tables_data = load_jsonl(args.data_file)\n results = []\n tapas_input_data_list = []\n batch_counter = 0\n for i, d in enumerate(top_tables_data):\n claim = d[\"claim\"]\n doc_names = []\n for table_id in d[\"table_ids\"]:\n table_id_split = table_id.split(\"_\")\n doc_names.append(table_id_split[0])\n doc_names = set(doc_names)\n doc_tables_dict = get_tables_from_docs(db, doc_names)\n top_tables = d[\"table_ids\"]\n\n # First we need to convert the table data to the correct format\n filtered_tables = []\n ordered_table_ids = []\n for doc_name, table_dicts in doc_tables_dict.items():\n for j, table_dict in enumerate(table_dicts):\n table_id = \"{}_{}\".format(doc_name, j)\n if table_id in top_tables:\n filtered_tables.append(table_dict)\n ordered_table_ids.append(table_id)\n\n tapas_input_data = {\n \"id\": i, # This is actually useless\n \"claim\": claim,\n \"label\": \"\",\n \"has_tables\": len(top_tables) > 0,\n \"table_dicts\": filtered_tables,\n \"table_ids\": ordered_table_ids,\n \"evidence\": [],\n }\n tapas_input_data_list.append(tapas_input_data)\n\n if len(tapas_input_data_list) == args.batch_size:\n batch_counter += 1\n print(\"=======================================\")\n print(\n \"predicting for batch: {}/{}\".format(\n batch_counter, int(len(top_tables_data) / args.batch_size)\n )\n )\n print(\"=======================================\")\n\n tapas_data_file = create_tables(\n tapas_input_data_list,\n args.out_dir,\n tapas_tables_folder + \"/\",\n write_to_files=True,\n is_predict=True,\n )\n\n claim_to_cell_id_map = predict(model, tokenizer, tapas_data_file, device)\n\n result_objs = [\n {\"claim\": claim, \"cell_ids\": cell_ids}\n for claim, cell_ids in claim_to_cell_id_map.items()\n ]\n\n results += result_objs\n\n 
tapas_input_data_list = []\n\n # Remove the previously created tables\n shutil.rmtree(tapas_tables_folder)\n os.makedirs(tapas_tables_folder)\n\n # Do predict for the last (possibly incomplete batch\n print(\"=======================================\")\n print(\"predicting for last batch\")\n print(\"=======================================\")\n\n tapas_data_file = create_tables(\n tapas_input_data_list,\n args.out_dir,\n tapas_tables_folder + \"/\",\n write_to_files=True,\n is_predict=True,\n )\n\n claim_to_cell_id_map = predict(model, tokenizer, tapas_data_file, device)\n\n result_objs = [\n {\"claim\": claim, \"cell_ids\": cell_ids}\n for claim, cell_ids in claim_to_cell_id_map.items()\n ]\n\n results += result_objs\n\n store_jsonl(results, args.out_file)\n print(\"Stored top tables cells in '{}'\".format(args.out_file))\n\n\nif __name__ == \"__main__\":\n main()\n",
"import os, sys, re, json, jsonlines, nltk, pickle, torch\nimport numpy as np\nimport pandas as pd\nfrom argparse import ArgumentTypeError\nfrom collections import OrderedDict, defaultdict\nfrom typing import List, Union\nfrom glob import glob\nfrom tqdm import tqdm\nfrom nltk.stem.porter import PorterStemmer\nfrom nltk.corpus import stopwords\nfrom util.logger import get_logger\n\nDIR_PATH = os.path.abspath(os.getcwd())\nFEVEROUS_PATH = DIR_PATH + \"/FEVEROUS/src\"\nsys.path.insert(0, FEVEROUS_PATH)\n\nfrom database.feverous_db import FeverousDB\nfrom utils.wiki_page import WikiPage\nfrom utils.wiki_table import WikiTable\n\nnltk.download(\"stopwords\")\nporter_stemmer = PorterStemmer()\ns_words = set(stopwords.words(\"english\"))\n\nlogger = get_logger()\n\nLABEL_TO_IDX = {\"SUPPORTS\": 0, \"REFUTES\": 1, \"NOT ENOUGH INFO\": 2}\nIDX_TO_LABEL = {0: \"SUPPORTS\", 1: \"REFUTES\", 2: \"NOT ENOUGH INFO\"}\nMAX_NUM_COLS = 32\nMAX_NUM_ROWS = 64\nMAX_TABLE_SIZE = 512\n\n\ndef calc_f1(precision: float, recall: float):\n \"\"\"Calculates the F1 score\n\n Args:\n precision (float): The calculated precision\n recall (float): The calculated recall\n\n Returns:\n float: The F1 score\n \"\"\"\n if precision + recall == 0:\n return 0\n return 2 * ((precision * recall) / (precision + recall))\n\n\ndef calc_acc(pred_data: List[List[str]], gold_data: List[List[str]]):\n \"\"\"Calculates the accuracy, precision and recall\n\n Args:\n pred_data (List[List[str]]): The output data from the model\n gold_data (List[List[str]]): The labeled data to compare with\n\n Returns:\n tuple[float, float, float]: Accuracy, recall and precision of the predictions\n \"\"\"\n\n nr_dp = len(pred_data)\n nr_correct = 0\n nr_min_one_corr = 0\n total_pred = 0\n total_gold = 0\n for i, pred_list in enumerate(pred_data):\n min_one_corr = False\n for pred_d in pred_list:\n total_pred += 1\n total_gold += len(gold_data[i])\n if pred_d in gold_data[i]:\n nr_correct += 1\n min_one_corr = True\n if min_one_corr:\n nr_min_one_corr += 1\n\n accuracy = nr_min_one_corr / nr_dp\n recall = nr_correct / total_gold\n precision = nr_correct / total_pred\n return accuracy, recall, precision\n\n\ndef corpus_generator(corpus_path: str, testing=False, only_doc=False, only_key=False):\n \"\"\"A generator that returns each document in the corpus\n\n Args:\n corpus_path (str): The path to the folder containing the corpus files\n testing (bool, optional): If True, the generator will only yield a small part of the corpus\n only_doc (bool, optional): If True, the generator will only return the document texts\n only_key (bool, optional): If True, the generator will only return the document titles\n Yields:\n str: A document in the corpus\n \"\"\"\n\n if testing:\n file_paths = glob(corpus_path + \"corpora_1.json\")\n else:\n file_paths = glob(corpus_path + \"*.json\")\n file_paths = sorted(file_paths)\n for f_path in file_paths:\n print(\"Opening file '{}'\".format(f_path))\n with open(f_path, \"r\") as f:\n docs = json.loads(f.read())\n for key in tqdm(docs):\n if only_doc:\n yield docs[key]\n elif only_key:\n yield key\n else:\n yield docs[key], key\n\n\ndef create_doc_id_map(corpus_path: str):\n doc_id_map = []\n corpus = corpus_generator(corpus_path, only_key=True)\n for key in corpus:\n doc_id_map.append(key)\n return doc_id_map\n\n\ndef create_table_dict(table: WikiTable):\n \"\"\"Creates a dict from a WikiTable object\n\n Args:\n table (WikiTable): The table on WikiTable format\n\n Returns:\n dict: Dict of the table\n \"\"\"\n\n table_rows = 
table.get_rows()\n rows = [replace_entities(table_row.cell_content) for table_row in table_rows]\n col_names = rows[0]\n rows = rows[1:]\n\n table_dict = {}\n table_dict[\"header\"] = [name.strip() for name in col_names]\n table_dict[\"cell_ids\"] = table.get_ids()\n table_dict[\"rows\"] = rows\n table_dict[\"page\"] = table.page\n\n # Keep only rows that have the same nr of columns as the header\n # This is probably not needed, but since it works now, this stays so nothing breaks\n # TODO: Figure out if this is really needed\n table_dict[\"rows\"] = [\n row for row in table_dict[\"rows\"] if len(row) == len(table_dict[\"header\"])\n ]\n\n return table_dict\n\n\ndef create_tapas_tables(\n tapas_table_dicts: List[dict],\n table_out_path: str,\n write_to_files: bool,\n is_predict=False,\n):\n \"\"\"Function for creating the tables in csv format used by tapas and returning the data table for tapas input data\n\n Args:\n tapas_table_dicts (List[dict]): A list of the tapas tables as dicts\n table_out_path (str): Path to the folder where to store the tapas tables\n write_to_files (bool): If True, will create the tapas tables in the given folder\n is_predict (bool, optional): Set to true if there is no labelled data. Defaults to False.\n\n Returns:\n DataFrame: A DataFrame of the tapas model data\n \"\"\"\n\n counter = 0\n stats = defaultdict(int)\n table_counter = 1\n data_rows = []\n\n if is_predict:\n column_names = [\"id\", \"claim_id\", \"claim\", \"table_file\", \"table_id\"]\n else:\n column_names = [\n \"id\",\n \"claim_id\",\n \"annotator\",\n \"claim\",\n \"table_file\",\n \"answer_coordinates\",\n \"answer_text\",\n \"float_answer\",\n ]\n\n for i, data in enumerate(tqdm(tapas_table_dicts)):\n\n # Skip data points that doesn't have any tables and contains no evidence\n if not data[\"has_tables\"]:\n stats[\"has_no_tables\"] += 1\n continue\n\n # TODO: Figure out why some samples don't have any evidence\n # It's probably because they have \"table_caption\" evidence and not \"table_cell\"\n if len(data[\"evidence\"]) == 0:\n stats[\"has_no_evidence\"] += 1\n # In the prediction phase it is possible to have samples without evidence\n if not is_predict:\n continue\n\n coords_answer_map = defaultdict(dict)\n\n for j, e in enumerate(data[\"evidence\"]):\n if not \"_cell_\" in e:\n continue\n e_split = e.split(\"_\")\n table_id = \"_\".join([e_split[0], e_split[-3]])\n coords = (int(e_split[-2]), int(e_split[-1]))\n coords_answer_map[table_id][coords] = remove_header_tokens(\n data[\"answer_texts\"][j]\n ).strip()\n\n table_file_names = {}\n has_too_large_tables = False\n evidence_id_out_of_range = False\n for d in data[\"table_dicts\"]:\n if (\n len(d[\"header\"]) > MAX_NUM_COLS\n or len(d[\"rows\"]) + len(d[\"header\"]) > MAX_NUM_ROWS\n or len(d[\"header\"]) * (len(d[\"rows\"]) + 1) > MAX_TABLE_SIZE\n ):\n has_too_large_tables = True\n break\n\n page_name = d[\"page\"]\n table_idx = d[\"cell_ids\"][0].split(\"_\")[-3]\n table_id = \"_\".join([page_name, table_idx])\n headers = []\n rows = []\n rows.append(d[\"header\"])\n for h in range(len(d[\"header\"])):\n headers.append(\"col{}\".format(h))\n for row in d[\"rows\"]:\n rows.append(row)\n\n # Since the table cell numbers are not exactly the same as their\n # col and row number, some evidence ids may go \"out of\" the table\n # Therefore we need to check this in order to not get errors when\n # training the model.\n # TODO: Solve the ID problem so this part will not be needed\n evidence_id_out_of_range = False\n for e in 
data[\"evidence\"]:\n e_split = e.split(\"_\")\n if e_split[0] == page_name and e_split[-3] == table_idx:\n row_idx = e_split[-2]\n col_idx = e_split[-1]\n if int(row_idx) >= len(rows) or int(col_idx) >= len(d[\"header\"]):\n evidence_id_out_of_range = True\n break\n\n if evidence_id_out_of_range:\n break\n\n df = pd.DataFrame(rows, columns=headers)\n\n table_file_name = table_out_path + \"table_{}.csv\".format(table_counter)\n table_file_names[table_id] = table_file_name\n\n if write_to_files:\n df.to_csv(table_file_name)\n\n table_counter += 1\n\n if evidence_id_out_of_range:\n stats[\"evidence_id_out_of_range\"] += 1\n continue\n\n if has_too_large_tables:\n stats[\"too_large_tables\"] += 1\n continue\n\n if is_predict:\n # [\"id\", \"claim_id\", \"claim\", \"table_file\", \"table_id\"]\n for table_id in table_file_names:\n data_row = [\n i,\n data[\"id\"],\n data[\"claim\"],\n table_file_names[table_id],\n table_id,\n ]\n data_rows.append(data_row)\n counter += 1\n else:\n # TODO: How to handle the case with multiple tables?\n # For now, use the table that has the most table cells from the evidence\n table_index = max(\n coords_answer_map, key=lambda x: len(coords_answer_map[x].keys())\n )\n answer_coords = list(coords_answer_map[table_index].keys())\n\n assert table_index is not None and answer_coords is not None\n\n answer_texts = [\n coords_answer_map[table_index][coords] for coords in answer_coords\n ]\n data_row = [\n i,\n data[\"id\"],\n None,\n data[\"claim\"],\n table_file_names[table_index],\n answer_coords,\n answer_texts,\n np.nan,\n ]\n data_rows.append(data_row)\n counter += 1\n\n logger.info(\n \"{} valid train samples out of {}\".format(\n len(data_rows), len(tapas_table_dicts)\n )\n )\n logger.info(\"{} samples have no tables\".format(stats[\"has_no_tables\"]))\n logger.info(\"{} samples have no evidence\".format(stats[\"has_no_evidence\"]))\n logger.info(\"{} samples have too large tables\".format(stats[\"too_large_tables\"]))\n logger.info(\n \"{} samples have indicies outside of the table dimensions\".format(\n stats[\"evidence_id_out_of_range\"]\n )\n )\n\n df = pd.DataFrame(data_rows, columns=column_names)\n return df\n\n\ndef extract_sents(doc_json: dict):\n \"\"\"Extracts the sentences from a document in the DB\n\n Args:\n doc_json (dict): A json object from the FEVEROUS DB\n\n Returns:\n List[str]: A list of the sentences from the page\n \"\"\"\n\n page = WikiPage(doc_json[\"title\"], doc_json)\n sents = [replace_entities(sent.content) for sent in page.get_sentences()]\n sents = [sent.lower() for sent in sents]\n return sents\n\n\ndef get_evidence_docs(doc_json: dict):\n \"\"\"Gets the document ids for the documents where the evidence is\n\n Args:\n doc_json (dict): A data dict from the FEVEROUS dataset\n\n Returns:\n List[str]: A list of the document ids\n \"\"\"\n\n doc_names = []\n for evidence_content in doc_json[\"evidence\"][0][\"content\"]:\n doc_name = evidence_content.split(\"_\")[0]\n if doc_name not in doc_names:\n doc_names.append(doc_name)\n return doc_names\n\n\ndef get_tables_from_docs(db: FeverousDB, doc_names: List[str]):\n \"\"\"Takes a list of document names and returns a dict with a list of tables for each document\n\n Args:\n db (FeverousDB): The FEVEROUS DB object\n doc_names (List[str]): A list of the document ids\n\n Returns:\n dict: A dict with a mapping from document id to a list of table dicts\n \"\"\"\n\n result = {}\n for doc_name in doc_names:\n doc_json = db.get_doc_json(doc_name)\n page = WikiPage(doc_name, doc_json)\n tables = 
page.get_tables()\n table_dicts = [create_table_dict(table) for table in tables]\n result[doc_name] = table_dicts\n return result\n\n\ndef load_json(path: str):\n \"\"\"Loads the json file from 'path' into a list of dicts\n\n Args:\n path (str): The path to the json file\n\n Raises:\n RuntimeError: If the provided path does not point to a json file\n\n Returns:\n dict: A dict of the json file\n \"\"\"\n\n if not \".json\" in path:\n raise RuntimeError(\"'path' is not pointing to a json file\")\n data = None\n with open(path) as f:\n data = json.loads(f.read())\n return data\n\n\ndef load_jsonl(path: str) -> List[dict]:\n \"\"\"Loads the jsonl file from 'path' into a list of dicts\n\n Args:\n path (str): The path to the jsonl file\n\n Raises:\n RuntimeError: If the provided path does not point to a jsonl file\n\n Returns:\n List[dict]: A list of the jsonl file\n \"\"\"\n\n if not \".jsonl\" in path:\n raise RuntimeError(\"'path' is not pointing to a jsonl file\")\n result = []\n with jsonlines.open(path) as reader:\n for doc in reader:\n result.append(doc)\n return result\n\n\ndef load_tfidf(vectorizer_path: str, wm_path: str):\n \"\"\"Loads the stored TF-IDF objects\n\n Args:\n vectorizer_path (str): Path to the vectorizer .pickle file\n wm_path (str): Path to the word model .pickle file\n\n Returns:\n tuple: A tuple of the tfidfvectorizer and tfidf_wm objects\n \"\"\"\n\n tfidfvectorizer = pickle.load(open(vectorizer_path, \"rb\"))\n tfidf_wm = pickle.load(open(wm_path, \"rb\"))\n return tfidfvectorizer, tfidf_wm\n\n\ndef store_json(\n data: Union[dict, list, defaultdict, OrderedDict],\n file_path: str,\n sort_keys=False,\n indent=None,\n):\n \"\"\" Function for storing a dict to a json file\n\n Args:\n data(dict): The dict or list to be stored in the json file\n file_path(str): The path to the file to be created (note: will delete files that have the same name)\n sort_keys(bool, optional): Set to True if the keys in the dict should be sorted before stored (default: False)\n indent(bool, optional): Set this if indentation should be added (default: None)\n \"\"\"\n\n if (\n type(data) != dict\n and type(data) != list\n and type(data) != defaultdict\n and type(data) != OrderedDict\n ):\n raise ArgumentTypeError(\"'data' needs to be a dict\")\n if \".json\" not in file_path:\n raise RuntimeError(\"'file_path' needs to include the name of the output file\")\n with open(file_path, mode=\"w\") as f:\n f.write(json.dumps(data, sort_keys=sort_keys, indent=indent))\n\n\ndef store_jsonl(data: list, file_path: str):\n if type(data) != list:\n raise ArgumentTypeError(\"'data' needs to be a list\")\n if \".jsonl\" not in file_path:\n raise RuntimeError(\"'file_path' needs to include the name of the output file\")\n with jsonlines.open(file_path, mode=\"w\") as f:\n for d in data:\n f.write(d)\n\n\ndef replace_entities(sent):\n if not sent:\n return sent\n\n regex = r\"\\[\\[([^\\|]+)\\|([^\\]]+)\\]\\]\"\n\n if type(sent) == list:\n return [re.sub(regex, \"\\\\2\", s) for s in sent]\n else:\n return re.sub(regex, \"\\\\2\", sent)\n\n\ndef remove_header_tokens(string):\n regex = r\"\\[H\\]\"\n return re.sub(regex, \"\", string)\n\n\ndef remove_punctuation(sent):\n if sent[-1] == \".\":\n return sent[:-1]\n else:\n return sent\n\n\ndef remove_stopwords(tokens):\n return [t for t in tokens if t not in s_words]\n\n\ndef sim_matrix(a, b, eps=1e-8):\n \"\"\"\n added eps for numerical stability\n \"\"\"\n a_n, b_n = a.norm(dim=1)[:, None], b.norm(dim=1)[:, None]\n a_norm = a / torch.max(a_n, eps * 
torch.ones_like(a_n))\n b_norm = b / torch.max(b_n, eps * torch.ones_like(b_n))\n sim_mt = torch.mm(a_norm, b_norm.transpose(0, 1))\n return sim_mt\n\n\ndef stemming_tokenizer(input: str):\n \"\"\"Converts a string to a list of words, removing special character, stopwords\n and stemming the words\n\n Args:\n s (str): The string to be tokenized\n\n Returns:\n list: A list of words\n \"\"\"\n\n words = re.sub(r\"[^A-Za-z0-9\\-]\", \" \", input).lower().split()\n words = [word for word in words if word not in s_words]\n words = [porter_stemmer.stem(word) for word in words]\n return words\n\n\ndef tokenize(s: str):\n \"\"\"Converts a string to a list of words, and removing special character and stopwords\n\n Args:\n s (str): The string to be tokenized\n\n Returns:\n list: A list of words\n \"\"\"\n\n words = re.sub(r\"[^A-Za-z0-9\\-]\", \" \", s).lower().split()\n words = [word for word in words if word not in s_words]\n return words\n\n\ndef unique(sequence: list):\n \"\"\"Returns all the unique items in the list while keeping order (which set() does not)\n\n Args:\n sequence (list): The list to filter\n\n Returns:\n list: List with only unique elements\n \"\"\"\n seen = set()\n return [x for x in sequence if not (x in seen or seen.add(x))]\n"
] | [
[
"torch.squeeze"
],
[
"pandas.read_csv",
"torch.load",
"torch.reshape",
"torch.tensor",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device",
"torch.gt"
],
[
"torch.ones_like",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
wangyum/anaconda | [
"6e5a0dbead3327661d73a61e85414cf92aa52be6",
"6e5a0dbead3327661d73a61e85414cf92aa52be6",
"6e5a0dbead3327661d73a61e85414cf92aa52be6",
"6e5a0dbead3327661d73a61e85414cf92aa52be6",
"6e5a0dbead3327661d73a61e85414cf92aa52be6",
"6e5a0dbead3327661d73a61e85414cf92aa52be6",
"6e5a0dbead3327661d73a61e85414cf92aa52be6",
"6e5a0dbead3327661d73a61e85414cf92aa52be6",
"6e5a0dbead3327661d73a61e85414cf92aa52be6",
"6e5a0dbead3327661d73a61e85414cf92aa52be6",
"6e5a0dbead3327661d73a61e85414cf92aa52be6",
"6e5a0dbead3327661d73a61e85414cf92aa52be6",
"6e5a0dbead3327661d73a61e85414cf92aa52be6"
] | [
"pkgs/statsmodels-0.6.1-np110py27_0/lib/python2.7/site-packages/statsmodels/emplike/aft_el.py",
"lib/python2.7/site-packages/openopt/examples/ksp_1.py",
"lib/python2.7/site-packages/openopt/solvers/UkrOpt/interalgODE.py",
"pkgs/numba-0.24.0-np110py27_0/lib/python2.7/site-packages/numba/tests/npyufunc/test_vectorize_decor.py",
"lib/python2.7/site-packages/openopt/kernel/baseSolver.py",
"lib/python2.7/site-packages/openopt/solvers/Standalone/sa.py",
"pkgs/statsmodels-0.6.1-np110py27_0/lib/python2.7/site-packages/statsmodels/base/optimizer.py",
"lib/python2.7/site-packages/openopt/examples/eig_1.py",
"lib/python2.7/site-packages/openopt/examples/nlp_2.py",
"pkgs/pytables-3.2.2-np110py27_1/lib/python2.7/site-packages/tables/table.py",
"pkgs/dask-0.8.1-py27_0/lib/python2.7/site-packages/dask/array/chunk.py",
"pkgs/bokeh-0.11.1-py27_0/Examples/bokeh/plotting/server/fourier_animated.py",
"pkgs/blaze-0.9.1-py27_0/lib/python2.7/site-packages/blaze/compute/tests/test_spark.py"
] | [
"\"\"\"\n\nAccelerated Failure Time (AFT) Model with empirical likelihood inference.\n\nAFT regression analysis is applicable when the researcher has access\nto a randomly right censored dependent variable, a matrix of exogenous\nvariables and an indicatior variable (delta) that takes a value of 0 if the\nobservation is censored and 1 otherwise.\n\nAFT References\n--------------\n\nStute, W. (1993). \"Consistent Estimation Under Random Censorship when\nCovariables are Present.\" Journal of Multivariate Analysis.\nVol. 45. Iss. 1. 89-103\n\nEL and AFT References\n---------------------\n\nZhou, Kim And Bathke. \"Empirical Likelihood Analysis for the Heteroskedastic\nAccelerated Failure Time Model.\" Manuscript:\nURL: www.ms.uky.edu/~mai/research/CasewiseEL20080724.pdf\n\nZhou, M. (2005). Empirical Likelihood Ratio with Arbitrarily Censored/\nTruncated Data by EM Algorithm. Journal of Computational and Graphical\nStatistics. 14:3, 643-656.\n\n\n\"\"\"\n\nimport numpy as np\nfrom statsmodels.regression.linear_model import OLS, WLS\nfrom statsmodels.tools import add_constant\n#from elregress import ElReg\nfrom scipy import optimize\nfrom scipy.stats import chi2\nfrom .descriptive import _OptFuncts\nimport warnings\nfrom statsmodels.tools.sm_exceptions import IterationLimitWarning\n\nclass OptAFT(_OptFuncts):\n \"\"\"\n Provides optimization functions used in estimating and conducting\n inference in an AFT model.\n\n Methods\n ------\n\n _opt_wtd_nuis_regress:\n Function optimized over nuisance parameters to compute\n the profile likelihood\n\n _EM_test:\n Uses the modified Em algorithm of Zhou 2005 to maximize the\n likelihood of a parameter vector.\n\n \"\"\"\n def __init__(self):\n pass\n\n def _opt_wtd_nuis_regress(self, test_vals):\n \"\"\"\n A function that is optimized over nuisance parameters to conduct a\n hypothesis test for the parameters of interest\n\n Parameters\n ----------\n\n params: 1d array\n The regression coefficients of the model. 
This includes the\n nuisance and parameters of interests.\n\n Returns\n -------\n\n llr: float\n -2 times the log likelihood of the nuisance parameters and the\n hypothesized value of the parameter(s) of interest.\n\n \"\"\"\n test_params = test_vals.reshape(self.model.nvar, 1)\n est_vect = self.model.uncens_exog * (self.model.uncens_endog -\n np.dot(self.model.uncens_exog,\n test_params))\n eta_star = self._modif_newton(np.zeros(self.model.nvar), est_vect,\n self.model._fit_weights)\n denom = np.sum(self.model._fit_weights) + np.dot(eta_star, est_vect.T)\n self.new_weights = self.model._fit_weights / denom\n return -1 * np.sum(np.log(self.new_weights))\n\n def _EM_test(self, nuisance_params, params=None, param_nums=None,\n b0_vals=None, F=None, survidx=None, uncens_nobs=None,\n numcensbelow=None, km=None, uncensored=None, censored=None,\n maxiter=None, ftol=None):\n \"\"\"\n Uses EM algorithm to compute the maximum likelihood of a test\n\n Parameters\n ---------\n\n Nuisance Params: array\n Vector of values to be used as nuisance params.\n\n maxiter: int\n Number of iterations in the EM algorithm for a parameter vector\n\n Returns\n -------\n -2 ''*'' log likelihood ratio at hypothesized values and\n nuisance params\n\n Notes\n -----\n Optional parameters are provided by the test_beta function.\n \"\"\"\n iters = 0\n params[param_nums] = b0_vals\n\n nuis_param_index = np.int_(np.delete(np.arange(self.model.nvar),\n param_nums))\n params[nuis_param_index] = nuisance_params\n to_test = params.reshape(self.model.nvar, 1)\n opt_res = np.inf\n diff = np.inf\n while iters < maxiter and diff > ftol:\n F = F.flatten()\n death = np.cumsum(F[::-1])\n survivalprob = death[::-1]\n surv_point_mat = np.dot(F.reshape(-1, 1),\n 1. / survivalprob[survidx].reshape(1, - 1))\n surv_point_mat = add_constant(surv_point_mat)\n summed_wts = np.cumsum(surv_point_mat, axis=1)\n wts = summed_wts[np.int_(np.arange(uncens_nobs)),\n numcensbelow[uncensored]]\n # ^E step\n # See Zhou 2005, section 3.\n self.model._fit_weights = wts\n new_opt_res = self._opt_wtd_nuis_regress(to_test)\n # ^ Uncensored weights' contribution to likelihood value.\n F = self.new_weights\n # ^ M step\n diff = np.abs(new_opt_res - opt_res)\n opt_res = new_opt_res\n iters = iters + 1\n death = np.cumsum(F.flatten()[::-1])\n survivalprob = death[::-1]\n llike = -opt_res + np.sum(np.log(survivalprob[survidx]))\n wtd_km = km.flatten() / np.sum(km)\n survivalmax = np.cumsum(wtd_km[::-1])[::-1]\n llikemax = np.sum(np.log(wtd_km[uncensored])) + \\\n np.sum(np.log(survivalmax[censored]))\n if iters == maxiter:\n warnings.warn('The EM reached the maximum number of iterations',\n IterationLimitWarning)\n return -2 * (llike - llikemax)\n\n def _ci_limits_beta(self, b0, param_num=None):\n \"\"\"\n Returns the difference between the log likelihood for a\n parameter and some critical value.\n\n Parameters\n ---------\n b0: float\n Value of a regression parameter\n\n param_num: int\n Parameter index of b0\n \"\"\"\n return self.test_beta([b0], [param_num])[0] - self.r0\n\n\nclass emplikeAFT(object):\n \"\"\"\n\n Class for estimating and conducting inference in an AFT model.\n\n Parameters\n ---------\n\n endog: nx1 array\n Response variables that are subject to random censoring\n\n exog: nxk array\n Matrix of covariates\n\n censors: nx1 array\n array with entries 0 or 1. 
0 indicates a response was\n censored.\n\n Attributes\n ----------\n\n nobs: float\n Number of observations\n\n endog: array\n Endog attay\n\n exog: array\n Exogenous variable matrix\n\n censors\n Censors array but sets the max(endog) to uncensored\n\n nvar: float\n Number of exogenous variables\n\n uncens_nobs: float\n Number of uncensored observations\n\n uncens_endog: array\n Uncensored response variables\n\n uncens_exog: array\n Exogenous variables of the uncensored observations\n\n Methods\n -------\n\n params:\n Fits model parameters\n\n test_beta:\n Tests if beta = b0 for any vector b0.\n\n Notes\n -----\n\n The data is immediately sorted in order of increasing endogenous\n variables\n\n The last observation is assumed to be uncensored which makes\n estimation and inference possible.\n \"\"\"\n def __init__(self, endog, exog, censors):\n self.nobs = float(np.shape(exog)[0])\n self.endog = endog.reshape(self.nobs, 1)\n self.exog = exog.reshape(self.nobs, -1)\n self.censors = censors.reshape(self.nobs, 1)\n self.nvar = self.exog.shape[1]\n idx = np.lexsort((-self.censors[:, 0], self.endog[:, 0]))\n self.endog = self.endog[idx]\n self.exog = self.exog[idx]\n self.censors = self.censors[idx]\n self.censors[-1] = 1 # Sort in init, not in function\n self.uncens_nobs = np.sum(self.censors)\n mask = self.censors.ravel().astype(bool)\n self.uncens_endog = self.endog[mask, :].reshape(-1, 1)\n self.uncens_exog = self.exog[mask, :]\n\n\n def _is_tied(self, endog, censors):\n \"\"\"\n Indicated if an observation takes the same value as the next\n ordered observation.\n\n Parameters\n ----------\n endog: array\n Models endogenous variable\n censors: array\n arrat indicating a censored array\n\n Returns\n -------\n indic_ties: array\n ties[i]=1 if endog[i]==endog[i+1] and\n censors[i]=censors[i+1]\n \"\"\"\n nobs = int(self.nobs)\n endog_idx = endog[np.arange(nobs - 1)] == (\n endog[np.arange(nobs - 1) + 1])\n censors_idx = censors[np.arange(nobs - 1)] == (\n censors[np.arange(nobs - 1) + 1])\n indic_ties = endog_idx * censors_idx # Both true\n return np.int_(indic_ties)\n\n def _km_w_ties(self, tie_indic, untied_km):\n \"\"\"\n Computes KM estimator value at each observation, taking into acocunt\n ties in the data.\n\n Parameters\n ----------\n tie_indic: 1d array\n Indicates if the i'th observation is the same as the ith +1\n untied_km: 1d array\n Km estimates at each observation assuming no ties.\n\n \"\"\"\n # TODO: Vectorize, even though it is only 1 pass through for any\n # function call\n num_same = 1\n idx_nums = []\n for obs_num in np.arange(int(self.nobs - 1))[::-1]:\n if tie_indic[obs_num] == 1:\n idx_nums.append(obs_num)\n num_same = num_same + 1\n untied_km[obs_num] = untied_km[obs_num + 1]\n elif tie_indic[obs_num] == 0 and num_same > 1:\n idx_nums.append(max(idx_nums) + 1)\n idx_nums = np.asarray(idx_nums)\n untied_km[idx_nums] = untied_km[idx_nums]\n num_same = 1\n idx_nums = []\n return untied_km.reshape(self.nobs, 1)\n\n def _make_km(self, endog, censors):\n \"\"\"\n\n Computes the Kaplan-Meier estimate for the weights in the AFT model\n\n Parameters\n ----------\n endog: nx1 array\n Array of response variables\n censors: nx1 array\n Censor-indicating variable\n\n Returns\n -------\n Kaplan Meier estimate for each observation\n\n Notes\n -----\n\n This function makes calls to _is_tied and km_w_ties to handle ties in\n the data.If a censored observation and an uncensored observation has\n the same value, it is assumed that the uncensored happened first.\n\n \"\"\"\n nobs = 
self.nobs\n num = (nobs - (np.arange(nobs) + 1.))\n denom = ((nobs - (np.arange(nobs) + 1.) + 1.))\n km = (num / denom).reshape(nobs, 1)\n km = km ** np.abs(censors - 1.)\n km = np.cumprod(km) # If no ties, this is kaplan-meier\n tied = self._is_tied(endog, censors)\n wtd_km = self._km_w_ties(tied, km)\n return (censors / wtd_km).reshape(nobs, 1)\n\n def fit(self):\n \"\"\"\n\n Fits an AFT model and returns results instance\n\n Parameters\n ---------\n None\n\n\n Returns\n -------\n Results instance.\n\n Notes\n -----\n To avoid dividing by zero, max(endog) is assumed to be uncensored.\n \"\"\"\n return AFTResults(self)\n\n def predict(self, params, endog=None):\n if endog is None:\n endog = self.endog\n return np.dot(endog, params)\n\n\nclass AFTResults(OptAFT):\n def __init__(self, model):\n self.model = model\n\n def params(self):\n \"\"\"\n\n Fits an AFT model and returns parameters.\n\n Parameters\n ---------\n None\n\n\n Returns\n -------\n Fitted params\n\n Notes\n -----\n To avoid dividing by zero, max(endog) is assumed to be uncensored.\n \"\"\"\n self.model.modif_censors = np.copy(self.model.censors)\n self.model.modif_censors[-1] = 1\n wts = self.model._make_km(self.model.endog, self.model.modif_censors)\n res = WLS(self.model.endog, self.model.exog, wts).fit()\n params = res.params\n return params\n\n def test_beta(self, b0_vals, param_nums, ftol=10 ** - 5, maxiter=30,\n print_weights=1):\n \"\"\"\n Returns the profile log likelihood for regression parameters\n 'param_num' at 'b0_vals.'\n\n Parameters\n ----------\n b0_vals: list\n The value of parameters to be tested\n\n param_num: list\n Which parameters to be tested\n\n maxiter: int, optional\n How many iterations to use in the EM algorithm. Default is 30\n\n ftol: float, optional\n The function tolerance for the EM optimization.\n Default is 10''**''-5\n\n print_weights: bool\n If true, returns the weights tate maximize the profile\n log likelihood. Default is False\n\n Returns\n -------\n\n test_results: tuple\n The log-likelihood and p-pvalue of the test.\n\n Notes\n ----\n\n The function will warn if the EM reaches the maxiter. 
However, when\n optimizing over nuisance parameters, it is possible to reach a\n maximum number of inner iterations for a specific value for the\n nuisance parameters while the resultsof the function are still valid.\n This usually occurs when the optimization over the nuisance parameters\n selects paramater values that yield a log-likihood ratio close to\n infinity.\n\n Examples\n -------\n\n import statsmodels.api as sm\n import numpy as np\n\n # Test parameter is .05 in one regressor no intercept model\n data=sm.datasets.heart.load()\n y = np.log10(data.endog)\n x = data.exog\n cens = data.censors\n model = sm.emplike.emplikeAFT(y, x, cens)\n res=model.test_beta([0], [0])\n >>>res\n >>>(1.4657739632606308, 0.22601365256959183)\n\n #Test slope is 0 in model with intercept\n\n data=sm.datasets.heart.load()\n y = np.log10(data.endog)\n x = data.exog\n cens = data.censors\n model = sm.emplike.emplikeAFT(y, sm.add_constant(x), cens)\n res=model.test_beta([0], [1])\n >>>res\n >>>(4.623487775078047, 0.031537049752572731)\n\n \"\"\"\n censors = self.model.censors\n endog = self.model.endog\n exog = self.model.exog\n uncensored = (censors == 1).flatten()\n censored = (censors == 0).flatten()\n uncens_endog = endog[uncensored]\n uncens_exog = exog[uncensored, :]\n reg_model = OLS(uncens_endog, uncens_exog).fit()\n llr, pval, new_weights = reg_model.el_test(b0_vals, param_nums,\n return_weights=True) # Needs to be changed\n km = self.model._make_km(endog, censors).flatten() # when merged\n uncens_nobs = self.model.uncens_nobs\n F = np.asarray(new_weights).reshape(uncens_nobs)\n # Step 0 ^\n params = self.params()\n survidx = np.where(censors == 0)\n survidx = survidx[0] - np.arange(len(survidx[0]))\n numcensbelow = np.int_(np.cumsum(1 - censors))\n if len(param_nums) == len(params):\n llr = self._EM_test([], F=F, params=params,\n param_nums=param_nums,\n b0_vals=b0_vals, survidx=survidx,\n uncens_nobs=uncens_nobs,\n numcensbelow=numcensbelow, km=km,\n uncensored=uncensored, censored=censored,\n ftol=ftol, maxiter=25)\n return llr, chi2.sf(llr, self.model.nvar)\n else:\n x0 = np.delete(params, param_nums)\n try:\n res = optimize.fmin(self._EM_test, x0,\n (params, param_nums, b0_vals, F, survidx,\n uncens_nobs, numcensbelow, km, uncensored,\n censored, maxiter, ftol), full_output=1,\n disp=0)\n\n llr = res[1]\n return llr, chi2.sf(llr, len(param_nums))\n except np.linalg.linalg.LinAlgError:\n return np.inf, 0\n\n def ci_beta(self, param_num, beta_high, beta_low, sig=.05):\n \"\"\"\n Returns the confidence interval for a regression\n parameter in the AFT model.\n\n Parameters\n ---------\n\n param_num: int\n Parameter number of interest\n\n beta_high: float\n Upper bound for the confidence interval\n\n beta_low:\n Lower bound for the confidence interval\n\n sig: float, optional\n Significance level. Default is .05\n\n Notes\n ----\n If the function returns f(a) and f(b) must have different signs,\n consider widening the search area by adjusting beta_low and\n beta_high.\n\n Also note that this process is computational intensive. There\n are 4 levels of optimization/solving. From outer to inner:\n\n 1) Solving so that llr-critical value = 0\n 2) maximizing over nuisance parameters\n 3) Using EM at each value of nuisamce parameters\n 4) Using the _modified_Newton optimizer at each iteration\n of the EM algorithm.\n\n Also, for very unlikely nuisance parameters, it is possible for\n the EM algorithm to not converge. This is not an indicator\n that the solver did not find the correct solution. 
It just means\n for a specific iteration of the nuisance parameters, the optimizer\n was unable to converge.\n\n If the user desires to verify the success of the optimization,\n it is recommended to test the limits using test_beta.\n\n \"\"\"\n params = self.params()\n self.r0 = chi2.ppf(1 - sig, 1)\n ll = optimize.brentq(self._ci_limits_beta, beta_low,\n params[param_num], (param_num))\n ul = optimize.brentq(self._ci_limits_beta,\n params[param_num], beta_high, (param_num))\n return ll, ul\n",
"#!/usr/bin/python\n'''\nSimplest OpenOpt KSP example;\nrequires FuncDesigner installed.\nFor some solvers limitations on time, cputime, \"enough\" value, basic GUI features are available.\nSee http://openopt.org/KSP for more details\n'''\nfrom openopt import *\nfrom numpy import sin, cos\n\nN = 150\n\nitems = [{'name': 'item %d' % i,'weight': 1.5*(cos(i)+1)**2, \n'volume': 2*sin(i) + 3, 'n': 1 if i < N/3 else 2 if i < 2*N/3 else 3} for i in range(N)]\nconstraints = lambda values: values['volume'] < 10\n\np = KSP('weight', items, constraints = constraints) \nr = p.solve('glpk', iprint = 0) # requires cvxopt and glpk installed, see http://openopt.org/KSP for other solvers\n#Solver: Time Elapsed = 0.73 \tCPU Time Elapsed = 0.55\n#objFunValue: 27.389749 (feasible, MaxResidual = 0)\nprint(r.xf) # {'item 131': 2, 'item 18': 1, 'item 62': 2, 'item 87': 1, 'item 43': 1}\n# pay attention that Python indexation starts from zero: item 0, item 1 ...\n# if fields 'name' are absent, you'll have list of numbers instead of Python dict\n",
"from numpy import hstack, asarray, abs, atleast_1d, \\\nlogical_not, argsort, vstack, sum, array, nan, all\nimport numpy as np\nfrom FuncDesigner import oopoint, FDmisc\nwhere = FDmisc.where\n#from FuncDesigner.boundsurf import boundsurf\n\n\ndef interalg_ODE_routine(p, solver):\n isIP = p.probType == 'IP'\n isODE = p.probType == 'ODE'\n if isODE:\n f, y0, r30, ftol = p.equations, p.x0, p.times, p.ftol\n assert len(f) == 1, 'multiple ODE equations are unimplemented for FuncDesigner yet'\n f = list(f.values())[0]\n t = list(f._getDep())[0]\n elif isIP:\n assert p.n == 1 and p.__isNoMoreThanBoxBounded__()\n f, y0, ftol = p.user.f[0], 0.0, p.ftol\n if p.fTol is not None: ftol = p.fTol\n t = list(f._getDep())[0]\n r30 = p.domain[t]\n p.iterfcn(p.point([nan]*p.n))\n else:\n p.err('incorrect prob type for interalg ODE routine') \n \n eq_var = list(p._x0.keys())[0]\n\n dataType = solver.dataType\n if type(ftol) == int: \n ftol = float(ftol) # e.g. someone set ftol = 1\n # Currently ftol is scalar, in future it can be array of same length as timeArray\n if len(r30) < 2:\n p.err('length ot time array must be at least 2') \n# if any(r30[1:] < r30[:-1]):\n# p.err('currently interalg can handle only time arrays sorted is ascending order') \n# if any(r30 < 0):\n# p.err('currently interalg can handle only time arrays with positive values') \n# if p.times[0] != 0:\n# p.err('currently solver interalg requires times start from zero') \n \n r37 = abs(r30[-1] - r30[0])\n# if len(r30) == 2:\n# r30 = np.linspace(r30[0], r30[-1], 150)\n r28 = asarray(atleast_1d(r30[:-1]), dataType)\n r29 = asarray(atleast_1d(r30[1:]), dataType)\n \n r20_store = array([], float)\n r38_store = array([], float)\n r39_store = array([], float)\n maxActiveNodes = 150000#solver.maxActiveNodes\n\n storedr28 = []\n r27 = []\n r31 = []\n r32 = []\n r33 = ftol\n F = 0.0\n p._Residual = 0\n \n # Main cycle\n for itn in range(p.maxIter+1):\n p.extras['nNodes'].append(r28.size)\n p.extras['nActiveNodes'].append(r28.size)\n mp = oopoint(\n {t: [r28, r29] if r30[-1] > r30[0] else [r29, r28]}, \n skipArrayCast = True\n )\n mp.isMultiPoint = True\n mp.nPoints = r28.size\n \n mp.dictOfFixedFuncs = p.dictOfFixedFuncs\n mp._dictOfRedirectedFuncs = p._dictOfRedirectedFuncs\n mp.maxDistributionSize = p.maxDistributionSize\n mp.surf_preference = True\n tmp = f.interval(mp, ia_surf_level = 2 if isIP else 1)\n if not all(tmp.definiteRange):\n p.err('''\n solving ODE and IP by interalg is implemented for definite (real) range only, \n no NaN values in integrand are allowed''')\n # TODO: perform check on NaNs\n isBoundsurf = hasattr(tmp, 'resolve')\n if isBoundsurf:\n if isIP:\n if tmp.level == 1:\n #adjustr4WithDiscreteVariables(wr4, p)\n cs = oopoint([(v, asarray(0.5*(val[0] + val[1]), dataType)) for v, val in mp.items()])\n cs.dictOfFixedFuncs = p.dictOfFixedFuncs\n cs._dictOfRedirectedFuncs = p._dictOfRedirectedFuncs\n r21, r22 = tmp.values(cs)\n o, a = atleast_1d(r21), atleast_1d(r22)\n r20 = a-o\n approx_value = 0.5*(a+o)\n else:\n assert tmp.level == 2\n ts, te = r28, r29\n A, B = (te**2 + te*ts+ts**2) / 3.0, 0.5 * (te + ts)\n a, b, c = tmp.l.d2.get(t, 0.0), tmp.l.d.get(t, 0.0), tmp.l.c\n val_l = a * A + b * B + c \n a, b, c = tmp.u.d2.get(t, 0.0), tmp.u.d.get(t, 0.0), tmp.u.c\n val_u = a * A + b * B + c \n r20 = val_u - val_l\n approx_value = 0.5 * (val_l + val_u)\n# import pylab, numpy\n# xx = numpy.linspace(-1, 0, 1000)\n# pylab.plot(xx, tmp.l.d2.get(t, 0.0)[1]*xx**2+ tmp.l.d.get(t, 0.0)[1]*xx+ tmp.l.c[1], 'r')\n# pylab.plot(xx, tmp.u.d2.get(t, 
0.0)[1]*xx**2+ tmp.u.d.get(t, 0.0)[1]*xx+ tmp.u.c[1], 'b')\n# pylab.grid()\n# pylab.show()\n \n elif isODE:\n l, u = tmp.l, tmp.u\n assert len(l.d) <= 1 and len(u.d) <= 1 # at most time variable\n l_koeffs, u_koeffs = l.d.get(t, 0.0), u.d.get(t, 0.0)\n l_c, u_c = l.c, u.c\n# dT = r29 - r28 if r30[-1] > r30[0] else r28 - r29\n \n ends = oopoint([(v, asarray(val[1], dataType)) for v, val in mp.items()])\n ends.dictOfFixedFuncs = p.dictOfFixedFuncs\n ends._dictOfRedirectedFuncs = p._dictOfRedirectedFuncs\n ends_L, ends_U = tmp.values(ends)\n \n starts = oopoint([(v, asarray(val[0], dataType)) for v, val in mp.items()])\n starts.dictOfFixedFuncs = p.dictOfFixedFuncs\n starts._dictOfRedirectedFuncs = p._dictOfRedirectedFuncs\n starts_L, starts_U = tmp.values(starts)\n\n# o, a = atleast_1d(r21), atleast_1d(r22)\n\n o, a = tmp.resolve()[0]\n# r20 = 0.5 * u_koeffs * dT + u_c - (0.5 * l_koeffs * dT + l_c)\n r20_end = 0.5 * (ends_U - ends_L)\n r20_start = 0.5 * (starts_U - starts_L)\n r20 = where(r20_end>r20_start, r20_end, r20_start)\n \n# r20 = 0.5 * u_koeffs * dT ** 2 + u_c * dT - (0.5 * l_koeffs * dT ** 2 + l_c * dT)\n# r20 = 0.5*u_koeffs * dT + u_c - ( 0.5*l_koeffs * dT + l_c)\n\n# o = 0.5*l_koeffs * dT + l_c\n# a = 0.5*u_koeffs * dT + u_c\n #assert 0, 'unimplemented'\n else:\n assert 0\n else:\n o, a = atleast_1d(tmp.lb), atleast_1d(tmp.ub)\n ends_L = starts_L = o\n ends_U = starts_U = a\n r20 = a - o\n approx_value = 0.5 * (a+o)\n \n if isODE:\n r36 = atleast_1d(r20 <= 0.95 * r33)\n r36 = np.logical_and(r36, r20 < ftol)\n r36 = np.logical_and(r36, a-o < ftol)\n# else:\n# r36 = atleast_1d(r20 <= 0.95 * r33 / r37)\n\n \n if isODE and isBoundsurf:\n d = r37 #if not isODE or not isBoundsurf else len(r28)\n r36 = np.logical_and(\n atleast_1d(r20_end <= 0.95 * r33 / d), \n atleast_1d(r20_start <= 0.95 * r33 / d)\n )\n r36 &= atleast_1d(r20_end <= ftol)\n r36 &= atleast_1d(r20_start <= ftol)\n else:\n r36 = atleast_1d(r20 <= 0.95 * r33 / r37)\n \n# r36 = np.logical_and(r36, r20 < ftol)\n# r36 = np.logical_and(r36, a-o < ftol)\n\n ind = where(r36)[0]\n if isODE:\n storedr28.append(r28[ind])\n r27.append(r29[ind])\n r31.append(a[ind])\n r32.append(o[ind])\n# r31.append(ends_U[ind])\n# r32.append(ends_L[ind])\n else:\n assert isIP\n #F += 0.5 * sum((r29[ind]-r28[ind])*(a[ind]+o[ind]))\n F += sum((r29[ind]-r28[ind])*approx_value[ind])\n \n if ind.size != 0: \n tmp = abs(r29[ind] - r28[ind])\n Tmp = sum(r20[ind] * tmp) #if not isODE or not isBoundsurf else sum(r20[ind])\n r33 -= Tmp\n if isIP: p._residual += Tmp\n r37 -= sum(tmp)\n \n ind = where(logical_not(r36))[0]\n if 1:#new\n if ind.size == 0 and r20_store.size == 0:\n p.istop = 1000\n p.msg = 'problem has been solved according to required user-defined accuracy %0.1g' % ftol\n break\n if ind.size != 0:\n # TODO: use merge sorted lists\n if r20_store.size != 0:\n r20_store = hstack((r20_store, r20[ind]*abs(r29[ind] - r28[ind])))\n r38_store = hstack((r38_store, r28[ind]))\n r39_store = hstack((r39_store, r29[ind]))\n else:\n r20_store = r20[ind]*abs(r29[ind] - r28[ind])\n r38_store, r39_store = r28[ind], r29[ind]\n ind_a = argsort(r20_store)\n r20_store = r20_store[ind_a]\n r38_store = r38_store[ind_a]\n r39_store = r39_store[ind_a]\n r38_store, r38 = r38_store[:-maxActiveNodes], r38_store[-maxActiveNodes:]\n r39_store, r39 = r39_store[:-maxActiveNodes], r39_store[-maxActiveNodes:]\n r20_store = r20_store[:-maxActiveNodes]\n r40 = 0.5 * (r38 + r39)\n r28 = vstack((r38, r40)).flatten()\n r29 = vstack((r40, r39)).flatten()\n else:\n if ind.size == 0:\n 
p.istop = 1000\n p.msg = 'problem has been solved according to required user-defined accuracy %0.1g' % ftol\n break\n \n r38, r39 = r28[ind], r29[ind]\n r40 = 0.5 * (r38 + r39)\n r28 = vstack((r38, r40)).flatten()\n r29 = vstack((r40, r39)).flatten()\n \n # !!! unestablished !!!\n if isODE:\n p.iterfcn(fk = r33/ftol)\n elif isIP:\n p.iterfcn(xk=array(nan), fk=F, rk = ftol - r33)\n else:\n p.err('bug in interalgODE.py')\n \n if p.istop != 0 : \n break\n \n #print(itn, r28.size)\n\n if isODE:\n \n t0, t1, lb, ub = hstack(storedr28), hstack(r27), hstack(r32), hstack(r31)\n ind = argsort(t0)\n if r30[0] > r30[-1]:\n ind = ind[::-1] # reverse\n t0, t1, lb, ub = t0[ind], t1[ind], lb[ind], ub[ind]\n lb, ub = hstack((y0, y0+(lb*(t1-t0)).cumsum())), hstack((y0, y0+(ub*(t1-t0)).cumsum()))\n #y_var = p._x0.keys()[0]\n #p.xf = p.xk = 0.5*(lb+ub)\n p.extras = {'startTimes': t0, 'endTimes': t1, eq_var:{'infinums': lb, 'supremums': ub}}\n return t0, t1, lb, ub\n elif isIP:\n P = p.point([nan]*p.n)\n P._f = F\n P._mr = ftol - r33\n P._mrName = 'None'\n P._mrInd = 0\n# p.xk = array([nan]*p.n)\n# p.rk = r33\n# p.fk = F\n #p._Residual = \n p.iterfcn(asarray([nan]*p.n), fk=F, rk = ftol - r33)\n else:\n p.err('incorrect prob type in interalg ODE routine')\n",
"from __future__ import absolute_import, print_function, division\n\nimport math\n\nimport numpy as np\n\nfrom numba import unittest_support as unittest\nfrom numba import int32, uint32, float32, float64, vectorize\nfrom ..support import tag\n\n\npi = math.pi\n\n\ndef sinc(x):\n if x == 0.0:\n return 1.0\n else:\n return math.sin(x * pi) / (pi * x)\n\ndef scaled_sinc(x, scale):\n if x == 0.0:\n return scale\n else:\n return scale * (math.sin(x * pi) / (pi * x))\n\ndef vector_add(a, b):\n return a + b\n\n\nclass BaseVectorizeDecor(unittest.TestCase):\n def _run_and_compare(self, numba_func, numpy_func):\n A = np.arange(100, dtype=np.float64)\n result = numba_func(A)\n gold = numpy_func(A)\n self.assertTrue(np.allclose(result, gold))\n\n def _test_template_1(self, target):\n numba_sinc = vectorize(['float64(float64)', 'float32(float32)'],\n target=target)(sinc)\n numpy_sinc = np.vectorize(sinc)\n self._run_and_compare(numba_sinc, numpy_sinc)\n\n def _test_template_2(self, target):\n numba_sinc = vectorize([float64(float64), float32(float32)],\n target=target)(sinc)\n numpy_sinc = np.vectorize(sinc)\n self._run_and_compare(numba_sinc, numpy_sinc)\n\n def _test_template_3(self, target):\n numba_scaled_sinc = vectorize(['float64(float64, uint32)'],\n target=target)(scaled_sinc)\n numpy_scaled_sinc = np.vectorize(scaled_sinc)\n A = np.arange(100, dtype=np.float64)\n scale = np.uint32(3)\n result = numba_scaled_sinc(A, scale)\n gold = numpy_scaled_sinc(A, scale)\n self.assertTrue(np.allclose(result, gold))\n\n def _test_template_4(self, target):\n sig = [int32(int32, int32),\n uint32(uint32, uint32),\n float32(float32, float32),\n float64(float64, float64)]\n basic_ufunc = vectorize(sig, target=target)(vector_add)\n np_ufunc = np.add\n\n def test(ty):\n data = np.linspace(0., 100., 500).astype(ty)\n result = basic_ufunc(data, data)\n gold = np_ufunc(data, data)\n self.assertTrue(np.allclose(gold, result))\n\n test(np.double)\n test(np.float32)\n test(np.int32)\n test(np.uint32)\n\n\nclass TestVectorizeDecor(BaseVectorizeDecor):\n\n @tag('important')\n def test_cpu_1(self):\n self._test_template_1('cpu')\n\n @tag('important')\n def test_parallel_1(self):\n self._test_template_1('parallel')\n\n @tag('important')\n def test_cpu_2(self):\n self._test_template_2('cpu')\n\n @tag('important')\n def test_parallel_2(self):\n self._test_template_2('parallel')\n\n @tag('important')\n def test_cpu_3(self):\n self._test_template_3('cpu')\n\n @tag('important')\n def test_parallel_3(self):\n self._test_template_3('parallel')\n\n @tag('important')\n def test_cpu_4(self):\n self._test_template_4('cpu')\n\n @tag('important')\n def test_parallel_4(self):\n self._test_template_4('parallel')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"__docformat__ = \"restructuredtext en\"\nfrom numpy import asarray, copy, ravel, isnan, isscalar, asscalar#where\nfrom nonOptMisc import where\nfrom openopt.kernel.Point import Point\n\nclass baseSolver:\n def __init__(self):pass\n __name__ = 'Undefined. If you are a user and got the message, inform developers please.'\n __license__ = \"Undefined. If you are a user and got the message, inform developers please.\"\n __authors__ = \"Undefined. If you are a user and got the message, inform developers please.\"\n __alg__ = \"Undefined\"\n __solver__ = \"Undefined. If you are a user and got the message, inform developers please.\"\n __homepage__ = 'Undefined. Use web search'\n __info__ = 'None'\n _requiresBestPointDetection = False\n _requiresFiniteBoxBounds = False\n\n \"\"\" useStopByException should be turned to False\n for those solvers where passing exceptions through C/Fortran code is buggy\n and cannot be catched by OO\n \"\"\"\n useStopByException = True\n\n __optionalDataThatCanBeHandled__ = []\n __isIterPointAlwaysFeasible__ = lambda self, p: p.isUC#TODO: provide possibility of simple True, False\n iterfcnConnected = False\n funcForIterFcnConnection = 'df' # the field is used for non-linear solvers with not-connected iter function\n _canHandleScipySparse = False # True if can handle linear constraints Ax<=b, Aeq x = beq and nonlin cons derivs\n properTextOutput = False\n useLinePoints = False\n\n # these ones below are used in iterfcn (ooIter.py)\n # to decode input args\n # and can be overdetermined by child class (LP, QP, network etc)\n __expectedArgs__ = ['xk', 'fk', 'rk'] #point, objFunVal, max residual\n def __decodeIterFcnArgs__(self, p, *args, **kwargs):\n \"\"\"\n decode and assign x, f, maxConstr\n (and/or other fields) to p.iterValues\n \"\"\"\n fArg = True\n\n if len(args)>0 and isinstance(args[0], Point):\n if len(args) != 1: p.err('incorrect iterfcn args, if you see this contact OO developers')\n point = args[0]\n p.xk, p.fk = point.x, point.f()\n p.rk, p.rtk, p.rik = point.mr(True)\n p.nNaNs = point.nNaNs()\n if p.solver._requiresBestPointDetection and (p.iter == 0 or point.betterThan(p._bestPoint)): p._bestPoint = point\n else:\n if len(args)>0: p.xk = args[0]\n elif 'xk' in kwargs.keys(): p.xk = kwargs['xk']\n elif not hasattr(p, 'xk'): p.err('iterfcn must get x value, if you see it inform oo developers')\n if p._baseClassName == 'NonLin': \n C = p.c(p.xk)\n H = p.h(p.xk)\n p.nNaNs = len(where(isnan(C))[0]) + len(where(isnan(H))[0])\n if p.solver._requiresBestPointDetection:\n currPoint = p.point(p.xk)\n if p.iter == 0 or currPoint.betterThan(p._bestPoint): p._bestPoint = currPoint\n if len(args)>1: p.fk = args[1]\n elif 'fk' in kwargs.keys(): p.fk = kwargs['fk']\n else: fArg = False\n\n if len(args)>2: \n #p.pWarn('executing deprecated code, inform developers')\n p.rk = args[2]\n elif 'rk' in kwargs.keys(): \n #p.pWarn('executing deprecated code, inform developers')\n p.rk = kwargs['rk']\n else:\n p.rk, p.rtk, p.rik = p.getMaxResidual(p.xk, True)\n \n \n p.iterValues.r.append(p.rk)\n if p.probType != 'IP':\n # recalculations are not performed\n p.rk, p.rtk, p.rik = p.getMaxResidual(p.xk, True)\n p.iterValues.rt.append(p.rtk)\n p.iterValues.ri.append(p.rik)\n if p._baseClassName == 'NonLin': p.iterValues.nNaNs.append(p.nNaNs)\n\n #TODO: handle kwargs correctly! 
(decodeIterFcnArgs)\n\n# for key in kwargs.keys():\n# if p.debug: print 'decodeIterFcnArgs>>', key, kwargs[key]\n# setattr(p, key, kwargs[key])\n\n p.iterValues.x.append(copy(p.xk))\n if not p.storeIterPoints and len(p.iterValues.x) > 2:\n p.iterValues.x.pop(0)\n \n if not fArg:\n p.Fk = p.F(p.xk)\n p.fk = copy(p.Fk)\n else:\n if asarray(p.fk).size >1:\n if p.debug and p.iter <= 1: p.warn('please fix solver iter output func, objFuncVal should be single number (use p.F)')\n p.Fk = p.objFuncMultiple2Single(asarray(p.fk))\n else:\n p.Fk = p.fk\n\n #if p.isObjFunValueASingleNumber: p.Fk = p.fk\n #else: p.Fk = p.objFuncMultiple2Single(fv)\n\n v = ravel(p.Fk)[0]\n if p.invertObjFunc: v = -v\n\n p.iterValues.f.append(v)\n \n if not isscalar(p.fk) and p.fk.size == 1:\n p.fk = asscalar(p.fk)\n \n\n",
"import random\nimport math\n#import logging\nimport numpy as np\nfrom openopt.kernel.setDefaultIterFuncs import IS_MAX_FUN_EVALS_REACHED\n\ndef P(prev_score,next_score,temperature):\n if next_score > prev_score:\n return 1.0\n else:\n return math.exp( -abs(next_score-prev_score)/temperature )\n\nclass ObjectiveFunction:\n '''class to wrap an objective function and \n keep track of the best solution evaluated'''\n def __init__(self,objective_function):\n self.objective_function=objective_function\n self.best=None\n self.best_score=None\n \n def __call__(self,solution):\n score=self.objective_function(solution)\n if self.best is None or score > self.best_score:\n self.best_score=score\n self.best=solution\n# logging.info('new best score: %f',self.best_score)\n return score\n\ndef kirkpatrick_cooling(start_temp,alpha):\n T=start_temp\n while True:\n yield T\n T=alpha*T\n\ndef anneal(init_function,move_operator,objective_function,max_evaluations,start_temp,alpha, prob = None):\n \n # wrap the objective function (so we record the best)\n objective_function=ObjectiveFunction(objective_function)\n \n current=init_function()\n current_score=objective_function(current)\n num_evaluations=1\n \n cooling_schedule=kirkpatrick_cooling(start_temp,alpha)\n \n# logging.info('anneal started: score=%f',current_score)\n \n for temperature in cooling_schedule:\n done = False\n # examine moves around our current position\n for next in move_operator(current):\n if num_evaluations >= max_evaluations:\n if prob is not None:\n prob.istop = IS_MAX_FUN_EVALS_REACHED\n prob.msg = 'max objfunc evals limit (p.maxFunEvals=%d) has been reached' % prob.maxFunEvals\n done=True\n break\n \n next_score=objective_function(next)\n num_evaluations+=1\n \n # probablistically accept this solution\n # always accepting better solutions\n p=P(current_score,next_score,temperature)\n \n if not num_evaluations % 64 and prob is not None:\n prob.iterfcn(np.array(current), -current_score)\n if prob.istop != 0:\n return (num_evaluations, objective_function.best_score, objective_function.best) \n\n if random.random() < p:\n current=next\n current_score=next_score\n break\n \n \n # see if completely finished\n if done: break\n \n best_score=objective_function.best_score\n best=objective_function.best\n# logging.info('final temperature: %f',temperature)\n# logging.info('anneal finished: num_evaluations=%d, best_score=%f',num_evaluations,best_score)\n return (num_evaluations,best_score,best)\n",
"\"\"\"\nFunctions that are general enough to use for any model fitting. The idea is\nto untie these from LikelihoodModel so that they may be re-used generally.\n\"\"\"\nfrom __future__ import print_function\nimport distutils.version\n\nfrom scipy import __version__ as scipy_version\nimport numpy as np\nfrom scipy import optimize\n\ndef _check_method(method, methods):\n if method not in methods:\n message = \"Unknown fit method %s\" % method\n raise ValueError(message)\n\n\nclass Optimizer(object):\n\n def _fit(self, objective, gradient, start_params, fargs, kwargs,\n hessian=None, method='newton', maxiter=100, full_output=True,\n disp=True, callback=None, retall=False):\n \"\"\"\n Fit function for any model with an objective function.\n\n Parameters\n ----------\n start_params : array-like, optional\n Initial guess of the solution for the loglikelihood maximization.\n The default is an array of zeros.\n method : str {'newton','nm','bfgs','powell','cg','ncg','basinhopping'}\n Method can be 'newton' for Newton-Raphson, 'nm' for Nelder-Mead,\n 'bfgs' for Broyden-Fletcher-Goldfarb-Shanno, 'powell' for modified\n Powell's method, 'cg' for conjugate gradient, 'ncg' for Newton-\n conjugate gradient or 'basinhopping' for global basin-hopping\n solver, if available. `method` determines which solver from\n scipy.optimize is used. The explicit arguments in `fit` are passed\n to the solver, with the exception of the basin-hopping solver. Each\n solver has several optional arguments that are not the same across\n solvers. See the notes section below (or scipy.optimize) for the\n available arguments and for the list of explicit arguments that the\n basin-hopping solver supports..\n maxiter : int\n The maximum number of iterations to perform.\n full_output : bool\n Set to True to have all available output in the Results object's\n mle_retvals attribute. The output is dependent on the solver.\n See LikelihoodModelResults notes section for more information.\n disp : bool\n Set to True to print convergence messages.\n fargs : tuple\n Extra arguments passed to the likelihood function, i.e.,\n loglike(x,*args)\n callback : callable callback(xk)\n Called after each iteration, as callback(xk), where xk is the\n current parameter vector.\n retall : bool\n Set to True to return list of solutions at each iteration.\n Available in Results object's mle_retvals attribute.\n\n Returns\n -------\n xopt : array\n The solution to the objective function\n retvals : dict, None\n If `full_output` is True then this is a dictionary which holds\n information returned from the solver used. If it is False, this is\n None.\n optim_settings : dict\n A dictionary that contains the parameters passed to the solver.\n\n Notes\n -----\n The 'basinhopping' solver ignores `maxiter`, `retall`, `full_output`\n explicit arguments.\n\n Optional arguments for the solvers (available in Results.mle_settings)::\n\n 'newton'\n tol : float\n Relative error in params acceptable for convergence.\n 'nm' -- Nelder Mead\n xtol : float\n Relative error in params acceptable for convergence\n ftol : float\n Relative error in loglike(params) acceptable for\n convergence\n maxfun : int\n Maximum number of function evaluations to make.\n 'bfgs'\n gtol : float\n Stop when norm of gradient is less than gtol.\n norm : float\n Order of norm (np.Inf is max, -np.Inf is min)\n epsilon\n If fprime is approximated, use this value for the step\n size. 
Only relevant if LikelihoodModel.score is None.\n 'cg'\n gtol : float\n Stop when norm of gradient is less than gtol.\n norm : float\n Order of norm (np.Inf is max, -np.Inf is min)\n epsilon : float\n If fprime is approximated, use this value for the step\n size. Can be scalar or vector. Only relevant if\n Likelihoodmodel.score is None.\n 'ncg'\n fhess_p : callable f'(x,*args)\n Function which computes the Hessian of f times an arbitrary\n vector, p. Should only be supplied if\n LikelihoodModel.hessian is None.\n avextol : float\n Stop when the average relative error in the minimizer\n falls below this amount.\n epsilon : float or ndarray\n If fhess is approximated, use this value for the step size.\n Only relevant if Likelihoodmodel.hessian is None.\n 'powell'\n xtol : float\n Line-search error tolerance\n ftol : float\n Relative error in loglike(params) for acceptable for\n convergence.\n maxfun : int\n Maximum number of function evaluations to make.\n start_direc : ndarray\n Initial direction set.\n 'basinhopping'\n niter : integer\n The number of basin hopping iterations.\n niter_success : integer\n Stop the run if the global minimum candidate remains the\n same for this number of iterations.\n T : float\n The \"temperature\" parameter for the accept or reject\n criterion. Higher \"temperatures\" mean that larger jumps\n in function value will be accepted. For best results\n `T` should be comparable to the separation (in function\n value) between local minima.\n stepsize : float\n Initial step size for use in the random displacement.\n interval : integer\n The interval for how often to update the `stepsize`.\n minimizer : dict\n Extra keyword arguments to be passed to the minimizer\n `scipy.optimize.minimize()`, for example 'method' - the\n minimization method (e.g. 'L-BFGS-B'), or 'tol' - the\n tolerance for termination. 
Other arguments are mapped from\n explicit argument of `fit`:\n - `args` <- `fargs`\n - `jac` <- `score`\n - `hess` <- `hess`\n \"\"\"\n #TODO: generalize the regularization stuff\n # Extract kwargs specific to fit_regularized calling fit\n extra_fit_funcs = kwargs.setdefault('extra_fit_funcs', dict())\n\n methods = ['newton', 'nm', 'bfgs', 'lbfgs', 'powell', 'cg', 'ncg',\n 'basinhopping']\n methods += extra_fit_funcs.keys()\n method = method.lower()\n _check_method(method, methods)\n\n fit_funcs = {\n 'newton': _fit_newton,\n 'nm': _fit_nm, # Nelder-Mead\n 'bfgs': _fit_bfgs,\n 'lbfgs': _fit_lbfgs,\n 'cg': _fit_cg,\n 'ncg': _fit_ncg,\n 'powell': _fit_powell,\n 'basinhopping': _fit_basinhopping,\n }\n\n #NOTE: fit_regularized checks the methods for these but it should be\n # moved up probably\n if extra_fit_funcs:\n fit_funcs.update(extra_fit_funcs)\n\n func = fit_funcs[method]\n xopt, retvals = func(objective, gradient, start_params, fargs, kwargs,\n disp=disp, maxiter=maxiter, callback=callback,\n retall=retall, full_output=full_output,\n hess=hessian)\n\n # this is stupid TODO: just change this to something sane\n # no reason to copy scipy here\n if not full_output: # xopt should be None and retvals is argmin\n xopt = retvals\n retvals = None\n\n optim_settings = {'optimizer': method, 'start_params': start_params,\n 'maxiter': maxiter, 'full_output': full_output,\n 'disp': disp, 'fargs': fargs, 'callback': callback,\n 'retall': retall}\n optim_settings.update(kwargs)\n # set as attributes or return?\n return xopt, retvals, optim_settings\n\n def _fit_constrained(self, params):\n \"\"\"\n TODO: how to add constraints?\n\n Something like\n sm.add_constraint(Model, func)\n\n or\n\n model_instance.add_constraint(func)\n model_instance.add_constraint(\"x1 + x2 = 2\")\n result = model_instance.fit()\n \"\"\"\n pass\n\n\n def _fit_regularized(self, params):\n #TODO: code won't necessarily be general here. 
3 options.\n # 1) setup for scipy.optimize.fmin_sqlsqp\n # 2) setup for cvxopt\n # 3) setup for openopt\n pass\n\n########################################\n# Helper functions to fit\n\ndef _fit_newton(f, score, start_params, fargs, kwargs, disp=True,\n maxiter=100, callback=None, retall=False,\n full_output=True, hess=None, ridge_factor=1e-10):\n tol = kwargs.setdefault('tol', 1e-8)\n iterations = 0\n oldparams = np.inf\n newparams = np.asarray(start_params)\n if retall:\n history = [oldparams, newparams]\n while (iterations < maxiter and np.any(np.abs(newparams -\n oldparams) > tol)):\n H = np.asarray(hess(newparams))\n # regularize Hessian, not clear what ridge factor should be\n # keyword option with absolute default 1e-10, see #1847\n if not np.all(ridge_factor == 0):\n H[np.diag_indices(H.shape[0])] += ridge_factor\n oldparams = newparams\n newparams = oldparams - np.dot(np.linalg.inv(H),\n score(oldparams))\n if retall:\n history.append(newparams)\n if callback is not None:\n callback(newparams)\n iterations += 1\n fval = f(newparams, *fargs) # this is the negative likelihood\n if iterations == maxiter:\n warnflag = 1\n if disp:\n print(\"Warning: Maximum number of iterations has been \"\n \"exceeded.\")\n print(\" Current function value: %f\" % fval)\n print(\" Iterations: %d\" % iterations)\n else:\n warnflag = 0\n if disp:\n print(\"Optimization terminated successfully.\")\n print(\" Current function value: %f\" % fval)\n print(\" Iterations %d\" % iterations)\n if full_output:\n (xopt, fopt, niter,\n gopt, hopt) = (newparams, f(newparams, *fargs),\n iterations, score(newparams),\n hess(newparams))\n converged = not warnflag\n retvals = {'fopt': fopt, 'iterations': niter, 'score': gopt,\n 'Hessian': hopt, 'warnflag': warnflag,\n 'converged': converged}\n if retall:\n retvals.update({'allvecs': history})\n\n else:\n retvals = newparams\n xopt = None\n\n return xopt, retvals\n\n\ndef _fit_bfgs(f, score, start_params, fargs, kwargs, disp=True,\n maxiter=100, callback=None, retall=False,\n full_output=True, hess=None):\n gtol = kwargs.setdefault('gtol', 1.0000000000000001e-05)\n norm = kwargs.setdefault('norm', np.Inf)\n epsilon = kwargs.setdefault('epsilon', 1.4901161193847656e-08)\n retvals = optimize.fmin_bfgs(f, start_params, score, args=fargs,\n gtol=gtol, norm=norm, epsilon=epsilon,\n maxiter=maxiter, full_output=full_output,\n disp=disp, retall=retall, callback=callback)\n if full_output:\n if not retall:\n xopt, fopt, gopt, Hinv, fcalls, gcalls, warnflag = retvals\n else:\n (xopt, fopt, gopt, Hinv, fcalls,\n gcalls, warnflag, allvecs) = retvals\n converged = not warnflag\n retvals = {'fopt': fopt, 'gopt': gopt, 'Hinv': Hinv,\n 'fcalls': fcalls, 'gcalls': gcalls, 'warnflag':\n warnflag, 'converged': converged}\n if retall:\n retvals.update({'allvecs': allvecs})\n else:\n xopt = None\n\n return xopt, retvals\n\n\ndef _fit_lbfgs(f, score, start_params, fargs, kwargs, disp=True,\n maxiter=100, callback=None, retall=False,\n full_output=True, hess=None):\n \"\"\"\n Parameters\n ----------\n f : function\n Returns negative log likelihood given parameters.\n score : function\n Returns gradient of negative log likelihood with respect to params.\n\n Notes\n -----\n Within the mle part of statsmodels, the log likelihood function and\n its gradient with respect to the parameters do not have notationally\n consistent sign.\n \"\"\"\n\n # Use unconstrained optimization by default.\n bounds = kwargs.setdefault('bounds', [(None, None)] * len(start_params))\n\n # Pass the following keyword 
argument names through to fmin_l_bfgs_b\n # if they are present in kwargs, otherwise use the fmin_l_bfgs_b\n # default values.\n names = ('m', 'pgtol', 'factr', 'maxfun', 'epsilon', 'approx_grad')\n extra_kwargs = dict((x, kwargs[x]) for x in names if x in kwargs)\n\n # Extract values for the options related to the gradient.\n approx_grad = kwargs.get('approx_grad', False)\n loglike_and_score = kwargs.get('loglike_and_score', None)\n epsilon = kwargs.get('epsilon', None)\n\n # The approx_grad flag has superpowers nullifying the score function arg.\n if approx_grad:\n score = None\n\n # Choose among three options for dealing with the gradient (the gradient\n # of a log likelihood function with respect to its parameters\n # is more specifically called the score in statistics terminology).\n # The first option is to use the finite-differences\n # approximation that is built into the fmin_l_bfgs_b optimizer.\n # The second option is to use the provided score function.\n # The third option is to use the score component of a provided\n # function that simultaneously evaluates the log likelihood and score.\n if epsilon and not approx_grad:\n raise ValueError('a finite-differences epsilon was provided '\n 'even though we are not using approx_grad')\n if approx_grad and loglike_and_score:\n raise ValueError('gradient approximation was requested '\n 'even though an analytic loglike_and_score function '\n 'was given')\n if loglike_and_score:\n func = lambda p, *a : tuple(-x for x in loglike_and_score(p, *a))\n elif score:\n func = f\n extra_kwargs['fprime'] = score\n elif approx_grad:\n func = f\n\n # Customize the fmin_l_bfgs_b call according to the scipy version.\n # Old scipy does not support maxiter and callback.\n scipy_version_curr = distutils.version.LooseVersion(scipy_version)\n scipy_version_12 = distutils.version.LooseVersion('0.12.0')\n if scipy_version_curr < scipy_version_12:\n retvals = optimize.fmin_l_bfgs_b(func, start_params, args=fargs,\n bounds=bounds, disp=disp,\n **extra_kwargs)\n else:\n retvals = optimize.fmin_l_bfgs_b(func, start_params, maxiter=maxiter,\n callback=callback, args=fargs,\n bounds=bounds, disp=disp,\n **extra_kwargs)\n\n if full_output:\n xopt, fopt, d = retvals\n # The warnflag is\n # 0 if converged\n # 1 if too many function evaluations or too many iterations\n # 2 if stopped for another reason, given in d['task']\n warnflag = d['warnflag']\n converged = (warnflag == 0)\n gopt = d['grad']\n fcalls = d['funcalls']\n retvals = {'fopt': fopt, 'gopt': gopt, 'fcalls': fcalls,\n 'warnflag': warnflag, 'converged': converged}\n else:\n xopt = None\n\n return xopt, retvals\n\n\ndef _fit_nm(f, score, start_params, fargs, kwargs, disp=True,\n maxiter=100, callback=None, retall=False,\n full_output=True, hess=None):\n xtol = kwargs.setdefault('xtol', 0.0001)\n ftol = kwargs.setdefault('ftol', 0.0001)\n maxfun = kwargs.setdefault('maxfun', None)\n retvals = optimize.fmin(f, start_params, args=fargs, xtol=xtol,\n ftol=ftol, maxiter=maxiter, maxfun=maxfun,\n full_output=full_output, disp=disp, retall=retall,\n callback=callback)\n if full_output:\n if not retall:\n xopt, fopt, niter, fcalls, warnflag = retvals\n else:\n xopt, fopt, niter, fcalls, warnflag, allvecs = retvals\n converged = not warnflag\n retvals = {'fopt': fopt, 'iterations': niter,\n 'fcalls': fcalls, 'warnflag': warnflag,\n 'converged': converged}\n if retall:\n retvals.update({'allvecs': allvecs})\n else:\n xopt = None\n\n return xopt, retvals\n\n\ndef _fit_cg(f, score, start_params, fargs, kwargs, 
disp=True,\n maxiter=100, callback=None, retall=False,\n full_output=True, hess=None):\n gtol = kwargs.setdefault('gtol', 1.0000000000000001e-05)\n norm = kwargs.setdefault('norm', np.Inf)\n epsilon = kwargs.setdefault('epsilon', 1.4901161193847656e-08)\n retvals = optimize.fmin_cg(f, start_params, score, gtol=gtol, norm=norm,\n epsilon=epsilon, maxiter=maxiter,\n full_output=full_output, disp=disp,\n retall=retall, callback=callback)\n if full_output:\n if not retall:\n xopt, fopt, fcalls, gcalls, warnflag = retvals\n else:\n xopt, fopt, fcalls, gcalls, warnflag, allvecs = retvals\n converged = not warnflag\n retvals = {'fopt': fopt, 'fcalls': fcalls, 'gcalls': gcalls,\n 'warnflag': warnflag, 'converged': converged}\n if retall:\n retvals.update({'allvecs': allvecs})\n\n else:\n xopt = None\n\n return xopt, retvals\n\n\ndef _fit_ncg(f, score, start_params, fargs, kwargs, disp=True,\n maxiter=100, callback=None, retall=False,\n full_output=True, hess=None):\n fhess_p = kwargs.setdefault('fhess_p', None)\n avextol = kwargs.setdefault('avextol', 1.0000000000000001e-05)\n epsilon = kwargs.setdefault('epsilon', 1.4901161193847656e-08)\n retvals = optimize.fmin_ncg(f, start_params, score, fhess_p=fhess_p,\n fhess=hess, args=fargs, avextol=avextol,\n epsilon=epsilon, maxiter=maxiter,\n full_output=full_output, disp=disp,\n retall=retall, callback=callback)\n if full_output:\n if not retall:\n xopt, fopt, fcalls, gcalls, hcalls, warnflag = retvals\n else:\n xopt, fopt, fcalls, gcalls, hcalls, warnflag, allvecs =\\\n retvals\n converged = not warnflag\n retvals = {'fopt': fopt, 'fcalls': fcalls, 'gcalls': gcalls,\n 'hcalls': hcalls, 'warnflag': warnflag,\n 'converged': converged}\n if retall:\n retvals.update({'allvecs': allvecs})\n else:\n xopt = None\n\n return xopt, retvals\n\n\ndef _fit_powell(f, score, start_params, fargs, kwargs, disp=True,\n maxiter=100, callback=None, retall=False,\n full_output=True, hess=None):\n xtol = kwargs.setdefault('xtol', 0.0001)\n ftol = kwargs.setdefault('ftol', 0.0001)\n maxfun = kwargs.setdefault('maxfun', None)\n start_direc = kwargs.setdefault('start_direc', None)\n retvals = optimize.fmin_powell(f, start_params, args=fargs, xtol=xtol,\n ftol=ftol, maxiter=maxiter, maxfun=maxfun,\n full_output=full_output, disp=disp,\n retall=retall, callback=callback,\n direc=start_direc)\n if full_output:\n if not retall:\n xopt, fopt, direc, niter, fcalls, warnflag = retvals\n else:\n xopt, fopt, direc, niter, fcalls, warnflag, allvecs =\\\n retvals\n converged = not warnflag\n retvals = {'fopt': fopt, 'direc': direc, 'iterations': niter,\n 'fcalls': fcalls, 'warnflag': warnflag,\n 'converged': converged}\n if retall:\n retvals.update({'allvecs': allvecs})\n else:\n xopt = None\n\n return xopt, retvals\n\n\ndef _fit_basinhopping(f, score, start_params, fargs, kwargs, disp=True,\n maxiter=100, callback=None, retall=False,\n full_output=True, hess=None):\n if not 'basinhopping' in vars(optimize):\n msg = 'basinhopping solver is not available, use e.g. 
bfgs instead!'\n raise ValueError(msg)\n\n from copy import copy\n kwargs = copy(kwargs)\n niter = kwargs.setdefault('niter', 100)\n niter_success = kwargs.setdefault('niter_success', None)\n T = kwargs.setdefault('T', 1.0)\n stepsize = kwargs.setdefault('stepsize', 0.5)\n interval = kwargs.setdefault('interval', 50)\n minimizer_kwargs = kwargs.get('minimizer', {})\n minimizer_kwargs['args'] = fargs\n minimizer_kwargs['jac'] = score\n method = minimizer_kwargs.get('method', None)\n if method and method != 'L-BFGS-B': # l_bfgs_b doesn't take a hessian\n minimizer_kwargs['hess'] = hess\n\n res = optimize.basinhopping(f, start_params,\n minimizer_kwargs=minimizer_kwargs,\n niter=niter, niter_success=niter_success,\n T=T, stepsize=stepsize, disp=disp,\n callback=callback, interval=interval)\n if full_output:\n xopt, fopt, niter, fcalls = res.x, res.fun, res.nit, res.nfev\n converged = 'completed successfully' in res.message[0]\n retvals = {'fopt': fopt, 'iterations': niter,\n 'fcalls': fcalls, 'converged': converged}\n\n else:\n xopt = None\n\n return xopt, retvals\n",
"from openopt import EIG\n\n# create a 5 x 5 matrix\nimport numpy.random as nr\nnr.seed(0)\nN = 5\nA = nr.rand(N, N) \n\n#define prob\np = EIG(A, goal = {'lm':3}) # search for 3 eigenvalues of largest magnitude\n# or goal={'largest magnitude':3}, with or without space inside, case-insensitive\n# for whole list of available goals see http://openopt.org/EIG\n\n#solve\nr = p.solve('arpack') # arpack is name of the involved solver\n\nprint(r.eigenvalues) # [ 0.14607289-0.19602952j -0.65372843+0.j 2.89776724+0.j ]\n# for i-th eigenvalue r.eigenvectors[:,i] is corresponding vector, \n# as well as it is done for numpy/scipy functions\nprint(r.eigenvectors) \n'''\n[[-0.10391145-0.56334829j 0.19592536+0.j 0.43733688+0.j ]\n [-0.20999235+0.1812288j -0.03219327+0.j 0.49662623+0.j ]\n [-0.21334642+0.21648181j -0.55544796+0.j 0.42977207+0.j ]\n [ 0.34828527+0.36295959j 0.62338178+0.j 0.38727512+0.j ]\n [ 0.04820760-0.49714496j -0.51327338+0.j 0.47687818+0.j ]]\n '''\n",
"from openopt import NLP\nfrom numpy import cos, arange, ones, asarray, abs, zeros\nN = 30\nM = 5\nff = lambda x: ((x-M)**2).sum()\np = NLP(ff, cos(arange(N)))\np.df = lambda x: 2*(x-M)\np.c = lambda x: [2* x[0] **4-32, x[1]**2+x[2]**2 - 8]\n\ndef dc(x):\n r = zeros((2, p.n))\n r[0,0] = 2 * 4 * x[0]**3\n r[1,1] = 2 * x[1]\n r[1,2] = 2 * x[2]\n return r\np.dc = dc\n\nh1 = lambda x: 1e1*(x[-1]-1)**4\nh2 = lambda x: (x[-2]-1.5)**4\np.h = lambda x: (h1(x), h2(x))\n\ndef dh(x):\n r = zeros((2, p.n))\n r[0,-1] = 1e1*4*(x[-1]-1)**3\n r[1,-2] = 4*(x[-2]-1.5)**3\n return r\np.dh = dh\n\np.lb = -6*ones(N)\np.ub = 6*ones(N)\np.lb[3] = 5.5\np.ub[4] = 4.5\n\n#r = p.solve('ipopt', showLS=0, xtol=1e-7, maxIter = 1504)\n#solver = 'ipopt'\nsolver = 'ralg'\n#solver = 'scipy_slsqp'\n#solver = 'algencan'\nr = p.solve(solver, maxIter = 1504, plot=1)\n#!! fmin_cobyla can't use user-supplied gradient\n#r = p.solve('scipy_cobyla')\n\n",
"# -*- coding: utf-8 -*-\n\n########################################################################\n#\n# License: BSD\n# Created: September 4, 2002\n# Author: Francesc Alted - [email protected]\n#\n# $Id$\n#\n########################################################################\n\n\"\"\"Here is defined the Table class.\"\"\"\n\nimport math\nimport operator\nimport os.path\nimport sys\nimport warnings\n\nfrom functools import reduce as _reduce\nfrom time import time\n\nimport numpy\nimport numexpr\n\nfrom tables import tableextension\nfrom tables.lrucacheextension import ObjectCache, NumCache\nfrom tables.atom import Atom\nfrom tables.conditions import compile_condition\nfrom numexpr.necompiler import (\n getType as numexpr_getType, double, is_cpu_amd_intel)\nfrom numexpr.expressions import functions as numexpr_functions\nfrom tables.flavor import flavor_of, array_as_internal, internal_to_flavor\nfrom tables.utils import is_idx, lazyattr, SizeType, NailedDict as CacheDict\nfrom tables.leaf import Leaf\nfrom tables.description import (\n IsDescription, Description, Col, descr_from_dtype)\nfrom tables.exceptions import (NodeError, HDF5ExtError, PerformanceWarning,\n OldIndexWarning, NoSuchNodeError)\nfrom tables.utilsextension import get_nested_field\n\nfrom tables.path import join_path, split_path\nfrom tables.index import (\n OldIndex, default_index_filters, default_auto_index, Index, IndexesDescG,\n IndexesTableG)\n\nprofile = False\n# profile = True # Uncomment for profiling\nif profile:\n from tables.utils import show_stats\n\nfrom tables._past import previous_api, previous_api_property\n\n# 2.2: Added support for complex types. Introduced in version 0.9.\n# 2.2.1: Added suport for time types.\n# 2.3: Changed the indexes naming schema.\n# 2.4: Changed indexes naming schema (again).\n# 2.5: Added the FIELD_%d_FILL attributes.\n# 2.6: Added the FLAVOR attribute (optional).\n# 2.7: Numeric and numarray flavors are gone.\nobversion = \"2.7\" # The Table VERSION number\n\n\ntry:\n # int_, long_ are only available in numexpr >= 2.1\n from numexpr.necompiler import int_, long_\nexcept ImportError:\n int_ = int\n long_ = long\n\n# Maps NumPy types to the types used by Numexpr.\n_nxtype_from_nptype = {\n numpy.bool_: bool,\n numpy.int8: int_,\n numpy.int16: int_,\n numpy.int32: int_,\n numpy.int64: long_,\n numpy.uint8: int_,\n numpy.uint16: int_,\n numpy.uint32: long_,\n numpy.uint64: long_,\n numpy.float32: float,\n numpy.float64: double,\n numpy.complex64: complex,\n numpy.complex128: complex,\n numpy.bytes_: bytes,\n}\n\nif sys.version_info[0] > 2:\n _nxtype_from_nptype[numpy.str_] = str\n\nif hasattr(numpy, 'float16'):\n _nxtype_from_nptype[numpy.float16] = float # XXX: check\nif hasattr(numpy, 'float96'):\n _nxtype_from_nptype[numpy.float96] = double # XXX: check\nif hasattr(numpy, 'float128'):\n _nxtype_from_nptype[numpy.float128] = double # XXX: check\nif hasattr(numpy, 'complec192'):\n _nxtype_from_nptype[numpy.complex192] = complex # XXX: check\nif hasattr(numpy, 'complex256'):\n _nxtype_from_nptype[numpy.complex256] = complex # XXX: check\n\n\n# The NumPy scalar type corresponding to `SizeType`.\n_npsizetype = numpy.array(SizeType(0)).dtype.type\n\n\ndef _index_name_of(node):\n return '_i_%s' % node._v_name\n\n_indexNameOf = previous_api(_index_name_of)\n\n\ndef _index_pathname_of(node):\n nodeParentPath = split_path(node._v_pathname)[0]\n return join_path(nodeParentPath, _index_name_of(node))\n\n_indexPathnameOf = previous_api(_index_pathname_of)\n\n\ndef 
_index_pathname_of_column(table, colpathname):\n return join_path(_index_pathname_of(table), colpathname)\n\n_indexPathnameOfColumn = previous_api(_index_pathname_of_column)\n\n# The next are versions that work with just paths (i.e. we don't need\n# a node instance for using them, which can be critical in certain\n# situations)\n\n\ndef _index_name_of_(nodeName):\n return '_i_%s' % nodeName\n\n_indexNameOf_ = previous_api(_index_name_of_)\n\n\ndef _index_pathname_of_(nodePath):\n nodeParentPath, nodeName = split_path(nodePath)\n return join_path(nodeParentPath, _index_name_of_(nodeName))\n\n_indexPathnameOf_ = previous_api(_index_pathname_of_)\n\n\ndef _index_pathname_of_column_(tablePath, colpathname):\n return join_path(_index_pathname_of_(tablePath), colpathname)\n\n_indexPathnameOfColumn_ = previous_api(_index_pathname_of_column_)\n\n\ndef _table__setautoindex(self, auto):\n auto = bool(auto)\n try:\n indexgroup = self._v_file._get_node(_index_pathname_of(self))\n except NoSuchNodeError:\n indexgroup = create_indexes_table(self)\n indexgroup.auto = auto\n # Update the cache in table instance as well\n self._autoindex = auto\n\n_table__setautoIndex = previous_api(_table__setautoindex)\n\n\n# **************** WARNING! ***********************\n# This function can be called during the destruction time of a table\n# so measures have been taken so that it doesn't have to revive\n# another node (which can fool the LRU cache). The solution devised\n# has been to add a cache for autoindex (Table._autoindex), populate\n# it in creation time of the cache (which is a safe period) and then\n# update the cache whenever it changes.\n# This solves the error when running test_indexes.py ManyNodesTestCase.\n# F. Alted 2007-04-20\n# **************************************************\ndef _table__getautoindex(self):\n if self._autoindex is None:\n try:\n indexgroup = self._v_file._get_node(_index_pathname_of(self))\n except NoSuchNodeError:\n self._autoindex = default_auto_index # update cache\n return self._autoindex\n else:\n self._autoindex = indexgroup.auto # update cache\n return self._autoindex\n else:\n # The value is in cache, return it\n return self._autoindex\n\n_table__getautoIndex = previous_api(_table__getautoindex)\n\n_table__autoindex = property(\n _table__getautoindex, _table__setautoindex, None,\n \"\"\"Automatically keep column indexes up to date?\n\n Setting this value states whether existing indexes should be\n automatically updated after an append operation or recomputed\n after an index-invalidating operation (i.e. removal and\n modification of rows). The default is true.\n\n This value gets into effect whenever a column is altered. 
If you\n don't have automatic indexing activated and you want to do an an\n immediate update use `Table.flush_rows_to_index()`; for an immediate\n reindexing of invalidated indexes, use `Table.reindex_dirty()`.\n\n This value is persistent.\n \"\"\")\n\n_table__autoIndex = previous_api(_table__autoindex)\n\n\ndef restorecache(self):\n # Define a cache for sparse table reads\n params = self._v_file.params\n chunksize = self._v_chunkshape[0]\n nslots = params['TABLE_MAX_SIZE'] / (chunksize * self._v_dtype.itemsize)\n self._chunkcache = NumCache((nslots, chunksize), self._v_dtype,\n 'table chunk cache')\n self._seqcache = ObjectCache(params['ITERSEQ_MAX_SLOTS'],\n params['ITERSEQ_MAX_SIZE'],\n 'Iter sequence cache')\n self._dirtycache = False\n\n\ndef _table__where_indexed(self, compiled, condition, condvars,\n start, stop, step):\n if profile:\n tref = time()\n if profile:\n show_stats(\"Entering table_whereIndexed\", tref)\n self._use_index = True\n # Clean the table caches for indexed queries if needed\n if self._dirtycache:\n restorecache(self)\n\n # Get the values in expression that are not columns\n values = []\n for key, value in condvars.iteritems():\n if isinstance(value, numpy.ndarray):\n values.append((key, value.item()))\n # Build a key for the sequence cache\n seqkey = (condition, tuple(values), (start, stop, step))\n # Do a lookup in sequential cache for this query\n nslot = self._seqcache.getslot(seqkey)\n if nslot >= 0:\n # Get the row sequence from the cache\n seq = self._seqcache.getitem(nslot)\n if len(seq) == 0:\n return iter([])\n # seq is a list.\n seq = numpy.array(seq, dtype='int64')\n # Correct the ranges in cached sequence\n if (start, stop, step) != (0, self.nrows, 1):\n seq = seq[(seq >= start) & (\n seq < stop) & ((seq - start) % step == 0)]\n return self.itersequence(seq)\n else:\n # No luck. self._seqcache will be populated\n # in the iterator if possible. 
(Row._finish_riterator)\n self._seqcache_key = seqkey\n\n # Compute the chunkmap for every index in indexed expression\n idxexprs = compiled.index_expressions\n strexpr = compiled.string_expression\n cmvars = {}\n tcoords = 0\n for i, idxexpr in enumerate(idxexprs):\n var, ops, lims = idxexpr\n col = condvars[var]\n index = col.index\n assert index is not None, \"the chosen column is not indexed\"\n assert not index.dirty, \"the chosen column has a dirty index\"\n\n # Get the number of rows that the indexed condition yields.\n range_ = index.get_lookup_range(ops, lims)\n ncoords = index.search(range_)\n tcoords += ncoords\n if index.reduction == 1 and ncoords == 0:\n # No values from index condition, thus the chunkmap should be empty\n nrowsinchunk = self.chunkshape[0]\n nchunks = long(math.ceil(float(self.nrows) / nrowsinchunk))\n chunkmap = numpy.zeros(shape=nchunks, dtype=\"bool\")\n else:\n # Get the chunkmap from the index\n chunkmap = index.get_chunkmap()\n # Assign the chunkmap to the cmvars dictionary\n cmvars[\"e%d\" % i] = chunkmap\n\n if index.reduction == 1 and tcoords == 0:\n # No candidates found in any indexed expression component, so leave now\n self._seqcache.setitem(seqkey, [], 1)\n return iter([])\n\n # Compute the final chunkmap\n chunkmap = numexpr.evaluate(strexpr, cmvars)\n if not chunkmap.any():\n # The chunkmap is all False, so the result is empty\n self._seqcache.setitem(seqkey, [], 1)\n return iter([])\n\n if profile:\n show_stats(\"Exiting table_whereIndexed\", tref)\n return chunkmap\n\n_table__whereIndexed = previous_api(_table__where_indexed)\n\n\ndef create_indexes_table(table):\n itgroup = IndexesTableG(\n table._v_parent, _index_name_of(table),\n \"Indexes container for table \" + table._v_pathname, new=True)\n return itgroup\n\ncreateIndexesTable = previous_api(create_indexes_table)\n\n\ndef create_indexes_descr(igroup, dname, iname, filters):\n idgroup = IndexesDescG(\n igroup, iname,\n \"Indexes container for sub-description \" + dname,\n filters=filters, new=True)\n return idgroup\n\ncreateIndexesDescr = previous_api(create_indexes_descr)\n\n\ndef _column__create_index(self, optlevel, kind, filters, tmp_dir,\n blocksizes, verbose):\n name = self.name\n table = self.table\n dtype = self.dtype\n descr = self.descr\n index = self.index\n get_node = table._v_file._get_node\n\n # Warn if the index already exists\n if index:\n raise ValueError(\"%s for column '%s' already exists. 
If you want to \"\n \"re-create it, please, try with reindex() method \"\n \"better\" % (str(index), str(self.pathname)))\n\n # Check that the datatype is indexable.\n if dtype.str[1:] == 'u8':\n raise NotImplementedError(\n \"indexing 64-bit unsigned integer columns \"\n \"is not supported yet, sorry\")\n if dtype.kind == 'c':\n raise TypeError(\"complex columns can not be indexed\")\n if dtype.shape != ():\n raise TypeError(\"multidimensional columns can not be indexed\")\n\n # Get the indexes group for table, and if not exists, create it\n try:\n itgroup = get_node(_index_pathname_of(table))\n except NoSuchNodeError:\n itgroup = create_indexes_table(table)\n\n # Create the necessary intermediate groups for descriptors\n idgroup = itgroup\n dname = \"\"\n pathname = descr._v_pathname\n if pathname != '':\n inames = pathname.split('/')\n for iname in inames:\n if dname == '':\n dname = iname\n else:\n dname += '/' + iname\n try:\n idgroup = get_node('%s/%s' % (itgroup._v_pathname, dname))\n except NoSuchNodeError:\n idgroup = create_indexes_descr(idgroup, dname, iname, filters)\n\n # Create the atom\n assert dtype.shape == ()\n atom = Atom.from_dtype(numpy.dtype((dtype, (0,))))\n\n # Protection on tables larger than the expected rows (perhaps the\n # user forgot to pass this parameter to the Table constructor?)\n expectedrows = table._v_expectedrows\n if table.nrows > expectedrows:\n expectedrows = table.nrows\n\n # Create the index itself\n index = Index(\n idgroup, name, atom=atom,\n title=\"Index for %s column\" % name,\n kind=kind,\n optlevel=optlevel,\n filters=filters,\n tmp_dir=tmp_dir,\n expectedrows=expectedrows,\n byteorder=table.byteorder,\n blocksizes=blocksizes)\n\n table._set_column_indexing(self.pathname, True)\n\n # Feed the index with values\n\n # Add rows to the index if necessary\n if table.nrows > 0:\n indexedrows = table._add_rows_to_index(\n self.pathname, 0, table.nrows, lastrow=True, update=False)\n else:\n indexedrows = 0\n index.dirty = False\n table._indexedrows = indexedrows\n table._unsaved_indexedrows = table.nrows - indexedrows\n\n # Optimize the index that has been already filled-up\n index.optimize(verbose=verbose)\n\n # We cannot do a flush here because when reindexing during a\n # flush, the indexes are created anew, and that creates a nested\n # call to flush().\n # table.flush()\n\n return indexedrows\n\n_column__createIndex = previous_api(_column__create_index)\n\n\nclass _ColIndexes(dict):\n \"\"\"Provides a nice representation of column indexes.\"\"\"\n\n def __repr__(self):\n \"\"\"Gives a detailed Description column representation.\"\"\"\n\n rep = [' \\\"%s\\\": %s' % (k, self[k]) for k in self.iterkeys()]\n return '{\\n %s}' % (',\\n '.join(rep))\n\n\nclass Table(tableextension.Table, Leaf):\n \"\"\"This class represents heterogeneous datasets in an HDF5 file.\n\n Tables are leaves (see the Leaf class in :ref:`LeafClassDescr`) whose data\n consists of a unidimensional sequence of *rows*, where each row contains\n one or more *fields*. Fields have an associated unique *name* and\n *position*, with the first field having position 0. All rows have the same\n fields, which are arranged in *columns*.\n\n Fields can have any type supported by the Col class (see\n :ref:`ColClassDescr`) and its descendants, which support multidimensional\n data. Moreover, a field can be *nested* (to an arbitrary depth), meaning\n that it includes further fields inside. 
A field named x inside a nested\n field a in a table can be accessed as the field a/x (its *path name*) from\n the table.\n\n The structure of a table is declared by its description, which is made\n available in the Table.description attribute (see :class:`Table`).\n\n This class provides new methods to read, write and search table data\n efficiently. It also provides special Python methods to allow accessing\n the table as a normal sequence or array (with extended slicing supported).\n\n PyTables supports *in-kernel* searches working simultaneously on several\n columns using complex conditions. These are faster than selections using\n Python expressions. See the :meth:`Table.where` method for more\n information on in-kernel searches.\n\n Non-nested columns can be *indexed*. Searching an indexed column can be\n several times faster than searching a non-nested one. Search methods\n automatically take advantage of indexing where available.\n\n When iterating a table, an object from the Row (see :ref:`RowClassDescr`)\n class is used. This object allows to read and write data one row at a\n time, as well as to perform queries which are not supported by in-kernel\n syntax (at a much lower speed, of course).\n\n Objects of this class support access to individual columns via *natural\n naming* through the :attr:`Table.cols` accessor. Nested columns are\n mapped to Cols instances, and non-nested ones to Column instances.\n See the Column class in :ref:`ColumnClassDescr` for examples of this\n feature.\n\n Parameters\n ----------\n parentnode\n The parent :class:`Group` object.\n\n .. versionchanged:: 3.0\n Renamed from *parentNode* to *parentnode*.\n\n name : str\n The name of this node in its parent group.\n description\n An IsDescription subclass or a dictionary where the keys are the field\n names, and the values the type definitions. In addition, a pure NumPy\n dtype is accepted. If None, the table metadata is read from disk,\n else, it's taken from previous parameters.\n title\n Sets a TITLE attribute on the HDF5 table entity.\n filters : Filters\n An instance of the Filters class that provides information about the\n desired I/O filters to be applied during the life of this object.\n expectedrows\n A user estimate about the number of rows that will be on table. If not\n provided, the default value is ``EXPECTED_ROWS_TABLE`` (see\n ``tables/parameters.py``). If you plan to save bigger tables, try\n providing a guess; this will optimize the HDF5 B-Tree creation and\n management process time and memory used.\n chunkshape\n The shape of the data chunk to be read or written as a single HDF5 I/O\n operation. The filters are applied to those chunks of data. Its rank\n for tables has to be 1. If ``None``, a sensible value is calculated\n based on the `expectedrows` parameter (which is recommended).\n byteorder\n The byteorder of the data *on-disk*, specified as 'little' or 'big'. If\n this is not specified, the byteorder is that of the platform, unless\n you passed a recarray as the `description`, in which case the recarray\n byteorder will be chosen.\n\n Notes\n -----\n The instance variables below are provided in addition to those in\n Leaf (see :ref:`LeafClassDescr`). Please note that there are several\n col* dictionaries to ease retrieving information about a column\n directly by its path name, avoiding the need to walk through\n Table.description or Table.cols.\n\n\n .. rubric:: Table attributes\n\n .. 
attribute:: coldescrs\n\n Maps the name of a column to its Col description (see\n :ref:`ColClassDescr`).\n\n .. attribute:: coldflts\n\n Maps the name of a column to its default value.\n\n .. attribute:: coldtypes\n\n Maps the name of a column to its NumPy data type.\n\n .. attribute:: colindexed\n\n Is the column which name is used as a key indexed?\n\n .. attribute:: colinstances\n\n Maps the name of a column to its Column (see\n :ref:`ColumnClassDescr`) or Cols (see :ref:`ColsClassDescr`)\n instance.\n\n .. attribute:: colnames\n\n A list containing the names of *top-level* columns in the table.\n\n .. attribute:: colpathnames\n\n A list containing the pathnames of *bottom-level* columns in\n the table.\n\n These are the leaf columns obtained when walking the table\n description left-to-right, bottom-first. Columns inside a\n nested column have slashes (/) separating name components in\n their pathname.\n\n .. attribute:: cols\n\n A Cols instance that provides *natural naming* access to\n non-nested (Column, see :ref:`ColumnClassDescr`) and nested\n (Cols, see :ref:`ColsClassDescr`) columns.\n\n .. attribute:: coltypes\n\n Maps the name of a column to its PyTables data type.\n\n .. attribute:: description\n\n A Description instance (see :ref:`DescriptionClassDescr`)\n reflecting the structure of the table.\n\n .. attribute:: extdim\n\n The index of the enlargeable dimension (always 0 for tables).\n\n .. attribute:: indexed\n\n Does this table have any indexed columns?\n\n .. attribute:: nrows\n\n The current number of rows in the table.\n\n \"\"\"\n\n # Class identifier.\n _c_classid = 'TABLE'\n\n _c_classId = previous_api_property('_c_classid')\n _v_objectId = previous_api_property('_v_objectid')\n\n # Properties\n # ~~~~~~~~~~\n @lazyattr\n def row(self):\n \"\"\"The associated Row instance (see :ref:`RowClassDescr`).\"\"\"\n\n return tableextension.Row(self)\n\n @lazyattr\n def dtype(self):\n \"\"\"The NumPy ``dtype`` that most closely matches this table.\"\"\"\n\n return self.description._v_dtype\n\n # Read-only shorthands\n # ````````````````````\n\n shape = property(\n lambda self: (self.nrows,), None, None,\n \"The shape of this table.\")\n\n rowsize = property(\n lambda self: self.description._v_dtype.itemsize, None, None,\n \"The size in bytes of each row in the table.\")\n\n size_in_memory = property(\n lambda self: self.nrows * self.rowsize, None, None,\n \"\"\"The size of this table's data in bytes when it is fully loaded into\n memory. This may be used in combination with size_on_disk to calculate\n the compression ratio of the data.\"\"\")\n\n # Lazy attributes\n # ```````````````\n @lazyattr\n def _v_iobuf(self):\n \"\"\"A buffer for doing I/O.\"\"\"\n\n return self._get_container(self.nrowsinbuf)\n\n @lazyattr\n def _v_wdflts(self):\n \"\"\"The defaults for writing in recarray format.\"\"\"\n\n # First, do a check to see whether we need to set default values\n # different from 0 or not.\n for coldflt in self.coldflts.itervalues():\n if isinstance(coldflt, numpy.ndarray) or coldflt:\n break\n else:\n # No default different from 0 found. 
Returning None.\n return None\n wdflts = self._get_container(1)\n for colname, coldflt in self.coldflts.iteritems():\n ra = get_nested_field(wdflts, colname)\n ra[:] = coldflt\n return wdflts\n\n @lazyattr\n def _colunaligned(self):\n \"\"\"The pathnames of unaligned, *unidimensional* columns.\"\"\"\n colunaligned, rarr = [], self._get_container(0)\n for colpathname in self.colpathnames:\n carr = get_nested_field(rarr, colpathname)\n if not carr.flags.aligned and carr.ndim == 1:\n colunaligned.append(colpathname)\n return frozenset(colunaligned)\n\n # Index-related properties\n # ````````````````````````\n autoindex = _table__autoindex\n \"\"\"Automatically keep column indexes up to date?\n\n Setting this value states whether existing indexes should be automatically\n updated after an append operation or recomputed after an index-invalidating\n operation (i.e. removal and modification of rows). The default is true.\n\n This value gets into effect whenever a column is altered. If you don't have\n automatic indexing activated and you want to do an immediate update use\n :meth:`Table.flush_rows_to_index`; for immediate reindexing of invalidated\n indexes, use :meth:`Table.reindex_dirty`.\n\n This value is persistent.\n\n .. versionchanged:: 3.0\n The *autoIndex* property has been renamed into *autoindex*.\n\n \"\"\"\n\n autoIndex = previous_api_property('autoindex')\n\n indexedcolpathnames = property(\n lambda self: [_colpname for _colpname in self.colpathnames\n if self.colindexed[_colpname]],\n None, None,\n \"\"\"List of pathnames of indexed columns in the table.\"\"\")\n\n colindexes = property(\n lambda self: _ColIndexes(\n ((_colpname, self.cols._f_col(_colpname).index)\n for _colpname in self.colpathnames\n if self.colindexed[_colpname])),\n None, None,\n \"\"\"A dictionary with the indexes of the indexed columns.\"\"\")\n\n _dirtyindexes = property(\n lambda self: self._condition_cache._nailcount > 0,\n None, None,\n \"\"\"Whether some index in table is dirty.\"\"\")\n\n # Other methods\n # ~~~~~~~~~~~~~\n def __init__(self, parentnode, name,\n description=None, title=\"\", filters=None,\n expectedrows=None, chunkshape=None,\n byteorder=None, _log=True):\n\n self._v_new = new = description is not None\n \"\"\"Is this the first time the node has been created?\"\"\"\n self._v_new_title = title\n \"\"\"New title for this node.\"\"\"\n self._v_new_filters = filters\n \"\"\"New filter properties for this node.\"\"\"\n self.extdim = 0 # Tables only have one dimension currently\n \"\"\"The index of the enlargeable dimension (always 0 for tables).\"\"\"\n self._v_recarray = None\n \"\"\"A structured array to be stored in the table.\"\"\"\n self._rabyteorder = None\n \"\"\"The computed byteorder of the self._v_recarray.\"\"\"\n if expectedrows is None:\n expectedrows = parentnode._v_file.params['EXPECTED_ROWS_TABLE']\n self._v_expectedrows = expectedrows\n \"\"\"The expected number of rows to be stored in the table.\"\"\"\n self.nrows = SizeType(0)\n \"\"\"The current number of rows in the table.\"\"\"\n self.description = None\n \"\"\"A Description instance (see :ref:`DescriptionClassDescr`)\n reflecting the structure of the table.\"\"\"\n self._time64colnames = []\n \"\"\"The names of ``Time64`` columns.\"\"\"\n self._strcolnames = []\n \"\"\"The names of ``String`` columns.\"\"\"\n self._colenums = {}\n \"\"\"Maps the name of an enumerated column to its ``Enum`` instance.\"\"\"\n self._v_chunkshape = None\n \"\"\"Private storage for the `chunkshape` property of the leaf.\"\"\"\n\n 
self.indexed = False\n \"\"\"Does this table have any indexed columns?\"\"\"\n self._indexedrows = 0\n \"\"\"Number of rows indexed in disk.\"\"\"\n self._unsaved_indexedrows = 0\n \"\"\"Number of rows indexed in memory but still not in disk.\"\"\"\n self._listoldindexes = []\n \"\"\"The list of columns with old indexes.\"\"\"\n self._autoindex = None\n \"\"\"Private variable that caches the value for autoindex.\"\"\"\n\n self.colnames = []\n \"\"\"A list containing the names of *top-level* columns in the table.\"\"\"\n self.colpathnames = []\n \"\"\"A list containing the pathnames of *bottom-level* columns in the\n table.\n\n These are the leaf columns obtained when walking the\n table description left-to-right, bottom-first. Columns inside a\n nested column have slashes (/) separating name components in\n their pathname.\n \"\"\"\n self.colinstances = {}\n \"\"\"Maps the name of a column to its Column (see\n :ref:`ColumnClassDescr`) or Cols (see :ref:`ColsClassDescr`)\n instance.\"\"\"\n self.coldescrs = {}\n \"\"\"Maps the name of a column to its Col description (see\n :ref:`ColClassDescr`).\"\"\"\n self.coltypes = {}\n \"\"\"Maps the name of a column to its PyTables data type.\"\"\"\n self.coldtypes = {}\n \"\"\"Maps the name of a column to its NumPy data type.\"\"\"\n self.coldflts = {}\n \"\"\"Maps the name of a column to its default value.\"\"\"\n self.colindexed = {}\n \"\"\"Is the column which name is used as a key indexed?\"\"\"\n\n self._use_index = False\n \"\"\"Whether an index can be used or not in a search. Boolean.\"\"\"\n self._where_condition = None\n \"\"\"Condition function and argument list for selection of values.\"\"\"\n self._seqcache_key = None\n \"\"\"The key under which to save a query's results (list of row indexes)\n or None to not save.\"\"\"\n max_slots = parentnode._v_file.params['COND_CACHE_SLOTS']\n self._condition_cache = CacheDict(max_slots)\n \"\"\"Cache of already compiled conditions.\"\"\"\n self._exprvars_cache = {}\n \"\"\"Cache of variables participating in numexpr expressions.\"\"\"\n self._enabled_indexing_in_queries = True\n \"\"\"Is indexing enabled in queries? *Use only for testing.*\"\"\"\n self._empty_array_cache = {}\n \"\"\"Cache of empty arrays.\"\"\"\n\n self._v_dtype = None\n \"\"\"The NumPy datatype fopr this table.\"\"\"\n self.cols = None\n \"\"\"\n A Cols instance that provides *natural naming* access to non-nested\n (Column, see :ref:`ColumnClassDescr`) and nested (Cols, see\n :ref:`ColsClassDescr`) columns.\n \"\"\"\n self._dirtycache = True\n \"\"\"Whether the data caches are dirty or not. 
Initially set to yes.\"\"\"\n self._descflavor = None\n \"\"\"Temporarily keeps the flavor of a description with data.\"\"\"\n\n # Initialize this object in case is a new Table\n\n # Try purely descriptive description objects.\n if new and isinstance(description, dict):\n # Dictionary case\n self.description = Description(description)\n elif new and (type(description) == type(IsDescription)\n and issubclass(description, IsDescription)):\n # IsDescription subclass case\n descr = description()\n self.description = Description(descr.columns)\n elif new and isinstance(description, Description):\n # It is a Description instance already\n self.description = description\n\n # No description yet?\n if new and self.description is None:\n # Try NumPy dtype instances\n if isinstance(description, numpy.dtype):\n self.description, self._rabyteorder = \\\n descr_from_dtype(description)\n\n # No description yet?\n if new and self.description is None:\n # Try structured array description objects.\n try:\n self._descflavor = flavor = flavor_of(description)\n except TypeError: # probably not an array\n pass\n else:\n if flavor == 'python':\n nparray = numpy.rec.array(description)\n else:\n nparray = array_as_internal(description, flavor)\n self.nrows = nrows = SizeType(nparray.size)\n # If `self._v_recarray` is set, it will be used as the\n # initial buffer.\n if nrows > 0:\n self._v_recarray = nparray\n self.description, self._rabyteorder = \\\n descr_from_dtype(nparray.dtype)\n\n # No description yet?\n if new and self.description is None:\n raise TypeError(\n \"the ``description`` argument is not of a supported type: \"\n \"``IsDescription`` subclass, ``Description`` instance, \"\n \"dictionary, or structured array\")\n\n # Check the chunkshape parameter\n if new and chunkshape is not None:\n if isinstance(chunkshape, (int, numpy.integer, long)):\n chunkshape = (chunkshape,)\n try:\n chunkshape = tuple(chunkshape)\n except TypeError:\n raise TypeError(\n \"`chunkshape` parameter must be an integer or sequence \"\n \"and you passed a %s\" % type(chunkshape))\n if len(chunkshape) != 1:\n raise ValueError(\"`chunkshape` rank (length) must be 1: %r\"\n % (chunkshape,))\n self._v_chunkshape = tuple(SizeType(s) for s in chunkshape)\n\n super(Table, self).__init__(parentnode, name, new, filters,\n byteorder, _log)\n\n def _g_post_init_hook(self):\n # We are putting here the index-related issues\n # as well as filling general info for table\n # This is needed because we need first the index objects created\n\n # First, get back the flavor of input data (if any) for\n # `Leaf._g_post_init_hook()`.\n self._flavor, self._descflavor = self._descflavor, None\n super(Table, self)._g_post_init_hook()\n\n # Create a cols accessor.\n self.cols = Cols(self, self.description)\n\n # Place the `Cols` and `Column` objects into `self.colinstances`.\n colinstances, cols = self.colinstances, self.cols\n for colpathname in self.description._v_pathnames:\n colinstances[colpathname] = cols._g_col(colpathname)\n\n if self._v_new:\n # Columns are never indexed on creation.\n self.colindexed = dict((cpn, False) for cpn in self.colpathnames)\n return\n\n # The following code is only for opened tables.\n\n # Do the indexes group exist?\n indexesgrouppath = _index_pathname_of(self)\n igroup = indexesgrouppath in self._v_file\n oldindexes = False\n for colobj in self.description._f_walk(type=\"Col\"):\n colname = colobj._v_pathname\n # Is this column indexed?\n if igroup:\n indexname = _index_pathname_of_column(self, colname)\n indexed = 
indexname in self._v_file\n self.colindexed[colname] = indexed\n if indexed:\n column = self.cols._g_col(colname)\n indexobj = column.index\n if isinstance(indexobj, OldIndex):\n indexed = False # Not a vaild index\n oldindexes = True\n self._listoldindexes.append(colname)\n else:\n # Tell the condition cache about columns with dirty\n # indexes.\n if indexobj.dirty:\n self._condition_cache.nail()\n else:\n indexed = False\n self.colindexed[colname] = False\n if indexed:\n self.indexed = True\n\n if oldindexes: # this should only appear under 2.x Pro\n warnings.warn(\n \"table ``%s`` has column indexes with PyTables 1.x format. \"\n \"Unfortunately, this format is not supported in \"\n \"PyTables 2.x series. Note that you can use the \"\n \"``ptrepack`` utility in order to recreate the indexes. \"\n \"The 1.x indexed columns found are: %s\" %\n (self._v_pathname, self._listoldindexes),\n OldIndexWarning)\n\n # It does not matter to which column 'indexobj' belongs,\n # since their respective index objects share\n # the same number of elements.\n if self.indexed:\n self._indexedrows = indexobj.nelements\n self._unsaved_indexedrows = self.nrows - self._indexedrows\n # Put the autoindex value in a cache variable\n self._autoindex = self.autoindex\n\n _g_postInitHook = previous_api(_g_post_init_hook)\n\n def _calc_nrowsinbuf(self):\n \"\"\"Calculate the number of rows that fits on a PyTables buffer.\"\"\"\n\n params = self._v_file.params\n # Compute the nrowsinbuf\n rowsize = self.rowsize\n buffersize = params['IO_BUFFER_SIZE']\n if rowsize != 0:\n nrowsinbuf = buffersize // rowsize\n # The number of rows in buffer needs to be an exact multiple of\n # chunkshape[0] for queries using indexed columns.\n # Fixes #319 and probably #409 too.\n nrowsinbuf -= nrowsinbuf % self.chunkshape[0]\n else:\n nrowsinbuf = 1\n\n # tableextension.pyx performs an assertion\n # to make sure nrowsinbuf is greater than or\n # equal to the chunksize.\n # See gh-206 and gh-238\n if self.chunkshape is not None:\n if nrowsinbuf < self.chunkshape[0]:\n nrowsinbuf = self.chunkshape[0]\n\n # Safeguard against row sizes being extremely large\n if nrowsinbuf == 0:\n nrowsinbuf = 1\n # If rowsize is too large, issue a Performance warning\n maxrowsize = params['BUFFER_TIMES'] * buffersize\n if rowsize > maxrowsize:\n warnings.warn(\"\"\"\\\nThe Table ``%s`` is exceeding the maximum recommended rowsize (%d bytes);\nbe ready to see PyTables asking for *lots* of memory and possibly slow\nI/O. You may want to reduce the rowsize by trimming the value of\ndimensions that are orthogonal (and preferably close) to the *main*\ndimension of this leave. 
Alternatively, in case you have specified a\nvery small/large chunksize, you may want to increase/decrease it.\"\"\"\n % (self._v_pathname, maxrowsize),\n PerformanceWarning)\n return nrowsinbuf\n\n def _getemptyarray(self, dtype):\n # Acts as a cache for empty arrays\n key = dtype\n if key in self._empty_array_cache:\n return self._empty_array_cache[key]\n else:\n self._empty_array_cache[\n key] = arr = numpy.empty(shape=0, dtype=key)\n return arr\n\n def _get_container(self, shape):\n \"Get the appropriate buffer for data depending on table nestedness.\"\n\n # This is *much* faster than the numpy.rec.array counterpart\n return numpy.empty(shape=shape, dtype=self._v_dtype)\n\n def _get_type_col_names(self, type_):\n \"\"\"Returns a list containing 'type_' column names.\"\"\"\n\n return [colobj._v_pathname\n for colobj in self.description._f_walk('Col')\n if colobj.type == type_]\n\n _getTypeColNames = previous_api(_get_type_col_names)\n\n def _get_enum_map(self):\n \"\"\"Return mapping from enumerated column names to `Enum` instances.\"\"\"\n\n enumMap = {}\n for colobj in self.description._f_walk('Col'):\n if colobj.kind == 'enum':\n enumMap[colobj._v_pathname] = colobj.enum\n return enumMap\n\n _getEnumMap = previous_api(_get_enum_map)\n\n def _g_create(self):\n \"\"\"Create a new table on disk.\"\"\"\n\n # Warning against assigning too much columns...\n # F. Alted 2005-06-05\n maxColumns = self._v_file.params['MAX_COLUMNS']\n if (len(self.description._v_names) > maxColumns):\n warnings.warn(\n \"table ``%s`` is exceeding the recommended \"\n \"maximum number of columns (%d); \"\n \"be ready to see PyTables asking for *lots* of memory \"\n \"and possibly slow I/O\" % (self._v_pathname, maxColumns),\n PerformanceWarning)\n\n # 1. Create the HDF5 table (some parameters need to be computed).\n\n # Fix the byteorder of the recarray and update the number of\n # expected rows if necessary\n if self._v_recarray is not None:\n self._v_recarray = self._g_fix_byteorder_data(self._v_recarray,\n self._rabyteorder)\n if len(self._v_recarray) > self._v_expectedrows:\n self._v_expectedrows = len(self._v_recarray)\n # Compute a sensible chunkshape\n if self._v_chunkshape is None:\n self._v_chunkshape = self._calc_chunkshape(\n self._v_expectedrows, self.rowsize, self.rowsize)\n # Correct the byteorder, if still needed\n if self.byteorder is None:\n self.byteorder = sys.byteorder\n\n # Cache some data which is already in the description.\n # This is necessary to happen before creation time in order\n # to be able to populate the self._v_wdflts\n self._cache_description_data()\n\n # After creating the table, ``self._v_objectid`` needs to be\n # set because it is needed for setting attributes afterwards.\n self._v_objectid = self._create_table(\n self._v_new_title, self.filters.complib or '', obversion)\n self._v_recarray = None # not useful anymore\n self._rabyteorder = None # not useful anymore\n\n # 2. Compute or get chunk shape and buffer size parameters.\n self.nrowsinbuf = self._calc_nrowsinbuf()\n\n # 3. 
Get field fill attributes from the table description and\n # set them on disk.\n if self._v_file.params['PYTABLES_SYS_ATTRS']:\n set_attr = self._v_attrs._g__setattr\n for i, colobj in enumerate(self.description._f_walk(type=\"Col\")):\n fieldname = \"FIELD_%d_FILL\" % i\n set_attr(fieldname, colobj.dflt)\n\n return self._v_objectid\n\n def _g_open(self):\n \"\"\"Opens a table from disk and read the metadata on it.\n\n Creates an user description on the flight to easy the access to\n the actual data.\n\n \"\"\"\n\n # 1. Open the HDF5 table and get some data from it.\n self._v_objectid, description, chunksize = self._get_info()\n self._v_expectedrows = self.nrows # the actual number of rows\n\n # 2. Create an instance description to host the record fields.\n validate = not self._v_file._isPTFile # only for non-PyTables files\n self.description = Description(description, validate=validate)\n\n # 3. Compute or get chunk shape and buffer size parameters.\n if chunksize == 0:\n self._v_chunkshape = self._calc_chunkshape(\n self._v_expectedrows, self.rowsize, self.rowsize)\n else:\n self._v_chunkshape = (chunksize,)\n self.nrowsinbuf = self._calc_nrowsinbuf()\n\n # 4. If there are field fill attributes, get them from disk and\n # set them in the table description.\n if self._v_file.params['PYTABLES_SYS_ATTRS']:\n if \"FIELD_0_FILL\" in self._v_attrs._f_list(\"sys\"):\n i = 0\n get_attr = self._v_attrs.__getattr__\n for objcol in self.description._f_walk(type=\"Col\"):\n colname = objcol._v_pathname\n # Get the default values for each column\n fieldname = \"FIELD_%s_FILL\" % i\n defval = get_attr(fieldname)\n if defval is not None:\n objcol.dflt = defval\n else:\n warnings.warn(\"could not load default value \"\n \"for the ``%s`` column of table ``%s``; \"\n \"using ``%r`` instead\"\n % (colname, self._v_pathname,\n objcol.dflt))\n defval = objcol.dflt\n i += 1\n\n # Set also the correct value in the desc._v_dflts dictionary\n for descr in self.description._f_walk(type=\"Description\"):\n names = descr._v_names\n for i in range(len(names)):\n objcol = descr._v_colobjects[names[i]]\n if isinstance(objcol, Col):\n descr._v_dflts[objcol._v_name] = objcol.dflt\n\n # 5. 
Cache some data which is already in the description.\n self._cache_description_data()\n\n return self._v_objectid\n\n def _cache_description_data(self):\n \"\"\"Cache some data which is already in the description.\n\n Some information is extracted from `self.description` to build\n some useful (but redundant) structures:\n\n * `self.colnames`\n * `self.colpathnames`\n * `self.coldescrs`\n * `self.coltypes`\n * `self.coldtypes`\n * `self.coldflts`\n * `self._v_dtype`\n * `self._time64colnames`\n * `self._strcolnames`\n * `self._colenums`\n\n \"\"\"\n\n self.colnames = list(self.description._v_names)\n self.colpathnames = [\n col._v_pathname for col in self.description._f_walk()\n if not hasattr(col, '_v_names')] # bottom-level\n\n # Find ``time64`` column names.\n self._time64colnames = self._get_type_col_names('time64')\n # Find ``string`` column names.\n self._strcolnames = self._get_type_col_names('string')\n # Get a mapping of enumerated columns to their `Enum` instances.\n self._colenums = self._get_enum_map()\n\n # Get info about columns\n for colobj in self.description._f_walk(type=\"Col\"):\n colname = colobj._v_pathname\n # Get the column types, types and defaults\n self.coldescrs[colname] = colobj\n self.coltypes[colname] = colobj.type\n self.coldtypes[colname] = colobj.dtype\n self.coldflts[colname] = colobj.dflt\n\n # Assign _v_dtype for this table\n self._v_dtype = self.description._v_dtype\n\n _cacheDescriptionData = previous_api(_cache_description_data)\n\n def _get_column_instance(self, colpathname):\n \"\"\"Get the instance of the column with the given `colpathname`.\n\n If the column does not exist in the table, a `KeyError` is\n raised.\n\n \"\"\"\n\n try:\n return _reduce(getattr, colpathname.split('/'), self.description)\n except AttributeError:\n raise KeyError(\"table ``%s`` does not have a column named ``%s``\"\n % (self._v_pathname, colpathname))\n\n _getColumnInstance = previous_api(_get_column_instance)\n\n _check_column = _get_column_instance\n\n def _disable_indexing_in_queries(self):\n \"\"\"Force queries not to use indexing.\n\n *Use only for testing.*\n\n \"\"\"\n\n if not self._enabled_indexing_in_queries:\n return # already disabled\n # The nail avoids setting/getting compiled conditions in/from\n # the cache where indexing is used.\n self._condition_cache.nail()\n self._enabled_indexing_in_queries = False\n\n _disableIndexingInQueries = previous_api(_disable_indexing_in_queries)\n\n def _enable_indexing_in_queries(self):\n \"\"\"Allow queries to use indexing.\n\n *Use only for testing.*\n\n \"\"\"\n\n if self._enabled_indexing_in_queries:\n return # already enabled\n self._condition_cache.unnail()\n self._enabled_indexing_in_queries = True\n\n _enableIndexingInQueries = previous_api(_enable_indexing_in_queries)\n\n def _required_expr_vars(self, expression, uservars, depth=1):\n \"\"\"Get the variables required by the `expression`.\n\n A new dictionary defining the variables used in the `expression`\n is returned. Required variables are first looked up in the\n `uservars` mapping, then in the set of top-level columns of the\n table. Unknown variables cause a `NameError` to be raised.\n\n When `uservars` is `None`, the local and global namespace where\n the API callable which uses this method is called is sought\n instead. This mechanism will not work as expected if this\n method is not used *directly* from an API callable. 
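        From user code, `uservars` is whatever is passed as the `condvars`
        argument of :meth:`Table.where`; a minimal sketch, with purely
        illustrative column and variable names::

            # 'temperature' is an assumed column of this table
            threshold = 300.0
            rows = table.where('temp > threshold',
                               condvars={'temp': table.cols.temperature,
                                         'threshold': threshold})
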
To disable\n this mechanism, just specify a mapping as `uservars`.\n\n Nested columns and columns from other tables are not allowed\n (`TypeError` and `ValueError` are raised, respectively). Also,\n non-column variable values are converted to NumPy arrays.\n\n `depth` specifies the depth of the frame in order to reach local\n or global variables.\n\n \"\"\"\n\n # Get the names of variables used in the expression.\n exprvarscache = self._exprvars_cache\n if not expression in exprvarscache:\n # Protection against growing the cache too much\n if len(exprvarscache) > 256:\n # Remove 10 (arbitrary) elements from the cache\n for k in exprvarscache.keys()[:10]:\n del exprvarscache[k]\n cexpr = compile(expression, '<string>', 'eval')\n exprvars = [var for var in cexpr.co_names\n if var not in ['None', 'False', 'True']\n and var not in numexpr_functions]\n exprvarscache[expression] = exprvars\n else:\n exprvars = exprvarscache[expression]\n\n # Get the local and global variable mappings of the user frame\n # if no mapping has been explicitly given for user variables.\n user_locals, user_globals = {}, {}\n if uservars is None:\n # We use specified depth to get the frame where the API\n # callable using this method is called. For instance:\n #\n # * ``table._required_expr_vars()`` (depth 0) is called by\n # * ``table._where()`` (depth 1) is called by\n # * ``table.where()`` (depth 2) is called by\n # * user-space functions (depth 3)\n user_frame = sys._getframe(depth)\n user_locals = user_frame.f_locals\n user_globals = user_frame.f_globals\n\n colinstances = self.colinstances\n tblfile, tblpath = self._v_file, self._v_pathname\n # Look for the required variables first among the ones\n # explicitly provided by the user, then among implicit columns,\n # then among external variables (only if no explicit variables).\n reqvars = {}\n for var in exprvars:\n # Get the value.\n if uservars is not None and var in uservars:\n val = uservars[var]\n elif var in colinstances:\n val = colinstances[var]\n elif uservars is None and var in user_locals:\n val = user_locals[var]\n elif uservars is None and var in user_globals:\n val = user_globals[var]\n else:\n raise NameError(\"name ``%s`` is not defined\" % var)\n\n # Check the value.\n if hasattr(val, 'pathname'): # non-nested column\n if val.shape[1:] != ():\n raise NotImplementedError(\n \"variable ``%s`` refers to \"\n \"a multidimensional column, \"\n \"not yet supported in conditions, sorry\" % var)\n if (val._table_file is not tblfile or\n val._table_path != tblpath):\n raise ValueError(\"variable ``%s`` refers to a column \"\n \"which is not part of table ``%s``\"\n % (var, tblpath))\n if val.dtype.str[1:] == 'u8':\n raise NotImplementedError(\n \"variable ``%s`` refers to \"\n \"a 64-bit unsigned integer column, \"\n \"not yet supported in conditions, sorry; \"\n \"please use regular Python selections\" % var)\n elif hasattr(val, '_v_colpathnames'): # nested column\n raise TypeError(\n \"variable ``%s`` refers to a nested column, \"\n \"not allowed in conditions\" % var)\n else: # only non-column values are converted to arrays\n # XXX: not 100% sure about this\n if isinstance(val, unicode):\n val = numpy.asarray(val.encode('ascii'))\n else:\n val = numpy.asarray(val)\n reqvars[var] = val\n return reqvars\n\n _requiredExprVars = previous_api(_required_expr_vars)\n\n def _get_condition_key(self, condition, condvars):\n \"\"\"Get the condition cache key for `condition` with `condvars`.\n\n Currently, the key is a tuple of `condition`, column variables\n 
names, normal variables names, column paths and variable paths\n (all are tuples).\n\n \"\"\"\n\n # Variable names for column and normal variables.\n colnames, varnames = [], []\n # Column paths and types for each of the previous variable.\n colpaths, vartypes = [], []\n for (var, val) in condvars.iteritems():\n if hasattr(val, 'pathname'): # column\n colnames.append(var)\n colpaths.append(val.pathname)\n else: # array\n try:\n varnames.append(var)\n vartypes.append(numexpr_getType(val)) # expensive\n except ValueError:\n # This is more clear than the error given by Numexpr.\n raise TypeError(\"variable ``%s`` has data type ``%s``, \"\n \"not allowed in conditions\"\n % (var, val.dtype.name))\n colnames, varnames = tuple(colnames), tuple(varnames)\n colpaths, vartypes = tuple(colpaths), tuple(vartypes)\n condkey = (condition, colnames, varnames, colpaths, vartypes)\n return condkey\n\n _getConditionKey = previous_api(_get_condition_key)\n\n def _compile_condition(self, condition, condvars):\n \"\"\"Compile the `condition` and extract usable index conditions.\n\n This method returns an instance of ``CompiledCondition``. See\n the ``compile_condition()`` function in the ``conditions``\n module for more information about the compilation process.\n\n This method makes use of the condition cache when possible.\n\n \"\"\"\n\n # Look up the condition in the condition cache.\n condcache = self._condition_cache\n condkey = self._get_condition_key(condition, condvars)\n compiled = condcache.get(condkey)\n if compiled:\n return compiled.with_replaced_vars(condvars) # bingo!\n\n # Bad luck, the condition must be parsed and compiled.\n # Fortunately, the key provides some valuable information. ;)\n (condition, colnames, varnames, colpaths, vartypes) = condkey\n\n # Extract more information from referenced columns.\n typemap = dict(zip(varnames, vartypes)) # start with normal variables\n indexedcols = []\n for colname in colnames:\n col = condvars[colname]\n\n # Extract types from *all* the given variables.\n coltype = col.dtype.type\n typemap[colname] = _nxtype_from_nptype[coltype]\n\n # Get the set of columns with usable indexes.\n if (self._enabled_indexing_in_queries # no in-kernel searches\n and self.colindexed[col.pathname] and not col.index.dirty):\n indexedcols.append(colname)\n\n indexedcols = frozenset(indexedcols)\n # Now let ``compile_condition()`` do the Numexpr-related job.\n compiled = compile_condition(condition, typemap, indexedcols)\n\n # Check that there actually are columns in the condition.\n if not set(compiled.parameters).intersection(set(colnames)):\n raise ValueError(\"there are no columns taking part \"\n \"in condition ``%s``\" % (condition,))\n\n # Store the compiled condition in the cache and return it.\n condcache[condkey] = compiled\n return compiled.with_replaced_vars(condvars)\n\n _compileCondition = previous_api(_compile_condition)\n\n def will_query_use_indexing(self, condition, condvars=None):\n \"\"\"Will a query for the condition use indexing?\n\n The meaning of the condition and *condvars* arguments is the same as in\n the :meth:`Table.where` method. If condition can use indexing, this\n method returns a frozenset with the path names of the columns whose\n index is usable. Otherwise, it returns an empty list.\n\n This method is mainly intended for testing. 
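        A minimal sketch of such a check (the indexed ``pressure`` column and
        the ``temperature`` column are assumed for illustration)::

            # 'pressure' and 'temperature' are assumed column names
            usable = table.will_query_use_indexing(
                '(pressure > 10) & (temperature < 300)')
            if 'pressure' in usable:
                print('the query can use the pressure index')
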
Keep in mind that changing\n the set of indexed columns or their dirtiness may make this method\n return different values for the same arguments at different times.\n\n \"\"\"\n\n # Compile the condition and extract usable index conditions.\n condvars = self._required_expr_vars(condition, condvars, depth=2)\n compiled = self._compile_condition(condition, condvars)\n # Return the columns in indexed expressions\n idxcols = [condvars[var].pathname for var in compiled.index_variables]\n return frozenset(idxcols)\n\n willQueryUseIndexing = previous_api(will_query_use_indexing)\n\n def where(self, condition, condvars=None,\n start=None, stop=None, step=None):\n \"\"\"Iterate over values fulfilling a condition.\n\n This method returns a Row iterator (see :ref:`RowClassDescr`) which\n only selects rows in the table that satisfy the given condition (an\n expression-like string).\n\n The condvars mapping may be used to define the variable names appearing\n in the condition. condvars should consist of identifier-like strings\n pointing to Column (see :ref:`ColumnClassDescr`) instances *of this\n table*, or to other values (which will be converted to arrays). A\n default set of condition variables is provided where each top-level,\n non-nested column with an identifier-like name appears. Variables in\n condvars override the default ones.\n\n When condvars is not provided or None, the current local and global\n namespace is sought instead of condvars. The previous mechanism is\n mostly intended for interactive usage. To disable it, just specify a\n (maybe empty) mapping as condvars.\n\n If a range is supplied (by setting some of the start, stop or step\n parameters), only the rows in that range and fulfilling the condition\n are used. The meaning of the start, stop and step parameters is the\n same as for Python slices.\n\n When possible, indexed columns participating in the condition will be\n used to speed up the search. It is recommended that you place the\n indexed columns as left and out in the condition as possible. Anyway,\n this method has always better performance than regular Python\n selections on the table.\n\n You can mix this method with regular Python selections in order to\n support even more complex queries. It is strongly recommended that you\n pass the most restrictive condition as the parameter to this method if\n you want to achieve maximum performance.\n\n .. warning::\n\n When in the middle of a table row iterator, you should not\n use methods that can change the number of rows in the table\n (like :meth:`Table.append` or :meth:`Table.remove_rows`) or\n unexpected errors will happen.\n\n Examples\n --------\n\n ::\n\n >>> passvalues = [ row['col3'] for row in\n ... table.where('(col1 > 0) & (col2 <= 20)', step=5)\n ... if your_function(row['col2']) ]\n >>> print(\"Values that pass the cuts:\", passvalues)\n\n .. note::\n\n A special care should be taken when the query condition includes\n string literals. 
Indeed Python 2 string literals are string of\n bytes while Python 3 strings are unicode objects.\n\n Let's assume that the table ``table`` has the following\n structure::\n\n class Record(IsDescription):\n col1 = StringCol(4) # 4-character String of bytes\n col2 = IntCol()\n col3 = FloatCol()\n\n The type of \"col1\" do not change depending on the Python version\n used (of course) and it always corresponds to strings of bytes.\n\n Any condition involving \"col1\" should be written using the\n appropriate type for string literals in order to avoid\n :exc:`TypeError`\\ s.\n\n The code below will work fine in Python 2 but will fail with a\n :exc:`TypeError` in Python 3::\n\n condition = 'col1 == \"AAAA\"'\n for record in table.where(condition): # TypeError in Python3\n # do something with \"record\"\n\n The reason is that in Python 3 \"condition\" implies a comparison\n between a string of bytes (\"col1\" contents) and an unicode literal\n (\"AAAA\").\n\n The correct way to write the condition is::\n\n condition = 'col1 == b\"AAAA\"'\n\n .. versionchanged:: 3.0\n The start, stop and step parameters now behave like in slice.\n\n \"\"\"\n\n return self._where(condition, condvars, start, stop, step)\n\n def _where(self, condition, condvars, start=None, stop=None, step=None):\n \"\"\"Low-level counterpart of `self.where()`.\"\"\"\n\n if profile:\n tref = time()\n if profile:\n show_stats(\"Entering table._where\", tref)\n # Adjust the slice to be used.\n (start, stop, step) = self._process_range_read(start, stop, step)\n if start >= stop: # empty range, reset conditions\n self._use_index = False\n self._where_condition = None\n return iter([])\n\n # Compile the condition and extract usable index conditions.\n condvars = self._required_expr_vars(condition, condvars, depth=3)\n compiled = self._compile_condition(condition, condvars)\n\n # Can we use indexes?\n if compiled.index_expressions:\n chunkmap = _table__where_indexed(\n self, compiled, condition, condvars, start, stop, step)\n if not isinstance(chunkmap, numpy.ndarray):\n # If it is not a NumPy array it should be an iterator\n # Reset conditions\n self._use_index = False\n self._where_condition = None\n # ...and return the iterator\n return chunkmap\n else:\n chunkmap = None # default to an in-kernel query\n\n args = [condvars[param] for param in compiled.parameters]\n self._where_condition = (compiled.function, args)\n row = tableextension.Row(self)\n if profile:\n show_stats(\"Exiting table._where\", tref)\n return row._iter(start, stop, step, chunkmap=chunkmap)\n\n def read_where(self, condition, condvars=None, field=None,\n start=None, stop=None, step=None):\n \"\"\"Read table data fulfilling the given *condition*.\n\n This method is similar to :meth:`Table.read`, having their common\n arguments and return values the same meanings. However, only the rows\n fulfilling the *condition* are included in the result.\n\n The meaning of the other arguments is the same as in the\n :meth:`Table.where` method.\n\n \"\"\"\n\n self._g_check_open()\n coords = [p.nrow for p in\n self._where(condition, condvars, start, stop, step)]\n self._where_condition = None # reset the conditions\n if len(coords) > 1:\n cstart, cstop = coords[0], coords[-1] + 1\n if cstop - cstart == len(coords):\n # Chances for monotonically increasing row values. 
Refine.\n inc_seq = numpy.alltrue(\n numpy.arange(cstart, cstop) == numpy.array(coords))\n if inc_seq:\n return self.read(cstart, cstop, field=field)\n return self.read_coordinates(coords, field)\n\n readWhere = previous_api(read_where)\n\n def append_where(self, dstTable, condition, condvars=None,\n start=None, stop=None, step=None):\n \"\"\"Append rows fulfilling the condition to the dstTable table.\n\n dstTable must be capable of taking the rows resulting from the query,\n i.e. it must have columns with the expected names and compatible\n types. The meaning of the other arguments is the same as in the\n :meth:`Table.where` method.\n\n The number of rows appended to dstTable is returned as a result.\n\n .. versionchanged:: 3.0\n The *whereAppend* method has been renamed into *append_where*.\n\n \"\"\"\n\n self._g_check_open()\n\n # Check that the destination file is not in read-only mode.\n dstTable._v_file._check_writable()\n\n # Row objects do not support nested columns, so we must iterate\n # over the flat column paths. When rows support nesting,\n # ``self.colnames`` can be directly iterated upon.\n colNames = [colName for colName in self.colpathnames]\n dstRow = dstTable.row\n nrows = 0\n for srcRow in self._where(condition, condvars, start, stop, step):\n for colName in colNames:\n dstRow[colName] = srcRow[colName]\n dstRow.append()\n nrows += 1\n dstTable.flush()\n return nrows\n\n whereAppend = previous_api(append_where)\n\n def get_where_list(self, condition, condvars=None, sort=False,\n start=None, stop=None, step=None):\n \"\"\"Get the row coordinates fulfilling the given condition.\n\n The coordinates are returned as a list of the current flavor. sort\n means that you want to retrieve the coordinates ordered. The default is\n to not sort them.\n\n The meaning of the other arguments is the same as in the\n :meth:`Table.where` method.\n\n \"\"\"\n\n self._g_check_open()\n\n coords = [p.nrow for p in\n self._where(condition, condvars, start, stop, step)]\n coords = numpy.array(coords, dtype=SizeType)\n # Reset the conditions\n self._where_condition = None\n if sort:\n coords = numpy.sort(coords)\n return internal_to_flavor(coords, self.flavor)\n\n getWhereList = previous_api(get_where_list)\n\n def itersequence(self, sequence):\n \"\"\"Iterate over a sequence of row coordinates.\n\n Notes\n -----\n This iterator can be nested (see :meth:`Table.where` for an example).\n\n \"\"\"\n\n if not hasattr(sequence, '__getitem__'):\n raise TypeError((\"Wrong 'sequence' parameter type. Only sequences \"\n \"are suported.\"))\n # start, stop and step are necessary for the new iterator for\n # coordinates, and perhaps it would be useful to add them as\n # parameters in the future (not now, because I've just removed\n # the `sort` argument for 2.1).\n #\n # *Important note*: Negative values for step are not supported\n # for the general case, but only for the itersorted() and\n # read_sorted() purposes! The self._process_range_read will raise\n # an appropiate error.\n # F. Alted 2008-09-18\n # A.V. 
20130513: _process_range_read --> _process_range\n (start, stop, step) = self._process_range(None, None, None)\n if (start > stop) or (len(sequence) == 0):\n return iter([])\n row = tableextension.Row(self)\n return row._iter(start, stop, step, coords=sequence)\n\n def _check_sortby_csi(self, sortby, checkCSI):\n if isinstance(sortby, Column):\n icol = sortby\n elif isinstance(sortby, str):\n icol = self.cols._f_col(sortby)\n else:\n raise TypeError(\n \"`sortby` can only be a `Column` or string object, \"\n \"but you passed an object of type: %s\" % type(sortby))\n if icol.is_indexed and icol.index.kind == \"full\":\n if checkCSI and not icol.index.is_csi:\n # The index exists, but it is not a CSI one.\n raise ValueError(\n \"Field `%s` must have associated a CSI index \"\n \"in table `%s`, but the existing one is not. \"\n % (sortby, self))\n return icol.index\n else:\n raise ValueError(\n \"Field `%s` must have associated a 'full' index \"\n \"in table `%s`.\" % (sortby, self))\n\n _check_sortby_CSI = previous_api(_check_sortby_csi)\n\n def itersorted(self, sortby, checkCSI=False,\n start=None, stop=None, step=None):\n \"\"\"Iterate table data following the order of the index of sortby\n column.\n\n The sortby column must have associated a full index. If you want to\n ensure a fully sorted order, the index must be a CSI one. You may want\n to use the checkCSI argument in order to explicitly check for the\n existence of a CSI index.\n\n The meaning of the start, stop and step arguments is the same as in\n :meth:`Table.read`.\n\n .. versionchanged:: 3.0\n If the *start* parameter is provided and *stop* is None then the\n table is iterated from *start* to the last line.\n In PyTables < 3.0 only one element was returned.\n\n \"\"\"\n\n index = self._check_sortby_csi(sortby, checkCSI)\n # Adjust the slice to be used.\n (start, stop, step) = self._process_range(start, stop, step,\n warn_negstep=False)\n if (start > stop and 0 < step) or (start < stop and 0 > step):\n # Fall-back action is to return an empty iterator\n return iter([])\n row = tableextension.Row(self)\n return row._iter(start, stop, step, coords=index)\n\n def read_sorted(self, sortby, checkCSI=False, field=None,\n start=None, stop=None, step=None):\n \"\"\"Read table data following the order of the index of sortby column.\n\n The sortby column must have associated a full index. If you want to\n ensure a fully sorted order, the index must be a CSI one. You may want\n to use the checkCSI argument in order to explicitly check for the\n existence of a CSI index.\n\n If field is supplied only the named column will be selected. If the\n column is not nested, an *array* of the current flavor will be\n returned; if it is, a *structured array* will be used instead. If no\n field is specified, all the columns will be returned in a structured\n array of the current flavor.\n\n The meaning of the start, stop and step arguments is the same as in\n :meth:`Table.read`.\n\n .. versionchanged:: 3.0\n The start, stop and step parameters now behave like in slice.\n\n \"\"\"\n\n self._g_check_open()\n index = self._check_sortby_csi(sortby, checkCSI)\n coords = index[start:stop:step]\n return self.read_coordinates(coords, field)\n\n readSorted = previous_api(read_sorted)\n\n def iterrows(self, start=None, stop=None, step=None):\n \"\"\"Iterate over the table using a Row instance.\n\n If a range is not supplied, *all the rows* in the table are iterated\n upon - you can also use the :meth:`Table.__iter__` special method for\n that purpose. 
If you want to iterate over a given *range of rows* in\n the table, you may use the start, stop and step parameters.\n\n .. warning::\n\n When in the middle of a table row iterator, you should not\n use methods that can change the number of rows in the table\n (like :meth:`Table.append` or :meth:`Table.remove_rows`) or\n unexpected errors will happen.\n\n See Also\n --------\n tableextension.Row : the table row iterator and field accessor\n\n Examples\n --------\n\n ::\n\n result = [ row['var2'] for row in table.iterrows(step=5)\n if row['var1'] <= 20 ]\n\n Notes\n -----\n This iterator can be nested (see :meth:`Table.where` for an example).\n\n .. versionchanged:: 3.0\n If the *start* parameter is provided and *stop* is None then the\n table is iterated from *start* to the last line.\n In PyTables < 3.0 only one element was returned.\n\n \"\"\"\n (start, stop, step) = self._process_range(start, stop, step,\n warn_negstep=False)\n if (start > stop and 0 < step) or (start < stop and 0 > step):\n # Fall-back action is to return an empty iterator\n return iter([])\n row = tableextension.Row(self)\n return row._iter(start, stop, step)\n\n def __iter__(self):\n \"\"\"Iterate over the table using a Row instance.\n\n This is equivalent to calling :meth:`Table.iterrows` with default\n arguments, i.e. it iterates over *all the rows* in the table.\n\n See Also\n --------\n tableextension.Row : the table row iterator and field accessor\n\n Examples\n --------\n\n ::\n\n result = [ row['var2'] for row in table if row['var1'] <= 20 ]\n\n Which is equivalent to::\n\n result = [ row['var2'] for row in table.iterrows()\n if row['var1'] <= 20 ]\n\n Notes\n -----\n This iterator can be nested (see :meth:`Table.where` for an example).\n\n \"\"\"\n\n return self.iterrows()\n\n def _read(self, start, stop, step, field=None, out=None):\n \"\"\"Read a range of rows and return an in-memory object.\"\"\"\n\n select_field = None\n if field:\n if field not in self.coldtypes:\n if field in self.description._v_names:\n # Remember to select this field\n select_field = field\n field = None\n else:\n raise KeyError((\"Field {0} not found in table \"\n \"{1}\").format(field, self))\n else:\n # The column hangs directly from the top\n dtype_field = self.coldtypes[field]\n\n # Return a rank-0 array if start > stop\n if (start >= stop and 0 < step) or (start <= stop and 0 > step):\n if field is None:\n nra = self._get_container(0)\n return nra\n return numpy.empty(shape=0, dtype=dtype_field)\n\n nrows = len(xrange(0, stop - start, step))\n\n if out is None:\n # Compute the shape of the resulting column object\n if field:\n # Create a container for the results\n result = numpy.empty(shape=nrows, dtype=dtype_field)\n else:\n # Recarray case\n result = self._get_container(nrows)\n else:\n # there is no fast way to byteswap, since different columns may\n # have different byteorders\n if not out.dtype.isnative:\n raise ValueError((\"output array must be in system's byteorder \"\n \"or results will be incorrect\"))\n if field:\n bytes_required = dtype_field.itemsize * nrows\n else:\n bytes_required = self.rowsize * nrows\n if bytes_required != out.nbytes:\n raise ValueError(('output array size invalid, got {0} bytes, '\n 'need {1} bytes').format(out.nbytes,\n bytes_required))\n if not out.flags['C_CONTIGUOUS']:\n raise ValueError('output array not C contiguous')\n result = out\n\n # Call the routine to fill-up the resulting array\n if step == 1 and not field:\n # This optimization works three times faster than\n # the 
row._fill_col method (up to 170 MB/s on a pentium IV @ 2GHz)\n self._read_records(start, stop - start, result)\n # Warning!: _read_field_name should not be used until\n # H5TBread_fields_name in tableextension will be finished\n # F. Alted 2005/05/26\n # XYX Ho implementem per a PyTables 2.0??\n elif field and step > 15 and 0:\n # For step>15, this seems to work always faster than row._fill_col.\n self._read_field_name(result, start, stop, step, field)\n else:\n self.row._fill_col(result, start, stop, step, field)\n\n if select_field:\n return result[select_field]\n else:\n return result\n\n def read(self, start=None, stop=None, step=None, field=None, out=None):\n \"\"\"Get data in the table as a (record) array.\n\n The start, stop and step parameters can be used to select only\n a *range of rows* in the table. Their meanings are the same as\n in the built-in Python slices.\n\n If field is supplied only the named column will be selected.\n If the column is not nested, an *array* of the current flavor\n will be returned; if it is, a *structured array* will be used\n instead. If no field is specified, all the columns will be\n returned in a structured array of the current flavor.\n\n Columns under a nested column can be specified in the field\n parameter by using a slash character (/) as a separator (e.g.\n 'position/x').\n\n The out parameter may be used to specify a NumPy array to\n receive the output data. Note that the array must have the\n same size as the data selected with the other parameters.\n Note that the array's datatype is not checked and no type\n casting is performed, so if it does not match the datatype on\n disk, the output will not be correct.\n\n When specifying a single nested column with the field parameter,\n and supplying an output buffer with the out parameter, the\n output buffer must contain all columns in the table.\n The data in all columns will be read into the output buffer.\n However, only the specified nested column will be returned from\n the method call.\n\n When data is read from disk in NumPy format, the output will be\n in the current system's byteorder, regardless of how it is\n stored on disk. If the out parameter is specified, the output\n array also must be in the current system's byteorder.\n\n .. versionchanged:: 3.0\n Added the *out* parameter. Also the start, stop and step\n parameters now behave like in slice.\n\n Examples\n --------\n\n Reading the entire table::\n\n t.read()\n\n Reading record n. 6::\n\n t.read(6, 7)\n\n Reading from record n. 
6 to the end of the table::\n\n t.read(6)\n\n \"\"\"\n\n self._g_check_open()\n\n if field:\n self._check_column(field)\n\n if out is not None and self.flavor != 'numpy':\n msg = (\"Optional 'out' argument may only be supplied if array \"\n \"flavor is 'numpy', currently is {0}\").format(self.flavor)\n raise TypeError(msg)\n\n #(start, stop, step) = self._process_range_read(start, stop, step,\n (start, stop, step) = self._process_range(start, stop, step,\n warn_negstep=False)\n\n arr = self._read(start, stop, step, field, out)\n return internal_to_flavor(arr, self.flavor)\n\n def _read_coordinates(self, coords, field=None):\n \"\"\"Private part of `read_coordinates()` with no flavor conversion.\"\"\"\n\n coords = self._point_selection(coords)\n\n ncoords = len(coords)\n # Create a read buffer only if needed\n if field is None or ncoords > 0:\n # Doing a copy is faster when ncoords is small (<1000)\n if ncoords < min(1000, self.nrowsinbuf):\n result = self._v_iobuf[:ncoords].copy()\n else:\n result = self._get_container(ncoords)\n\n # Do the real read\n if ncoords > 0:\n # Turn coords into an array of coordinate indexes, if necessary\n if not (isinstance(coords, numpy.ndarray) and\n coords.dtype.type is _npsizetype and\n coords.flags.contiguous and\n coords.flags.aligned):\n # Get a contiguous and aligned coordinate array\n coords = numpy.array(coords, dtype=SizeType)\n self._read_elements(coords, result)\n\n # Do the final conversions, if needed\n if field:\n if ncoords > 0:\n result = get_nested_field(result, field)\n else:\n # Get an empty array from the cache\n result = self._getemptyarray(self.coldtypes[field])\n return result\n\n _readCoordinates = previous_api(_read_coordinates)\n\n def read_coordinates(self, coords, field=None):\n \"\"\"Get a set of rows given their indexes as a (record) array.\n\n This method works much like the :meth:`Table.read` method, but it uses\n a sequence (coords) of row indexes to select the wanted columns,\n instead of a column range.\n\n The selected rows are returned in an array or structured array of the\n current flavor.\n\n \"\"\"\n\n self._g_check_open()\n result = self._read_coordinates(coords, field)\n return internal_to_flavor(result, self.flavor)\n\n readCoordinates = previous_api(read_coordinates)\n\n def get_enum(self, colname):\n \"\"\"Get the enumerated type associated with the named column.\n\n If the column named colname (a string) exists and is of an enumerated\n type, the corresponding Enum instance (see :ref:`EnumClassDescr`) is\n returned. If it is not of an enumerated type, a TypeError is raised. If\n the column does not exist, a KeyError is raised.\n\n \"\"\"\n\n self._check_column(colname)\n\n try:\n return self._colenums[colname]\n except KeyError:\n raise TypeError(\n \"column ``%s`` of table ``%s`` is not of an enumerated type\"\n % (colname, self._v_pathname))\n\n getEnum = previous_api(get_enum)\n\n def col(self, name):\n \"\"\"Get a column from the table.\n\n If a column called name exists in the table, it is read and returned as\n a NumPy object. 
If it does not exist, a KeyError is raised.\n\n Examples\n --------\n\n ::\n\n narray = table.col('var2')\n\n That statement is equivalent to::\n\n narray = table.read(field='var2')\n\n Here you can see how this method can be used as a shorthand for the\n :meth:`Table.read` method.\n\n \"\"\"\n\n return self.read(field=name)\n\n def __getitem__(self, key):\n \"\"\"Get a row or a range of rows from the table.\n\n If key argument is an integer, the corresponding table row is returned\n as a record of the current flavor. If key is a slice, the range of rows\n determined by it is returned as a structured array of the current\n flavor.\n\n In addition, NumPy-style point selections are supported. In\n particular, if key is a list of row coordinates, the set of rows\n determined by it is returned. Furthermore, if key is an array of\n boolean values, only the coordinates where key is True are returned.\n Note that for the latter to work it is necessary that key list would\n contain exactly as many rows as the table has.\n\n Examples\n --------\n\n ::\n\n record = table[4]\n recarray = table[4:1000:2]\n recarray = table[[4,1000]] # only retrieves rows 4 and 1000\n recarray = table[[True, False, ..., True]]\n\n Those statements are equivalent to::\n\n record = table.read(start=4)[0]\n recarray = table.read(start=4, stop=1000, step=2)\n recarray = table.read_coordinates([4,1000])\n recarray = table.read_coordinates([True, False, ..., True])\n\n Here, you can see how indexing can be used as a shorthand for the\n :meth:`Table.read` and :meth:`Table.read_coordinates` methods.\n\n \"\"\"\n\n self._g_check_open()\n\n if is_idx(key):\n key = operator.index(key)\n\n # Index out of range protection\n if key >= self.nrows:\n raise IndexError(\"Index out of range\")\n if key < 0:\n # To support negative values\n key += self.nrows\n (start, stop, step) = self._process_range(key, key + 1, 1)\n return self.read(start, stop, step)[0]\n elif isinstance(key, slice):\n (start, stop, step) = self._process_range(\n key.start, key.stop, key.step)\n return self.read(start, stop, step)\n # Try with a boolean or point selection\n elif type(key) in (list, tuple) or isinstance(key, numpy.ndarray):\n return self._read_coordinates(key, None)\n else:\n raise IndexError(\"Invalid index or slice: %r\" % (key,))\n\n def __setitem__(self, key, value):\n \"\"\"Set a row or a range of rows in the table.\n\n It takes different actions depending on the type of the *key*\n parameter: if it is an integer, the corresponding table row is\n set to *value* (a record or sequence capable of being converted\n to the table structure). If *key* is a slice, the row slice\n determined by it is set to *value* (a record array or sequence\n capable of being converted to the table structure).\n\n In addition, NumPy-style point selections are supported. In\n particular, if key is a list of row coordinates, the set of rows\n determined by it is set to value. Furthermore, if key is an array of\n boolean values, only the coordinates where key is True are set to\n values from value. 
Note that for the latter to work it is necessary\n that key list would contain exactly as many rows as the table has.\n\n Examples\n --------\n\n ::\n\n # Modify just one existing row\n table[2] = [456,'db2',1.2]\n\n # Modify two existing rows\n rows = numpy.rec.array([[457,'db1',1.2],[6,'de2',1.3]],\n formats='i4,a3,f8')\n table[1:30:2] = rows # modify a table slice\n table[[1,3]] = rows # only modifies rows 1 and 3\n table[[True,False,True]] = rows # only modifies rows 0 and 2\n\n Which is equivalent to::\n\n table.modify_rows(start=2, rows=[456,'db2',1.2])\n rows = numpy.rec.array([[457,'db1',1.2],[6,'de2',1.3]],\n formats='i4,a3,f8')\n table.modify_rows(start=1, stop=3, step=2, rows=rows)\n table.modify_coordinates([1,3,2], rows)\n table.modify_coordinates([True, False, True], rows)\n\n Here, you can see how indexing can be used as a shorthand for the\n :meth:`Table.modify_rows` and :meth:`Table.modify_coordinates`\n methods.\n\n \"\"\"\n\n self._g_check_open()\n self._v_file._check_writable()\n\n if is_idx(key):\n key = operator.index(key)\n\n # Index out of range protection\n if key >= self.nrows:\n raise IndexError(\"Index out of range\")\n if key < 0:\n # To support negative values\n key += self.nrows\n return self.modify_rows(key, key + 1, 1, [value])\n elif isinstance(key, slice):\n (start, stop, step) = self._process_range(\n key.start, key.stop, key.step)\n return self.modify_rows(start, stop, step, value)\n # Try with a boolean or point selection\n elif type(key) in (list, tuple) or isinstance(key, numpy.ndarray):\n return self.modify_coordinates(key, value)\n else:\n raise IndexError(\"Invalid index or slice: %r\" % (key,))\n\n def _save_buffered_rows(self, wbufRA, lenrows):\n \"\"\"Update the indexes after a flushing of rows.\"\"\"\n\n self._open_append(wbufRA)\n self._append_records(lenrows)\n self._close_append()\n if self.indexed:\n self._unsaved_indexedrows += lenrows\n # The table caches for indexed queries are dirty now\n self._dirtycache = True\n if self.autoindex:\n # Flush the unindexed rows\n self.flush_rows_to_index(_lastrow=False)\n else:\n # All the columns are dirty now\n self._mark_columns_as_dirty(self.colpathnames)\n\n _saveBufferedRows = previous_api(_save_buffered_rows)\n\n def append(self, rows):\n \"\"\"Append a sequence of rows to the end of the table.\n\n The rows argument may be any object which can be converted to\n a structured array compliant with the table structure\n (otherwise, a ValueError is raised). 
This includes NumPy\n structured arrays, lists of tuples or array records, and a\n string or Python buffer.\n\n Examples\n --------\n\n ::\n\n import tables as tb\n\n class Particle(tb.IsDescription):\n name = tb.StringCol(16, pos=1) # 16-character String\n lati = tb.IntCol(pos=2) # integer\n longi = tb.IntCol(pos=3) # integer\n pressure = tb.Float32Col(pos=4) # float (single-precision)\n temperature = tb.FloatCol(pos=5) # double (double-precision)\n\n fileh = tb.open_file('test4.h5', mode='w')\n table = fileh.create_table(fileh.root, 'table', Particle,\n \"A table\")\n\n # Append several rows in only one call\n table.append([(\"Particle: 10\", 10, 0, 10 * 10, 10**2),\n (\"Particle: 11\", 11, -1, 11 * 11, 11**2),\n (\"Particle: 12\", 12, -2, 12 * 12, 12**2)])\n fileh.close()\n\n \"\"\"\n\n self._g_check_open()\n self._v_file._check_writable()\n\n if not self._chunked:\n raise HDF5ExtError(\n \"You cannot append rows to a non-chunked table.\", h5bt=False)\n\n # Try to convert the object into a recarray compliant with table\n try:\n iflavor = flavor_of(rows)\n if iflavor != 'python':\n rows = array_as_internal(rows, iflavor)\n # Works for Python structures and always copies the original,\n # so the resulting object is safe for in-place conversion.\n wbufRA = numpy.rec.array(rows, dtype=self._v_dtype)\n except Exception as exc: # XXX\n raise ValueError(\"rows parameter cannot be converted into a \"\n \"recarray object compliant with table '%s'. \"\n \"The error was: <%s>\" % (str(self), exc))\n lenrows = wbufRA.shape[0]\n # If the number of rows to append is zero, don't do anything else\n if lenrows > 0:\n # Save write buffer to disk\n self._save_buffered_rows(wbufRA, lenrows)\n\n def _conv_to_recarr(self, obj):\n \"\"\"Try to convert the object into a recarray.\"\"\"\n\n try:\n iflavor = flavor_of(obj)\n if iflavor != 'python':\n obj = array_as_internal(obj, iflavor)\n if hasattr(obj, \"shape\") and obj.shape == ():\n # To allow conversion of scalars (void type) into arrays.\n # See http://projects.scipy.org/scipy/numpy/ticket/315\n # for discussion on how to pass buffers to constructors\n # See also http://projects.scipy.org/scipy/numpy/ticket/348\n recarr = numpy.array([obj], dtype=self._v_dtype)\n else:\n # Works for Python structures and always copies the original,\n # so the resulting object is safe for in-place conversion.\n recarr = numpy.rec.array(obj, dtype=self._v_dtype)\n except Exception as exc: # XXX\n raise ValueError(\"Object cannot be converted into a recarray \"\n \"object compliant with table format '%s'. \"\n \"The error was: <%s>\" %\n (self.description._v_nested_descr, exc))\n\n return recarr\n\n def modify_coordinates(self, coords, rows):\n \"\"\"Modify a series of rows in positions specified in coords.\n\n The values in the selected rows will be modified with the data given in\n rows. 
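        A minimal sketch (the three-field record layout shown here is
        assumed, not taken from a real table)::

            # overwrite rows 1 and 3 with two complete records
            table.modify_coordinates([1, 3],
                                     [(457, 'db1', 1.2), (6, 'de2', 1.3)])
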
This method returns the number of rows modified.\n\n The possible values for the rows argument are the same as in\n :meth:`Table.append`.\n\n \"\"\"\n\n if rows is None: # Nothing to be done\n return SizeType(0)\n\n # Convert the coordinates to something expected by HDF5\n coords = self._point_selection(coords)\n\n lcoords = len(coords)\n if len(rows) < lcoords:\n raise ValueError(\"The value has not enough elements to fill-in \"\n \"the specified range\")\n\n # Convert rows into a recarray\n recarr = self._conv_to_recarr(rows)\n\n if len(coords) > 0:\n # Do the actual update of rows\n self._update_elements(lcoords, coords, recarr)\n\n # Redo the index if needed\n self._reindex(self.colpathnames)\n\n return SizeType(lcoords)\n\n modifyCoordinates = previous_api(modify_coordinates)\n\n def modify_rows(self, start=None, stop=None, step=None, rows=None):\n \"\"\"Modify a series of rows in the slice [start:stop:step].\n\n The values in the selected rows will be modified with the data given in\n rows. This method returns the number of rows modified. Should the\n modification exceed the length of the table, an IndexError is raised\n before changing data.\n\n The possible values for the rows argument are the same as in\n :meth:`Table.append`.\n\n \"\"\"\n\n if step is None:\n step = 1\n if rows is None: # Nothing to be done\n return SizeType(0)\n if start is None:\n start = 0\n\n if start < 0:\n raise ValueError(\"'start' must have a positive value.\")\n if step < 1:\n raise ValueError(\n \"'step' must have a value greater or equal than 1.\")\n if stop is None:\n # compute the stop value. start + len(rows)*step does not work\n stop = start + (len(rows) - 1) * step + 1\n\n (start, stop, step) = self._process_range(start, stop, step)\n if stop > self.nrows:\n raise IndexError(\"This modification will exceed the length of \"\n \"the table. Giving up.\")\n # Compute the number of rows to read.\n nrows = len(xrange(0, stop - start, step))\n if len(rows) != nrows:\n raise ValueError(\"The value has different elements than the \"\n \"specified range\")\n\n # Convert rows into a recarray\n recarr = self._conv_to_recarr(rows)\n\n lenrows = len(recarr)\n if start + lenrows > self.nrows:\n raise IndexError(\"This modification will exceed the length of the \"\n \"table. Giving up.\")\n\n # Do the actual update\n self._update_records(start, stop, step, recarr)\n\n # Redo the index if needed\n self._reindex(self.colpathnames)\n\n return SizeType(lenrows)\n\n modifyRows = previous_api(modify_rows)\n\n def modify_column(self, start=None, stop=None, step=None,\n column=None, colname=None):\n \"\"\"Modify one single column in the row slice [start:stop:step].\n\n The colname argument specifies the name of the column in the\n table to be modified with the data given in column. This\n method returns the number of rows modified. Should the\n modification exceed the length of the table, an IndexError is\n raised before changing data.\n\n The *column* argument may be any object which can be converted\n to a (record) array compliant with the structure of the column\n to be modified (otherwise, a ValueError is raised). 
This\n includes NumPy (record) arrays, lists of scalars, tuples or\n array records, and a string or Python buffer.\n\n \"\"\"\n if step is None:\n step = 1\n if not isinstance(colname, str):\n raise TypeError(\"The 'colname' parameter must be a string.\")\n self._v_file._check_writable()\n\n if column is None: # Nothing to be done\n return SizeType(0)\n if start is None:\n start = 0\n\n if start < 0:\n raise ValueError(\"'start' must have a positive value.\")\n if step < 1:\n raise ValueError(\n \"'step' must have a value greater or equal than 1.\")\n # Get the column format to be modified:\n objcol = self._get_column_instance(colname)\n descr = [objcol._v_parent._v_nested_descr[objcol._v_pos]]\n # Try to convert the column object into a NumPy ndarray\n try:\n # If the column is a recarray (or kind of), convert into ndarray\n if hasattr(column, 'dtype') and column.dtype.kind == 'V':\n column = numpy.rec.array(column, dtype=descr).field(0)\n else:\n # Make sure the result is always a *copy* of the original,\n # so the resulting object is safe for in-place conversion.\n iflavor = flavor_of(column)\n column = array_as_internal(column, iflavor)\n except Exception as exc: # XXX\n raise ValueError(\"column parameter cannot be converted into a \"\n \"ndarray object compliant with specified column \"\n \"'%s'. The error was: <%s>\" % (str(column), exc))\n\n # Get rid of single-dimensional dimensions\n column = column.squeeze()\n if column.shape == ():\n # Oops, stripped off to much dimensions\n column.shape = (1,)\n\n if stop is None:\n # compute the stop value. start + len(rows)*step does not work\n stop = start + (len(column) - 1) * step + 1\n (start, stop, step) = self._process_range(start, stop, step)\n if stop > self.nrows:\n raise IndexError(\"This modification will exceed the length of \"\n \"the table. Giving up.\")\n # Compute the number of rows to read.\n nrows = len(xrange(0, stop - start, step))\n if len(column) < nrows:\n raise ValueError(\"The value has not enough elements to fill-in \"\n \"the specified range\")\n # Now, read the original values:\n mod_recarr = self._read(start, stop, step)\n # Modify the appropriate column in the original recarray\n mod_col = get_nested_field(mod_recarr, colname)\n mod_col[:] = column\n # save this modified rows in table\n self._update_records(start, stop, step, mod_recarr)\n # Redo the index if needed\n self._reindex([colname])\n\n return SizeType(nrows)\n\n modifyColumn = previous_api(modify_column)\n\n def modify_columns(self, start=None, stop=None, step=None,\n columns=None, names=None):\n \"\"\"Modify a series of columns in the row slice [start:stop:step].\n\n The names argument specifies the names of the columns in the\n table to be modified with the data given in columns. This\n method returns the number of rows modified. 
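        A minimal sketch (the column names ``col1`` and ``col3`` are assumed
        for illustration)::

            # rewrite the first two values of the assumed columns
            # 'col1' and 'col3'
            table.modify_columns(start=0, columns=[[4, 5], [1.1, 2.2]],
                                 names=['col1', 'col3'])
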
Should the\n modification exceed the length of the table, an IndexError\n is raised before changing data.\n\n The columns argument may be any object which can be converted\n to a structured array compliant with the structure of the\n columns to be modified (otherwise, a ValueError is raised).\n This includes NumPy structured arrays, lists of tuples or array\n records, and a string or Python buffer.\n\n \"\"\"\n if step is None:\n step = 1\n if type(names) not in (list, tuple):\n raise TypeError(\"The 'names' parameter must be a list of strings.\")\n\n if columns is None: # Nothing to be done\n return SizeType(0)\n if start is None:\n start = 0\n if start < 0:\n raise ValueError(\"'start' must have a positive value.\")\n if step < 1:\n raise ValueError((\"'step' must have a value greater or \"\n \"equal than 1.\"))\n descr = []\n for colname in names:\n objcol = self._get_column_instance(colname)\n descr.append(objcol._v_parent._v_nested_descr[objcol._v_pos])\n # descr.append(objcol._v_parent._v_dtype[objcol._v_pos])\n # Try to convert the columns object into a recarray\n try:\n # Make sure the result is always a *copy* of the original,\n # so the resulting object is safe for in-place conversion.\n iflavor = flavor_of(columns)\n if iflavor != 'python':\n columns = array_as_internal(columns, iflavor)\n recarray = numpy.rec.array(columns, dtype=descr)\n else:\n recarray = numpy.rec.fromarrays(columns, dtype=descr)\n except Exception as exc: # XXX\n raise ValueError(\"columns parameter cannot be converted into a \"\n \"recarray object compliant with table '%s'. \"\n \"The error was: <%s>\" % (str(self), exc))\n\n if stop is None:\n # compute the stop value. start + len(rows)*step does not work\n stop = start + (len(recarray) - 1) * step + 1\n (start, stop, step) = self._process_range(start, stop, step)\n if stop > self.nrows:\n raise IndexError(\"This modification will exceed the length of \"\n \"the table. 
Giving up.\")\n # Compute the number of rows to read.\n nrows = len(xrange(0, stop - start, step))\n if len(recarray) < nrows:\n raise ValueError(\"The value has not enough elements to fill-in \"\n \"the specified range\")\n # Now, read the original values:\n mod_recarr = self._read(start, stop, step)\n # Modify the appropriate columns in the original recarray\n for i, name in enumerate(recarray.dtype.names):\n mod_col = get_nested_field(mod_recarr, names[i])\n mod_col[:] = recarray[name].squeeze()\n # save this modified rows in table\n self._update_records(start, stop, step, mod_recarr)\n # Redo the index if needed\n self._reindex(names)\n\n return SizeType(nrows)\n\n modifyColumns = previous_api(modify_columns)\n\n def flush_rows_to_index(self, _lastrow=True):\n \"\"\"Add remaining rows in buffers to non-dirty indexes.\n\n This can be useful when you have chosen non-automatic indexing\n for the table (see the :attr:`Table.autoindex` property in\n :class:`Table`) and you want to update the indexes on it.\n\n \"\"\"\n\n rowsadded = 0\n if self.indexed:\n # Update the number of unsaved indexed rows\n start = self._indexedrows\n nrows = self._unsaved_indexedrows\n for (colname, colindexed) in self.colindexed.iteritems():\n if colindexed:\n col = self.cols._g_col(colname)\n if nrows > 0 and not col.index.dirty:\n rowsadded = self._add_rows_to_index(\n colname, start, nrows, _lastrow, update=True)\n self._unsaved_indexedrows -= rowsadded\n self._indexedrows += rowsadded\n return rowsadded\n\n flushRowsToIndex = previous_api(flush_rows_to_index)\n\n def _add_rows_to_index(self, colname, start, nrows, lastrow, update):\n \"\"\"Add more elements to the existing index.\"\"\"\n\n # This method really belongs to Column, but since it makes extensive\n # use of the table, it gets dangerous when closing the file, since the\n # column may be accessing a table which is being destroyed.\n index = self.cols._g_col(colname).index\n slicesize = index.slicesize\n # The next loop does not rely on xrange so that it can\n # deal with long ints (i.e. more than 32-bit integers)\n # This allows to index columns with more than 2**31 rows\n # F. Alted 2005-05-09\n startLR = index.sorted.nrows * slicesize\n indexedrows = startLR - start\n stop = start + nrows - slicesize + 1\n while startLR < stop:\n index.append(\n [self._read(startLR, startLR + slicesize, 1, colname)],\n update=update)\n indexedrows += slicesize\n startLR += slicesize\n # index the remaining rows in last row\n if lastrow and startLR < self.nrows:\n index.append_last_row(\n [self._read(startLR, self.nrows, 1, colname)],\n update=update)\n indexedrows += self.nrows - startLR\n return indexedrows\n\n _addRowsToIndex = previous_api(_add_rows_to_index)\n\n def remove_rows(self, start=None, stop=None, step=None):\n \"\"\"Remove a range of rows in the table.\n\n .. versionchanged:: 3.0\n The start, stop and step parameters now behave like in slice.\n\n .. seealso:: remove_row()\n\n Parameters\n ----------\n start : int\n Sets the starting row to be removed. It accepts negative values\n meaning that the count starts from the end. A value of 0 means the\n first row.\n stop : int\n Sets the last row to be removed to stop-1, i.e. the end point is\n omitted (in the Python range() tradition). Negative values are also\n accepted.\n step : int\n The step size between rows to remove.\n\n .. 
versionadded:: 3.0\n\n Examples\n --------\n\n Removing rows from 5 to 10 (excluded)::\n\n t.remove_rows(5, 10)\n\n Removing all rows starting drom the 10th::\n\n t.remove_rows(10)\n\n Removing the 6th row::\n\n t.remove_rows(6, 7)\n\n .. note::\n\n removing a single row can be done using the specific\n :meth:`remove_row` method.\n\n \"\"\"\n\n (start, stop, step) = self._process_range(start, stop, step)\n nrows = numpy.abs(stop - start)\n if nrows >= self.nrows:\n raise NotImplementedError('You are trying to delete all the rows '\n 'in table \"%s\". This is not supported '\n 'right now due to limitations on the '\n 'underlying HDF5 library. Sorry!' %\n self._v_pathname)\n nrows = self._remove_rows(start, stop, step)\n # remove_rows is a invalidating index operation\n self._reindex(self.colpathnames)\n\n return SizeType(nrows)\n\n removeRows = previous_api(remove_rows)\n\n def remove_row(self, n):\n \"\"\"Removes a row from the table.\n\n If only start is supplied, only this row is to be deleted. If a range\n is supplied, i.e. both the start and stop parameters are passed, all\n the rows in the range are removed. A step parameter is not supported,\n and it is not foreseen to be implemented anytime soon.\n\n Parameters\n ----------\n n : int\n The index of the row to remove.\n\n .. versionadded:: 3.0\n\n \"\"\"\n\n self.remove_rows(start=n, stop=n + 1)\n\n def _g_update_dependent(self):\n super(Table, self)._g_update_dependent()\n\n # Update the new path in columns\n self.cols._g_update_table_location(self)\n\n # Update the new path in the Row instance, if cached. Fixes #224.\n if 'row' in self.__dict__:\n self.__dict__['row'] = tableextension.Row(self)\n\n _g_updateDependent = previous_api(_g_update_dependent)\n\n def _g_move(self, newparent, newname):\n \"\"\"Move this node in the hierarchy.\n\n This overloads the Node._g_move() method.\n\n \"\"\"\n\n itgpathname = _index_pathname_of(self)\n\n # First, move the table to the new location.\n super(Table, self)._g_move(newparent, newname)\n\n # Then move the associated index group (if any).\n try:\n itgroup = self._v_file._get_node(itgpathname)\n except NoSuchNodeError:\n pass\n else:\n newigroup = self._v_parent\n newiname = _index_name_of(self)\n itgroup._g_move(newigroup, newiname)\n\n def _g_remove(self, recursive=False, force=False):\n # Remove the associated index group (if any).\n itgpathname = _index_pathname_of(self)\n try:\n itgroup = self._v_file._get_node(itgpathname)\n except NoSuchNodeError:\n pass\n else:\n itgroup._f_remove(recursive=True)\n self.indexed = False # there are indexes no more\n\n # Remove the leaf itself from the hierarchy.\n super(Table, self)._g_remove(recursive, force)\n\n def _set_column_indexing(self, colpathname, indexed):\n \"\"\"Mark the referred column as indexed or non-indexed.\"\"\"\n\n colindexed = self.colindexed\n isindexed, wasindexed = bool(indexed), colindexed[colpathname]\n if isindexed == wasindexed:\n return # indexing state is unchanged\n\n # Changing the set of indexed columns invalidates the condition cache\n self._condition_cache.clear()\n colindexed[colpathname] = isindexed\n self.indexed = max(colindexed.values()) # this is an OR :)\n\n _setColumnIndexing = previous_api(_set_column_indexing)\n\n def _mark_columns_as_dirty(self, colnames):\n \"\"\"Mark column indexes in `colnames` as dirty.\"\"\"\n\n assert len(colnames) > 0\n if self.indexed:\n colindexed, cols = self.colindexed, self.cols\n # Mark the proper indexes as dirty\n for colname in colnames:\n if colindexed[colname]:\n col = 
cols._g_col(colname)\n col.index.dirty = True\n\n _markColumnsAsDirty = previous_api(_mark_columns_as_dirty)\n\n def _reindex(self, colnames):\n \"\"\"Re-index columns in `colnames` if automatic indexing is true.\"\"\"\n\n if self.indexed:\n colindexed, cols = self.colindexed, self.cols\n colstoindex = []\n # Mark the proper indexes as dirty\n for colname in colnames:\n if colindexed[colname]:\n col = cols._g_col(colname)\n col.index.dirty = True\n colstoindex.append(colname)\n # Now, re-index the dirty ones\n if self.autoindex and colstoindex:\n self._do_reindex(dirty=True)\n # The table caches for indexed queries are dirty now\n self._dirtycache = True\n\n _reIndex = previous_api(_reindex)\n\n def _do_reindex(self, dirty):\n \"\"\"Common code for `reindex()` and `reindex_dirty()`.\"\"\"\n\n indexedrows = 0\n for (colname, colindexed) in self.colindexed.iteritems():\n if colindexed:\n indexcol = self.cols._g_col(colname)\n indexedrows = indexcol._do_reindex(dirty)\n # Update counters in case some column has been updated\n if indexedrows > 0:\n self._indexedrows = indexedrows\n self._unsaved_indexedrows = self.nrows - indexedrows\n\n return SizeType(indexedrows)\n\n _doReIndex = previous_api(_do_reindex)\n\n def reindex(self):\n \"\"\"Recompute all the existing indexes in the table.\n\n This can be useful when you suspect that, for any reason, the\n index information for columns is no longer valid and want to\n rebuild the indexes on it.\n\n \"\"\"\n\n self._do_reindex(dirty=False)\n\n reIndex = previous_api(reindex)\n\n def reindex_dirty(self):\n \"\"\"Recompute the existing indexes in table, *if* they are dirty.\n\n This can be useful when you have set :attr:`Table.autoindex`\n (see :class:`Table`) to false for the table and you want to\n update the indexes after a invalidating index operation\n (:meth:`Table.remove_rows`, for example).\n\n \"\"\"\n\n self._do_reindex(dirty=True)\n\n reIndexDirty = previous_api(reindex_dirty)\n\n def _g_copy_rows(self, object, start, stop, step, sortby, checkCSI):\n \"Copy rows from self to object\"\n if sortby is None:\n self._g_copy_rows_optim(object, start, stop, step)\n return\n lenbuf = self.nrowsinbuf\n absstep = step\n if step < 0:\n absstep = -step\n start, stop = stop + 1, start + 1\n if sortby is not None:\n index = self._check_sortby_csi(sortby, checkCSI)\n for start2 in xrange(start, stop, absstep * lenbuf):\n stop2 = start2 + absstep * lenbuf\n if stop2 > stop:\n stop2 = stop\n # The next 'if' is not needed, but it doesn't bother either\n if sortby is None:\n rows = self[start2:stop2:step]\n else:\n coords = index[start2:stop2:step]\n rows = self.read_coordinates(coords)\n # Save the records on disk\n object.append(rows)\n object.flush()\n\n _g_copyRows = previous_api(_g_copy_rows)\n\n def _g_copy_rows_optim(self, object, start, stop, step):\n \"\"\"Copy rows from self to object (optimized version)\"\"\"\n\n nrowsinbuf = self.nrowsinbuf\n object._open_append(self._v_iobuf)\n nrowsdest = object.nrows\n for start2 in xrange(start, stop, step * nrowsinbuf):\n # Save the records on disk\n stop2 = start2 + step * nrowsinbuf\n if stop2 > stop:\n stop2 = stop\n # Optimized version (it saves some conversions)\n nrows = ((stop2 - start2 - 1) // step) + 1\n self.row._fill_col(self._v_iobuf, start2, stop2, step, None)\n # The output buffer is created anew,\n # so the operation is safe to in-place conversion.\n object._append_records(nrows)\n nrowsdest += nrows\n object._close_append()\n\n _g_copyRows_optim = previous_api(_g_copy_rows_optim)\n\n def 
_g_prop_indexes(self, other):\n \"\"\"Generate index in `other` table for every indexed column here.\"\"\"\n\n oldcols, newcols = self.colinstances, other.colinstances\n for colname in newcols:\n if (isinstance(oldcols[colname], Column)):\n oldcolindexed = oldcols[colname].is_indexed\n if oldcolindexed:\n oldcolindex = oldcols[colname].index\n newcol = newcols[colname]\n newcol.create_index(\n kind=oldcolindex.kind, optlevel=oldcolindex.optlevel,\n filters=oldcolindex.filters, tmp_dir=None)\n\n _g_propIndexes = previous_api(_g_prop_indexes)\n\n def _g_copy_with_stats(self, group, name, start, stop, step,\n title, filters, chunkshape, _log, **kwargs):\n \"\"\"Private part of Leaf.copy() for each kind of leaf.\"\"\"\n\n # Get the private args for the Table flavor of copy()\n sortby = kwargs.pop('sortby', None)\n propindexes = kwargs.pop('propindexes', False)\n checkCSI = kwargs.pop('checkCSI', False)\n # Compute the correct indices.\n (start, stop, step) = self._process_range_read(\n start, stop, step, warn_negstep=sortby is None)\n # And the number of final rows\n nrows = len(xrange(0, stop - start, step))\n # Create the new table and copy the selected data.\n newtable = Table(group, name, self.description, title=title,\n filters=filters, expectedrows=nrows,\n chunkshape=chunkshape,\n _log=_log)\n self._g_copy_rows(newtable, start, stop, step, sortby, checkCSI)\n nbytes = newtable.nrows * newtable.rowsize\n # Generate equivalent indexes in the new table, if required.\n if propindexes and self.indexed:\n self._g_prop_indexes(newtable)\n return (newtable, nbytes)\n\n _g_copyWithStats = previous_api(_g_copy_with_stats)\n\n # This overloading of copy is needed here in order to document\n # the additional keywords for the Table case.\n def copy(self, newparent=None, newname=None, overwrite=False,\n createparents=False, **kwargs):\n \"\"\"Copy this table and return the new one.\n\n This method has the behavior and keywords described in\n :meth:`Leaf.copy`. Moreover, it recognises the following additional\n keyword arguments.\n\n Parameters\n ----------\n sortby\n If specified, and sortby corresponds to a column with an index,\n then the copy will be sorted by this index. If you want to ensure\n a fully sorted order, the index must be a CSI one. A reverse\n sorted copy can be achieved by specifying a negative value for the\n step keyword. If sortby is omitted or None, the original table\n order is used.\n checkCSI\n If true and a CSI index does not exist for the sortby column, an\n error will be raised. If false (the default), it does nothing.\n You can use this flag in order to explicitly check for the\n existence of a CSI index.\n propindexes\n If true, the existing indexes in the source table are propagated\n (created) to the new one. 
If false (the default), the indexes are\n not propagated.\n\n \"\"\"\n\n return super(Table, self).copy(\n newparent, newname, overwrite, createparents, **kwargs)\n\n def flush(self):\n \"\"\"Flush the table buffers.\"\"\"\n\n # Flush rows that remains to be appended\n if 'row' in self.__dict__:\n self.row._flush_buffered_rows()\n if self.indexed and self.autoindex:\n # Flush any unindexed row\n rowsadded = self.flush_rows_to_index(_lastrow=True)\n assert rowsadded <= 0 or self._indexedrows == self.nrows, \\\n (\"internal error: the number of indexed rows (%d) \"\n \"and rows in the table (%d) is not equal; \"\n \"please report this to the authors.\"\n % (self._indexedrows, self.nrows))\n if self._dirtyindexes:\n # Finally, re-index any dirty column\n self.reindex_dirty()\n\n super(Table, self).flush()\n\n def _g_pre_kill_hook(self):\n \"\"\"Code to be called before killing the node.\"\"\"\n\n # Flush the buffers before to clean-up them\n # self.flush()\n # It seems that flushing during the __del__ phase is a sure receipt for\n # bringing all kind of problems:\n # 1. Illegal Instruction\n # 2. Malloc(): trying to call free() twice\n # 3. Bus Error\n # 4. Segmentation fault\n # So, the best would be doing *nothing* at all in this __del__ phase.\n # As a consequence, the I/O will not be cleaned until a call to\n # Table.flush() would be done. This could lead to a potentially large\n # memory consumption.\n # NOTE: The user should make a call to Table.flush() whenever he has\n # finished working with his table.\n # I've added a Performance warning in order to compel the user to\n # call self.flush() before the table is being preempted.\n # F. Alted 2006-08-03\n if (('row' in self.__dict__ and self.row._get_unsaved_nrows() > 0) or\n (self.indexed and self.autoindex and\n (self._unsaved_indexedrows > 0 or self._dirtyindexes))):\n warnings.warn((\"table ``%s`` is being preempted from alive nodes \"\n \"without its buffers being flushed or with some \"\n \"index being dirty. This may lead to very \"\n \"ineficient use of resources and even to fatal \"\n \"errors in certain situations. Please do a call \"\n \"to the .flush() or .reindex_dirty() methods on \"\n \"this table before start using other nodes.\")\n % (self._v_pathname), PerformanceWarning)\n # Get rid of the IO buffers (if they have been created at all)\n mydict = self.__dict__\n if '_v_iobuf' in mydict:\n del mydict['_v_iobuf']\n if '_v_wdflts' in mydict:\n del mydict['_v_wdflts']\n\n _g_preKillHook = previous_api(_g_pre_kill_hook)\n\n def _f_close(self, flush=True):\n if not self._v_isopen:\n return # the node is already closed\n\n # .. note::\n #\n # As long as ``Table`` objects access their indices on closing,\n # ``File.close()`` will need to make *two separate passes*\n # to first close ``Table`` objects and then ``Index`` hierarchies.\n #\n\n # Flush right now so the row object does not get in the middle.\n if flush:\n self.flush()\n\n # Some warnings can be issued after calling `self._g_set_location()`\n # in `self.__init__()`. 
If warnings are turned into exceptions,\n # `self._g_post_init_hook` may not be called and `self.cols` not set.\n # One example of this is\n # ``test_create.createTestCase.test05_maxFieldsExceeded()``.\n cols = self.cols\n if cols is not None:\n cols._g_close()\n\n # Close myself as a leaf.\n super(Table, self)._f_close(False)\n\n def __repr__(self):\n \"\"\"This provides column metainfo in addition to standard __str__\"\"\"\n\n if self.indexed:\n format = \"\"\"\\\n%s\n description := %r\n byteorder := %r\n chunkshape := %r\n autoindex := %r\n colindexes := %r\"\"\"\n return format % (str(self), self.description, self.byteorder,\n self.chunkshape, self.autoindex,\n _ColIndexes(self.colindexes))\n else:\n return \"\"\"\\\n%s\n description := %r\n byteorder := %r\n chunkshape := %r\"\"\" % \\\n (str(self), self.description, self.byteorder, self.chunkshape)\n\n\nclass Cols(object):\n \"\"\"Container for columns in a table or nested column.\n\n This class is used as an *accessor* to the columns in a table or nested\n column. It supports the *natural naming* convention, so that you can\n access the different columns as attributes which lead to Column instances\n (for non-nested columns) or other Cols instances (for nested columns).\n\n For instance, if table.cols is a Cols instance with a column named col1\n under it, the later can be accessed as table.cols.col1. If col1 is nested\n and contains a col2 column, this can be accessed as table.cols.col1.col2\n and so on. Because of natural naming, the names of members start with\n special prefixes, like in the Group class (see :ref:`GroupClassDescr`).\n\n Like the Column class (see :ref:`ColumnClassDescr`), Cols supports item\n access to read and write ranges of values in the table or nested column.\n\n\n .. rubric:: Cols attributes\n\n .. attribute:: _v_colnames\n\n A list of the names of the columns hanging directly\n from the associated table or nested column. The order of\n the names matches the order of their respective columns in\n the containing table.\n\n .. attribute:: _v_colpathnames\n\n A list of the pathnames of all the columns under the\n associated table or nested column (in preorder). If it does\n not contain nested columns, this is exactly the same as the\n :attr:`Cols._v_colnames` attribute.\n\n .. 
attribute:: _v_desc\n\n The associated Description instance (see\n :ref:`DescriptionClassDescr`).\n\n \"\"\"\n\n def _g_gettable(self):\n return self._v__tableFile._get_node(self._v__tablePath)\n\n _v_table = property(\n _g_gettable, None, None,\n \"The parent Table instance (see :ref:`TableClassDescr`).\")\n\n def __init__(self, table, desc):\n\n myDict = self.__dict__\n myDict['_v__tableFile'] = table._v_file\n myDict['_v__tablePath'] = table._v_pathname\n myDict['_v_desc'] = desc\n myDict['_v_colnames'] = desc._v_names\n myDict['_v_colpathnames'] = table.description._v_pathnames\n # Put the column in the local dictionary\n for name in desc._v_names:\n if name in desc._v_types:\n myDict[name] = Column(table, name, desc)\n else:\n myDict[name] = Cols(table, desc._v_colobjects[name])\n\n def _g_update_table_location(self, table):\n \"\"\"Updates the location information about the associated `table`.\"\"\"\n\n myDict = self.__dict__\n myDict['_v__tableFile'] = table._v_file\n myDict['_v__tablePath'] = table._v_pathname\n\n # Update the locations in individual columns.\n for colname in self._v_colnames:\n myDict[colname]._g_update_table_location(table)\n\n _g_updateTableLocation = previous_api(_g_update_table_location)\n\n def __len__(self):\n \"\"\"Get the number of top level columns in table.\"\"\"\n\n return len(self._v_colnames)\n\n def _f_col(self, colname):\n \"\"\"Get an accessor to the column colname.\n\n This method returns a Column instance (see :ref:`ColumnClassDescr`) if\n the requested column is not nested, and a Cols instance (see\n :ref:`ColsClassDescr`) if it is. You may use full column pathnames in\n colname.\n\n Calling cols._f_col('col1/col2') is equivalent to using cols.col1.col2.\n However, the first syntax is more intended for programmatic use. It is\n also better if you want to access columns with names that are not valid\n Python identifiers.\n\n \"\"\"\n\n if not isinstance(colname, str):\n raise TypeError(\"Parameter can only be an string. You passed \"\n \"object: %s\" % colname)\n if ((colname.find('/') > -1 and\n not colname in self._v_colpathnames) and\n not colname in self._v_colnames):\n raise KeyError((\"Cols accessor ``%s.cols%s`` does not have a \"\n \"column named ``%s``\")\n % (self._v__tablePath, self._v_desc._v_pathname,\n colname))\n\n return self._g_col(colname)\n\n def _g_col(self, colname):\n \"\"\"Like `self._f_col()` but it does not check arguments.\"\"\"\n\n # Get the Column or Description object\n inames = colname.split('/')\n cols = self\n for iname in inames:\n cols = cols.__dict__[iname]\n return cols\n\n def __getitem__(self, key):\n \"\"\"Get a row or a range of rows from a table or nested column.\n\n If key argument is an integer, the corresponding nested type row is\n returned as a record of the current flavor. 
If key is a slice, the\n range of rows determined by it is returned as a structured array of the\n current flavor.\n\n Examples\n --------\n\n ::\n\n record = table.cols[4] # equivalent to table[4]\n recarray = table.cols.Info[4:1000:2]\n\n Those statements are equivalent to::\n\n nrecord = table.read(start=4)[0]\n nrecarray = table.read(start=4, stop=1000, step=2).field('Info')\n\n Here you can see how a mix of natural naming, indexing and slicing can\n be used as shorthands for the :meth:`Table.read` method.\n\n \"\"\"\n\n table = self._v_table\n nrows = table.nrows\n if is_idx(key):\n key = operator.index(key)\n\n # Index out of range protection\n if key >= nrows:\n raise IndexError(\"Index out of range\")\n if key < 0:\n # To support negative values\n key += nrows\n (start, stop, step) = table._process_range(key, key + 1, 1)\n colgroup = self._v_desc._v_pathname\n if colgroup == \"\": # The root group\n return table.read(start, stop, step)[0]\n else:\n crecord = table.read(start, stop, step)[0]\n return crecord[colgroup]\n elif isinstance(key, slice):\n (start, stop, step) = table._process_range(\n key.start, key.stop, key.step)\n colgroup = self._v_desc._v_pathname\n if colgroup == \"\": # The root group\n return table.read(start, stop, step)\n else:\n crecarray = table.read(start, stop, step)\n if hasattr(crecarray, \"field\"):\n return crecarray.field(colgroup) # RecArray case\n else:\n return get_nested_field(crecarray, colgroup) # numpy case\n else:\n raise TypeError(\"invalid index or slice: %r\" % (key,))\n\n def __setitem__(self, key, value):\n \"\"\"Set a row or a range of rows in a table or nested column.\n\n If key argument is an integer, the corresponding row is set to\n value. If key is a slice, the range of rows determined by it is set to\n value.\n\n Examples\n --------\n\n ::\n\n table.cols[4] = record\n table.cols.Info[4:1000:2] = recarray\n\n Those statements are equivalent to::\n\n table.modify_rows(4, rows=record)\n table.modify_column(4, 1000, 2, colname='Info', column=recarray)\n\n Here you can see how a mix of natural naming, indexing and slicing\n can be used as shorthands for the :meth:`Table.modify_rows` and\n :meth:`Table.modify_column` methods.\n\n \"\"\"\n\n table = self._v_table\n nrows = table.nrows\n if is_idx(key):\n key = operator.index(key)\n\n # Index out of range protection\n if key >= nrows:\n raise IndexError(\"Index out of range\")\n if key < 0:\n # To support negative values\n key += nrows\n (start, stop, step) = table._process_range(key, key + 1, 1)\n elif isinstance(key, slice):\n (start, stop, step) = table._process_range(\n key.start, key.stop, key.step)\n else:\n raise TypeError(\"invalid index or slice: %r\" % (key,))\n\n # Actually modify the correct columns\n colgroup = self._v_desc._v_pathname\n if colgroup == \"\": # The root group\n table.modify_rows(start, stop, step, rows=value)\n else:\n table.modify_column(\n start, stop, step, colname=colgroup, column=value)\n\n def _g_close(self):\n # First, close the columns (ie possible indices open)\n for col in self._v_colnames:\n colobj = self._g_col(col)\n if isinstance(colobj, Column):\n colobj.close()\n # Delete the reference to column\n del self.__dict__[col]\n else:\n colobj._g_close()\n\n self.__dict__.clear()\n\n def __str__(self):\n \"\"\"The string representation for this object.\"\"\"\n\n # The pathname\n tablepathname = self._v__tablePath\n descpathname = self._v_desc._v_pathname\n if descpathname:\n descpathname = \".\" + descpathname\n # Get this class name\n classname = 
self.__class__.__name__\n # The number of columns\n ncols = len(self._v_colnames)\n return \"%s.cols%s (%s), %s columns\" % \\\n (tablepathname, descpathname, classname, ncols)\n\n def __repr__(self):\n \"\"\"A detailed string representation for this object.\"\"\"\n\n out = str(self) + \"\\n\"\n for name in self._v_colnames:\n # Get this class name\n classname = getattr(self, name).__class__.__name__\n # The type\n if name in self._v_desc._v_dtypes:\n tcol = self._v_desc._v_dtypes[name]\n # The shape for this column\n shape = (self._v_table.nrows,) + \\\n self._v_desc._v_dtypes[name].shape\n else:\n tcol = \"Description\"\n # Description doesn't have a shape currently\n shape = ()\n out += \" %s (%s%s, %s)\" % (name, classname, shape, tcol) + \"\\n\"\n return out\n\n\nclass Column(object):\n \"\"\"Accessor for a non-nested column in a table.\n\n Each instance of this class is associated with one *non-nested* column of a\n table. These instances are mainly used to read and write data from the\n table columns using item access (like the Cols class - see\n :ref:`ColsClassDescr`), but there are a few other associated methods to\n deal with indexes.\n\n .. rubric:: Column attributes\n\n .. attribute:: descr\n\n The Description (see :ref:`DescriptionClassDescr`) instance of the\n parent table or nested column.\n\n .. attribute:: name\n\n The name of the associated column.\n\n .. attribute:: pathname\n\n The complete pathname of the associated column (the same as\n Column.name if the column is not inside a nested column).\n\n Parameters\n ----------\n table\n The parent table instance\n name\n The name of the column that is associated with this object\n descr\n The parent description object\n\n \"\"\"\n\n # Lazy read-only attributes\n # `````````````````````````\n @lazyattr\n def dtype(self):\n \"\"\"The NumPy dtype that most closely matches this column.\"\"\"\n\n return self.descr._v_dtypes[self.name].base # Get rid of shape info\n\n @lazyattr\n def type(self):\n \"\"\"The PyTables type of the column (a string).\"\"\"\n\n return self.descr._v_types[self.name]\n\n # Properties\n # ~~~~~~~~~~\n def _gettable(self):\n return self._table_file._get_node(self._table_path)\n\n table = property(_gettable, None, None,\n \"\"\"The parent Table instance (see\n :ref:`TableClassDescr`).\"\"\")\n\n def _getindex(self):\n indexPath = _index_pathname_of_column_(self._table_path, self.pathname)\n try:\n index = self._table_file._get_node(indexPath)\n except NodeError:\n index = None # The column is not indexed\n return index\n\n index = property(_getindex, None, None,\n \"\"\"The Index instance (see :ref:`IndexClassDescr`)\n associated with this column (None if the column is not\n indexed).\"\"\")\n\n @lazyattr\n def _itemtype(self):\n return self.descr._v_dtypes[self.name]\n\n def _getshape(self):\n return (self.table.nrows,) + self.descr._v_dtypes[self.name].shape\n\n shape = property(_getshape, None, None, \"The shape of this column.\")\n\n def _isindexed(self):\n if self.index is None:\n return False\n else:\n return True\n\n is_indexed = property(_isindexed, None, None,\n \"True if the column is indexed, false otherwise.\")\n\n maindim = property(\n lambda self: 0, None, None,\n \"\"\"\"The dimension along which iterators work. Its value is 0 (i.e. 
the\n first dimension).\"\"\")\n\n def __init__(self, table, name, descr):\n\n self._table_file = table._v_file\n self._table_path = table._v_pathname\n self.name = name\n \"\"\"The name of the associated column.\"\"\"\n self.pathname = descr._v_colobjects[name]._v_pathname\n \"\"\"The complete pathname of the associated column (the same as\n Column.name if the column is not inside a nested column).\"\"\"\n self.descr = descr\n \"\"\"The Description (see :ref:`DescriptionClassDescr`) instance of the\n parent table or nested column.\"\"\"\n\n def _g_update_table_location(self, table):\n \"\"\"Updates the location information about the associated `table`.\"\"\"\n\n self._table_file = table._v_file\n self._table_path = table._v_pathname\n\n _g_updateTableLocation = previous_api(_g_update_table_location)\n\n def __len__(self):\n \"\"\"Get the number of elements in the column.\n\n This matches the length in rows of the parent table.\n\n \"\"\"\n\n return self.table.nrows\n\n def __getitem__(self, key):\n \"\"\"Get a row or a range of rows from a column.\n\n If key argument is an integer, the corresponding element in the column\n is returned as an object of the current flavor. If key is a slice, the\n range of elements determined by it is returned as an array of the\n current flavor.\n\n Examples\n --------\n\n ::\n\n print(\"Column handlers:\")\n for name in table.colnames:\n print(table.cols._f_col(name))\n print(\"Select table.cols.name[1]-->\", table.cols.name[1])\n print(\"Select table.cols.name[1:2]-->\", table.cols.name[1:2])\n print(\"Select table.cols.name[:]-->\", table.cols.name[:])\n print(\"Select table.cols._f_col('name')[:]-->\",\n table.cols._f_col('name')[:])\n\n The output of this for a certain arbitrary table is::\n\n Column handlers:\n /table.cols.name (Column(), string, idx=None)\n /table.cols.lati (Column(), int32, idx=None)\n /table.cols.longi (Column(), int32, idx=None)\n /table.cols.vector (Column(2,), int32, idx=None)\n /table.cols.matrix2D (Column(2, 2), float64, idx=None)\n Select table.cols.name[1]--> Particle: 11\n Select table.cols.name[1:2]--> ['Particle: 11']\n Select table.cols.name[:]--> ['Particle: 10'\n 'Particle: 11' 'Particle: 12'\n 'Particle: 13' 'Particle: 14']\n Select table.cols._f_col('name')[:]--> ['Particle: 10'\n 'Particle: 11' 'Particle: 12'\n 'Particle: 13' 'Particle: 14']\n\n See the :file:`examples/table2.py` file for a more complete example.\n\n \"\"\"\n\n table = self.table\n\n # Generalized key support not there yet, but at least allow\n # for a tuple with one single element (the main dimension).\n # (key,) --> key\n if isinstance(key, tuple) and len(key) == 1:\n key = key[0]\n\n if is_idx(key):\n key = operator.index(key)\n\n # Index out of range protection\n if key >= table.nrows:\n raise IndexError(\"Index out of range\")\n if key < 0:\n # To support negative values\n key += table.nrows\n (start, stop, step) = table._process_range(key, key + 1, 1)\n return table.read(start, stop, step, self.pathname)[0]\n elif isinstance(key, slice):\n (start, stop, step) = table._process_range(\n key.start, key.stop, key.step)\n return table.read(start, stop, step, self.pathname)\n else:\n raise TypeError(\n \"'%s' key type is not valid in this context\" % key)\n\n def __iter__(self):\n \"\"\"Iterate through all items in the column.\"\"\"\n\n table = self.table\n itemsize = self.dtype.itemsize\n nrowsinbuf = table._v_file.params['IO_BUFFER_SIZE'] // itemsize\n buf = numpy.empty((nrowsinbuf, ), self._itemtype)\n max_row = len(self)\n for start_row in 
xrange(0, len(self), nrowsinbuf):\n end_row = min(start_row + nrowsinbuf, max_row)\n buf_slice = buf[0:end_row - start_row]\n table.read(start_row, end_row, 1, field=self.pathname,\n out=buf_slice)\n for row in buf_slice:\n yield row\n\n def __setitem__(self, key, value):\n \"\"\"Set a row or a range of rows in a column.\n\n If key argument is an integer, the corresponding element is set to\n value. If key is a slice, the range of elements determined by it is\n set to value.\n\n Examples\n --------\n\n ::\n\n # Modify row 1\n table.cols.col1[1] = -1\n\n # Modify rows 1 and 3\n table.cols.col1[1::2] = [2,3]\n\n Which is equivalent to::\n\n # Modify row 1\n table.modify_columns(start=1, columns=[[-1]], names=['col1'])\n\n # Modify rows 1 and 3\n columns = numpy.rec.fromarrays([[2,3]], formats='i4')\n table.modify_columns(start=1, step=2, columns=columns,\n names=['col1'])\n\n \"\"\"\n\n table = self.table\n table._v_file._check_writable()\n\n # Generalized key support not there yet, but at least allow\n # for a tuple with one single element (the main dimension).\n # (key,) --> key\n if isinstance(key, tuple) and len(key) == 1:\n key = key[0]\n\n if is_idx(key):\n key = operator.index(key)\n\n # Index out of range protection\n if key >= table.nrows:\n raise IndexError(\"Index out of range\")\n if key < 0:\n # To support negative values\n key += table.nrows\n return table.modify_column(key, key + 1, 1,\n [[value]], self.pathname)\n elif isinstance(key, slice):\n (start, stop, step) = table._process_range(\n key.start, key.stop, key.step)\n return table.modify_column(start, stop, step,\n value, self.pathname)\n else:\n raise ValueError(\"Non-valid index or slice: %s\" % key)\n\n def create_index(self, optlevel=6, kind=\"medium\", filters=None,\n tmp_dir=None, _blocksizes=None, _testmode=False,\n _verbose=False):\n \"\"\"Create an index for this column.\n\n .. warning::\n\n In some situations it is useful to get a completely sorted\n index (CSI). For those cases, it is best to use the\n :meth:`Column.create_csindex` method instead.\n\n Parameters\n ----------\n optlevel : int\n The optimization level for building the index. The levels ranges\n from 0 (no optimization) up to 9 (maximum optimization). Higher\n levels of optimization mean better chances for reducing the entropy\n of the index at the price of using more CPU, memory and I/O\n resources for creating the index.\n kind : str\n The kind of the index to be built. It can take the 'ultralight',\n 'light', 'medium' or 'full' values. Lighter kinds ('ultralight'\n and 'light') mean that the index takes less space on disk, but will\n perform queries slower. Heavier kinds ('medium' and 'full') mean\n better chances for reducing the entropy of the index (increasing\n the query speed) at the price of using more disk space as well as\n more CPU, memory and I/O resources for creating the index.\n\n Note that selecting a full kind with an optlevel of 9 (the maximum)\n guarantees the creation of an index with zero entropy, that is, a\n completely sorted index (CSI) - provided that the number of rows in\n the table does not exceed the 2**48 figure (that is more than 100\n trillions of rows). See :meth:`Column.create_csindex` method for a\n more direct way to create a CSI index.\n filters : Filters\n Specify the Filters instance used to compress the index. 
If None,\n default index filters will be used (currently, zlib level 1 with\n shuffling).\n tmp_dir\n When kind is other than 'ultralight', a temporary file is created\n during the index build process. You can use the tmp_dir argument\n to specify the directory for this temporary file. The default is\n to create it in the same directory as the file containing the\n original table.\n\n \"\"\"\n\n kinds = ['ultralight', 'light', 'medium', 'full']\n if kind not in kinds:\n raise ValueError(\"Kind must have any of these values: %s\" % kinds)\n if (not isinstance(optlevel, (int, long)) or\n (optlevel < 0 or optlevel > 9)):\n raise ValueError(\"Optimization level must be an integer in the \"\n \"range 0-9\")\n if filters is None:\n filters = default_index_filters\n if tmp_dir is None:\n tmp_dir = os.path.dirname(self._table_file.filename)\n else:\n if not os.path.isdir(tmp_dir):\n raise ValueError(\"Temporary directory '%s' does not exist\" %\n tmp_dir)\n if (_blocksizes is not None and\n (not isinstance(_blocksizes, tuple) or len(_blocksizes) != 4)):\n raise ValueError(\"_blocksizes must be a tuple with exactly 4 \"\n \"elements\")\n idxrows = _column__create_index(self, optlevel, kind, filters,\n tmp_dir, _blocksizes, _verbose)\n return SizeType(idxrows)\n\n createIndex = previous_api(create_index)\n\n def create_csindex(self, filters=None, tmp_dir=None,\n _blocksizes=None, _testmode=False, _verbose=False):\n \"\"\"Create a completely sorted index (CSI) for this column.\n\n This method guarantees the creation of an index with zero entropy, that\n is, a completely sorted index (CSI) -- provided that the number of rows\n in the table does not exceed the 2**48 figure (that is more than 100\n trillions of rows). A CSI index is needed for some table methods (like\n :meth:`Table.itersorted` or :meth:`Table.read_sorted`) in order to\n ensure completely sorted results.\n\n For the meaning of filters and tmp_dir arguments see\n :meth:`Column.create_index`.\n\n Notes\n -----\n This method is equivalent to\n Column.create_index(optlevel=9, kind='full', ...).\n\n \"\"\"\n\n return self.create_index(\n kind='full', optlevel=9, filters=filters, tmp_dir=tmp_dir,\n _blocksizes=_blocksizes, _testmode=_testmode, _verbose=_verbose)\n\n createCSIndex = previous_api(create_csindex)\n\n def _do_reindex(self, dirty):\n \"\"\"Common code for reindex() and reindex_dirty() codes.\"\"\"\n\n index = self.index\n dodirty = True\n if dirty and not index.dirty:\n dodirty = False\n if index is not None and dodirty:\n self._table_file._check_writable()\n # Get the old index parameters\n kind = index.kind\n optlevel = index.optlevel\n filters = index.filters\n # We *need* to tell the index that it is going to be undirty.\n # This is needed here so as to unnail() the condition cache.\n index.dirty = False\n # Delete the existing Index\n index._f_remove()\n # Create a new Index with the previous parameters\n return SizeType(self.create_index(\n kind=kind, optlevel=optlevel, filters=filters))\n else:\n return SizeType(0) # The column is not intended for indexing\n\n _doReIndex = previous_api(_do_reindex)\n\n def reindex(self):\n \"\"\"Recompute the index associated with this column.\n\n This can be useful when you suspect that, for any reason,\n the index information is no longer valid and you want to rebuild it.\n\n This method does nothing if the column is not indexed.\n\n \"\"\"\n\n self._do_reindex(dirty=False)\n\n reIndex = previous_api(reindex)\n\n def reindex_dirty(self):\n \"\"\"Recompute the associated index only if it 
is dirty.\n\n This can be useful when you have set :attr:`Table.autoindex` to false\n for the table and you want to update the column's index after an\n invalidating index operation (like :meth:`Table.remove_rows`).\n\n This method does nothing if the column is not indexed.\n\n \"\"\"\n\n self._do_reindex(dirty=True)\n\n reIndexDirty = previous_api(reindex_dirty)\n\n def remove_index(self):\n \"\"\"Remove the index associated with this column.\n\n This method does nothing if the column is not indexed. The removed\n index can be created again by calling the :meth:`Column.create_index`\n method.\n\n \"\"\"\n\n self._table_file._check_writable()\n\n # Remove the index if existing.\n if self.is_indexed:\n index = self.index\n index._f_remove()\n self.table._set_column_indexing(self.pathname, False)\n\n removeIndex = previous_api(remove_index)\n\n def close(self):\n \"\"\"Close this column.\"\"\"\n\n self.__dict__.clear()\n\n def __str__(self):\n \"\"\"The string representation for this object.\"\"\"\n\n # The pathname\n tablepathname = self._table_path\n pathname = self.pathname.replace('/', '.')\n # Get this class name\n classname = self.__class__.__name__\n # The shape for this column\n shape = self.shape\n # The type\n tcol = self.descr._v_types[self.name]\n return \"%s.cols.%s (%s%s, %s, idx=%s)\" % \\\n (tablepathname, pathname, classname, shape, tcol, self.index)\n\n def __repr__(self):\n \"\"\"A detailed string representation for this object.\"\"\"\n\n return str(self)\n\n\n## Local Variables:\n## mode: python\n## py-indent-offset: 4\n## tab-width: 4\n## fill-column: 72\n## End:\n",
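The table module above documents Table.modify_rows, Table.modify_column and Column.create_csindex separately but never shows them used together; the following is a minimal usage sketch under stated assumptions (the file name "readings.h5" and the Reading description are illustrative, not taken from the source):

import tables as tb

class Reading(tb.IsDescription):
    sensor = tb.StringCol(8)   # fixed-width string column (alphabetically first)
    value = tb.Float64Col()    # numeric column we will modify and index

with tb.open_file("readings.h5", mode="w") as h5:
    table = h5.create_table("/", "readings", Reading)
    table.append([("a", 1.0), ("b", 2.0), ("c", 3.0), ("d", 4.0)])

    # Overwrite one column over the row slice [0:2] (see modify_column above).
    table.modify_column(start=0, stop=2, colname="value", column=[10.0, 20.0])

    # Replace whole rows starting at row 2 (see modify_rows above).
    table.modify_rows(start=2, rows=[("x", 30.0), ("y", 40.0)])

    # Build a completely sorted index on the column and query through it.
    table.cols.value.create_csindex()
    hits = [row["sensor"] for row in table.where("value > 15")]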
"\"\"\" A set of NumPy functions to apply per chunk \"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nfrom collections import Container, Iterable, Sequence\nfrom functools import wraps\n\nfrom toolz import concat\nimport numpy as np\n\nfrom ..compatibility import builtins, getargspec\nfrom ..utils import ignoring\n\n\ndef keepdims_wrapper(a_callable):\n \"\"\"\n A wrapper for functions that don't provide keepdims to ensure that they do.\n \"\"\"\n\n if \"keepdims\" in getargspec(a_callable).args:\n return a_callable\n\n @wraps(a_callable)\n def keepdims_wrapped_callable(x, axis=None, keepdims=None, *args, **kwargs):\n r = a_callable(x, axis=axis, *args, **kwargs)\n\n if not keepdims:\n return r\n\n axes = axis\n\n if axes is None:\n axes = range(x.ndim)\n\n if not isinstance(axes, (Container, Iterable, Sequence)):\n axes = [axes]\n\n r_slice = tuple()\n for each_axis in range(x.ndim):\n if each_axis in axes:\n r_slice += (None,)\n else:\n r_slice += (slice(None),)\n\n r = r[r_slice]\n\n return r\n\n return keepdims_wrapped_callable\n\n\n# Wrap NumPy functions to ensure they provide keepdims.\nsum = keepdims_wrapper(np.sum)\nprod = keepdims_wrapper(np.prod)\nmin = keepdims_wrapper(np.min)\nmax = keepdims_wrapper(np.max)\nargmin = keepdims_wrapper(np.argmin)\nnanargmin = keepdims_wrapper(np.nanargmin)\nargmax = keepdims_wrapper(np.argmax)\nnanargmax = keepdims_wrapper(np.nanargmax)\nany = keepdims_wrapper(np.any)\nall = keepdims_wrapper(np.all)\nnansum = keepdims_wrapper(np.nansum)\n\nwith ignoring(AttributeError):\n nanprod = keepdims_wrapper(np.nanprod)\n\nnanmin = keepdims_wrapper(np.nanmin)\nnanmax = keepdims_wrapper(np.nanmax)\nmean = keepdims_wrapper(np.mean)\n\nwith ignoring(AttributeError):\n nanmean = keepdims_wrapper(np.nanmean)\n\nvar = keepdims_wrapper(np.var)\n\nwith ignoring(AttributeError):\n nanvar = keepdims_wrapper(np.nanvar)\n\nstd = keepdims_wrapper(np.std)\n\nwith ignoring(AttributeError):\n nanstd = keepdims_wrapper(np.nanstd)\n\n\ndef coarsen(reduction, x, axes, trim_excess=False):\n \"\"\" Coarsen array by applying reduction to fixed size neighborhoods\n\n Parameters\n ----------\n\n reduction: function\n Function like np.sum, np.mean, etc...\n x: np.ndarray\n Array to be coarsened\n axes: dict\n Mapping of axis to coarsening factor\n\n Examples\n --------\n\n >>> x = np.array([1, 2, 3, 4, 5, 6])\n >>> coarsen(np.sum, x, {0: 2})\n array([ 3, 7, 11])\n >>> coarsen(np.max, x, {0: 3})\n array([3, 6])\n\n Provide dictionary of scale per dimension\n\n >>> x = np.arange(24).reshape((4, 6))\n >>> x\n array([[ 0, 1, 2, 3, 4, 5],\n [ 6, 7, 8, 9, 10, 11],\n [12, 13, 14, 15, 16, 17],\n [18, 19, 20, 21, 22, 23]])\n\n >>> coarsen(np.min, x, {0: 2, 1: 3})\n array([[ 0, 3],\n [12, 15]])\n\n You must avoid excess elements explicitly\n >>> x = np.array([1, 2, 3, 4, 5, 6, 7, 8])\n >>> coarsen(np.min, x, {0: 3}, trim_excess=True)\n array([1, 4])\n \"\"\"\n # Insert singleton dimensions if they don't exist already\n for i in range(x.ndim):\n if i not in axes:\n axes[i] = 1\n\n if trim_excess:\n ind = tuple(slice(0, -(d % axes[i]))\n if d % axes[i] else\n slice(None, None) for i, d in enumerate(x.shape))\n x = x[ind]\n\n # (10, 10) -> (5, 2, 5, 2)\n newshape = tuple(concat([(x.shape[i] / axes[i], axes[i])\n for i in range(x.ndim)]))\n\n\n return reduction(x.reshape(newshape), axis=tuple(range(1, x.ndim*2, 2)))\n\n\ndef trim(x, axes=None):\n \"\"\" Trim boundaries off of array\n\n >>> x = np.arange(24).reshape((4, 6))\n >>> trim(x, axes={0: 0, 1: 1})\n array([[ 1, 
2, 3, 4],\n [ 7, 8, 9, 10],\n [13, 14, 15, 16],\n [19, 20, 21, 22]])\n\n >>> trim(x, axes={0: 1, 1: 1})\n array([[ 7, 8, 9, 10],\n [13, 14, 15, 16]])\n \"\"\"\n if isinstance(axes, int):\n axes = [axes] * x.ndim\n if isinstance(axes, dict):\n axes = [axes.get(i, 0) for i in range(x.ndim)]\n\n return x[tuple(slice(ax, -ax if ax else None) for ax in axes)]\n\n\ntry:\n from numpy import broadcast_to\nexcept ImportError: # pragma: no cover\n # broadcast_to will arrive in numpy v1.10. Until then, it is duplicated\n # here.\n\n # See https://github.com/numpy/numpy/blob/master/LICENSE.txt\n # or NUMPY_LICENSE.txt within this directory\n def _maybe_view_as_subclass(original_array, new_array):\n if type(original_array) is not type(new_array):\n # if input was an ndarray subclass and subclasses were OK,\n # then view the result as that subclass.\n new_array = new_array.view(type=type(original_array))\n # Since we have done something akin to a view from original_array, we\n # should let the subclass finalize (if it has it implemented, i.e., is\n # not None).\n if new_array.__array_finalize__:\n new_array.__array_finalize__(original_array)\n return new_array\n\n\n def _broadcast_to(array, shape, subok, readonly):\n shape = tuple(shape) if np.iterable(shape) else (shape,)\n array = np.array(array, copy=False, subok=subok)\n if not shape and array.shape:\n raise ValueError('cannot broadcast a non-scalar to a scalar array')\n if builtins.any(size < 0 for size in shape):\n raise ValueError('all elements of broadcast shape must be non-'\n 'negative')\n broadcast = np.nditer(\n (array,), flags=['multi_index', 'zerosize_ok', 'refs_ok'],\n op_flags=['readonly'], itershape=shape, order='C').itviews[0]\n result = _maybe_view_as_subclass(array, broadcast)\n if not readonly and array.flags.writeable:\n result.flags.writeable = True\n return result\n\n\n def broadcast_to(array, shape, subok=False):\n \"\"\"Broadcast an array to a new shape.\n\n Parameters\n ----------\n array : array_like\n The array to broadcast.\n shape : tuple\n The shape of the desired array.\n subok : bool, optional\n If True, then sub-classes will be passed-through, otherwise\n the returned array will be forced to be a base-class array (default).\n\n Returns\n -------\n broadcast : array\n A readonly view on the original array with the given shape. It is\n typically not contiguous. Furthermore, more than one element of a\n broadcasted array may refer to a single memory location.\n\n Raises\n ------\n ValueError\n If the array is not compatible with the new shape according to NumPy's\n broadcasting rules.\n\n Examples\n --------\n >>> x = np.array([1, 2, 3])\n >>> np.broadcast_to(x, (3, 3)) # doctest: +SKIP\n array([[1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]])\n \"\"\"\n return _broadcast_to(array, shape, subok=subok, readonly=True)\n\n\ndef topk(k, x):\n \"\"\" Top k elements of an array\n\n >>> topk(2, np.array([5, 1, 3, 6]))\n array([6, 5])\n \"\"\"\n # http://stackoverflow.com/a/23734295/616616 by larsmans\n k = np.minimum(k, len(x))\n ind = np.argpartition(x, -k)[-k:]\n return np.sort(x[ind])[::-1]\n",
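The coarsen helper above is the core of the per-chunk reductions; the snippet below reproduces its reshape-and-reduce trick with plain NumPy (no dask import, so it stays runnable on its own) to make the block semantics concrete:

import numpy as np

x = np.arange(24).reshape((4, 6))

# coarsen(np.sum, x, {0: 2, 1: 3}) reshapes (4, 6) -> (2, 2, 2, 3) and reduces
# over the per-block axes (1, 3); doing the same step by hand:
blocks = x.reshape((2, 2, 2, 3))
block_sums = blocks.sum(axis=(1, 3))      # -> array([[ 24,  42], [ 96, 114]])

# keepdims_wrapper emulates NumPy's keepdims flag for reductions that lack it:
col_sums = x.sum(axis=1, keepdims=True)   # shape (4, 1), ready to broadcast back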
"# You must first run \"bokeh serve\" to view this example\n#\n# Example inspired by:\n#\n# https://www.youtube.com/watch?v=LznjC4Lo7lE\n\nfrom __future__ import division\n\nfrom collections import OrderedDict\nfrom math import pi\n\nimport numpy as np\n\nfrom bokeh.client import push_session\nfrom bokeh.driving import repeat\nfrom bokeh.io import vplot\nfrom bokeh.models.sources import ColumnDataSource as CDS\nfrom bokeh.plotting import figure, curdoc\n\nN = 100\nnewx = x = np.linspace(0, 2*pi, N)\nshift = 2.2\nbase_x = x + shift\n\nperiod = pi/2\npalette = ['#08519c', '#3182bd', '#6baed6', '#bdd7e7']\n\ndef new_source():\n return dict(curve=CDS(), lines=CDS(), circle_point=CDS(), circleds=CDS())\n\ndef create_circle_glyphs(p, color, sources):\n p.circle('x', 'y', size=1., line_color=color, color=None, source=sources['circleds'])\n p.circle('x', 'y', size=5, line_color=color, color=color, source=sources['circle_point'])\n p.line('radius_x', 'radius_y', line_color=color, color=color, alpha=0.5, source=sources['lines'])\n\ndef create_plot(foos, title='', r = 1, y_range=None, period = pi/2, cfoos=None):\n if y_range is None:\n y_range=[-2, 2]\n\n # create new figure\n p = figure(title=title, width=800, height=300, x_range=[-2.5, 9], y_range=y_range)\n p.xgrid.bounds = (-2, 2)\n p.xaxis.bounds = (-2, 2)\n\n _sources = []\n cx, cy = 0, 0\n for i, foo in enumerate(foos):\n sources = new_source()\n get_new_sources(x, foo, sources, cfoos[i], cx, cy, i==0)\n cp = sources['circle_point'].data\n cx, cy = cp['x'][0], cp['y'][0]\n\n if i==0:\n # compute the full fourier eq\n full_y = sum([foo(x) for foo in foos])\n # replace the foo curve with the full fourier eq\n sources['curve'] = CDS(dict(x=x, base_x=base_x, y=full_y))\n # draw the line\n p.line('base_x','y', color=\"orange\", line_width=2, source=sources['curve'],\n legend=\"4sin(x)/pi + 4sin(3x)/3pi + 4sin(5x)/5pi + 4sin(7x)/7pi\")\n\n if i==len(foos)-1:\n # if it's the last foo let's draw a circle on the head of the curve\n sources['floating_point'] = CDS({'x':[shift], 'y': [cy]})\n p.line('line_x', 'line_y', color=\"palette[i]\", line_width=2, source=sources['lines'])\n p.circle('x', 'y', size=10, line_color=palette[i], color=palette[i], source=sources['floating_point'])\n\n # draw the circle, radius and circle point realted to foo domain\n create_circle_glyphs(p, palette[i], sources)\n _sources.append(sources)\n\n return p, _sources\n\n\ndef get_new_sources(xs, foo, sources, cfoo, cx=0, cy=0, compute_curve = True):\n if compute_curve:\n ys = foo(xs)\n sources['curve'].data = dict(x=xs, base_x=base_x, y=ys)\n\n r = foo(period)\n y = foo(xs[0]) + cy\n x = cfoo(xs[0]) + cx\n\n sources['lines'].data = {\n 'line_x': [x, shift], 'line_y': [y, y],\n 'radius_x': [0, x], 'radius_y': [0, y]\n }\n sources['circle_point'].data = {'x': [x], 'y': [y], 'r': [r]}\n sources['circleds'].data=dict(\n x = cx + np.cos(np.linspace(0, 2*pi, N)) * r,\n y = cy + np.sin(np.linspace(0, 2*pi, N)) * r,\n )\n\ndef update_sources(sources, foos, newx, ind, cfoos):\n cx, cy = 0, 0\n\n for i, foo in enumerate(foos):\n get_new_sources(newx, foo, sources[i], cfoos[i], cx, cy,\n compute_curve = i != 0)\n\n if i == 0:\n full_y = sum([foo(newx) for foo in foos])\n sources[i]['curve'].data = dict(x=newx, base_x=base_x, y=full_y)\n\n cp = sources[i]['circle_point'].data\n cx, cy = cp['x'][0], cp['y'][0]\n\n if i == len(foos)-1:\n sources[i]['floating_point'].data['x'] = [shift]\n sources[i]['floating_point'].data['y'] = [cy]\n\ndef update_centric_sources(sources, foos, newx, ind, 
cfoos):\n for i, foo in enumerate(foos):\n get_new_sources(newx, foo, sources[i], cfoos[i])\n\ndef create_centric_plot(foos, title='', r = 1, y_range=(-2, 2), period = pi/2, cfoos=None):\n p = figure(title=title, width=800, height=300, x_range=[-1.5, 10.5], y_range=y_range)\n p.xgrid.bounds = (-2, 2)\n p.xaxis.bounds = (-2, 2)\n\n _sources = []\n for i, foo in enumerate(foos):\n sources = new_source()\n get_new_sources(x, foo, sources, cfoos[i])\n _sources.append(sources)\n\n if i:\n legend = \"4sin(%(c)sx)/%(c)spi\" % {'c': i*2+1}\n else:\n legend = \"4sin(x)/pi\"\n\n p.line('base_x','y', color=palette[i], line_width=2, source=sources['curve'])\n p.line('line_x', 'line_y', color=palette[i], line_width=2,\n source=sources['lines'], legend=legend)\n\n create_circle_glyphs(p, palette[i], sources)\n\n return p, _sources\n\n# create the series partials\nf1 = lambda x: (4*np.sin(x))/pi\nf2 = lambda x: (4*np.sin(3*x))/(3*pi)\nf3 = lambda x: (4*np.sin(5*x))/(5*pi)\nf4 = lambda x: (4*np.sin(7*x))/(7*pi)\ncf1 = lambda x: (4*np.cos(x))/pi\ncf2 = lambda x: (4*np.cos(3*x))/(3*pi)\ncf3 = lambda x: (4*np.cos(5*x))/(5*pi)\ncf4 = lambda x: (4*np.cos(7*x))/(7*pi)\nfourier = OrderedDict(\n fourier_4 = {\n 'f': lambda x: f1(x) + f2(x) + f3(x) + f4(x),\n 'fs': [f1, f2, f3, f4],\n 'cfs': [cf1, cf2, cf3, cf4]\n },\n)\n\nfor k, p in fourier.items():\n p['plot'], p['sources'] = create_plot(\n p['fs'], 'Fourier (Sum of the first 4 Harmonic Circles)', r = p['f'](period), cfoos = p['cfs']\n )\n\nfor k, p in fourier.items():\n p['cplot'], p['csources'] = create_centric_plot(\n p['fs'], 'Fourier First 4 Harmonics & Harmonic Circles', r = p['f'](period), cfoos = p['cfs']\n )\n\nlayout = vplot(*[f['plot'] for f in fourier.values()] + [f['cplot'] for f in fourier.values()])\n\n# open a session to keep our local document in sync with server\nsession = push_session(curdoc())\n\n@repeat(range(N))\ndef cb(gind):\n global newx\n oldx = np.delete(newx, 0)\n newx = np.hstack([oldx, [oldx[-1] + 2*pi/N]])\n\n for k, p in fourier.items():\n update_sources(p['sources'], p['fs'], newx, gind, p['cfs'])\n update_centric_sources(p['csources'], p['fs'], newx, gind, p['cfs'])\n\ncurdoc().add_periodic_callback(cb, 100)\n\nsession.show(layout) # open the document in a browser\n\nsession.loop_until_closed() # run forever\n",
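Stripped of the Fourier-specific plumbing, the animation in the script above relies on the push_session / @repeat / periodic-callback pattern; a bare-bones sketch of that pattern follows (same bokeh client API as the script; the glyph, range and timing values are arbitrary):

from bokeh.client import push_session
from bokeh.driving import repeat
from bokeh.plotting import figure, curdoc

p = figure()
line = p.line(x=[0], y=[0])

session = push_session(curdoc())   # keep the local document in sync with "bokeh serve"

@repeat(range(50))                 # cycles 0..49 and feeds each value to the callback
def update(step):
    xs = list(range(step + 1))
    line.data_source.data = dict(x=xs, y=[v * v for v in xs])

curdoc().add_periodic_callback(update, 100)   # call update every 100 ms
session.show(p)                               # open the document in a browser
session.loop_until_closed()                   # run forever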
"from __future__ import absolute_import, division, print_function\n\nimport pytest\npyspark = pytest.importorskip('pyspark')\n\nimport pandas as pd\nfrom blaze import compute, symbol, summary, exp, by, join, merge\nfrom toolz import identity\n\n\ndata = [['Alice', 100, 1],\n ['Bob', 200, 2],\n ['Alice', 50, 3]]\n\ndata2 = [['Alice', 'Austin'],\n ['Bob', 'Boston']]\n\ndf = pd.DataFrame(data, columns=['name', 'amount', 'id'])\n\n\n# this only exists because we need to have a single session scoped spark\n# context, otherwise these would simply be global variables\[email protected]\ndef rdd(sc):\n return sc.parallelize(data)\n\n\[email protected]\ndef rdd2(sc):\n return sc.parallelize(data2)\n\n\nt = symbol('t', 'var * {name: string, amount: int, id: int}')\n\nt2 = symbol('t2', 'var * {name: string, city: string}')\n\n# Web Commons Graph Example data\ndata_idx = [['A', 1],\n ['B', 2],\n ['C', 3]]\n\ndata_arc = [[1, 3],\n [2, 3],\n [3, 1]]\n\nt_idx = symbol('idx', 'var * {name: string, node_id: int32}')\n\nt_arc = symbol('arc', 'var * {node_out: int32, node_id: int32}')\n\n\ndef test_symbol(rdd):\n assert compute(t, rdd) == rdd\n\n\ndef test_projection(rdd):\n assert compute(t['name'], rdd).collect() == [row[0] for row in data]\n\n\ndef test_multicols_projection(rdd):\n result = compute(t[['amount', 'name']], rdd).collect()\n expected = [(100, 'Alice'), (200, 'Bob'), (50, 'Alice')]\n\n print(result)\n print(expected)\n\n assert result == expected\n\n\nreduction_exprs = [\n t['amount'].sum(),\n t['amount'].min(),\n t['amount'].max(),\n t['amount'].nunique(),\n t['name'].nunique(),\n t['amount'].count(),\n (t['amount'] > 150).any(),\n (t['amount'] > 150).all(),\n t['amount'].mean(),\n t['amount'].var(),\n summary(a=t.amount.sum(), b=t.id.count()),\n t['amount'].std()\n]\n\n\ndef test_reductions(rdd):\n for expr in reduction_exprs:\n result = compute(expr, rdd)\n expected = compute(expr, data)\n if not result == expected:\n print(result)\n print(expected)\n if isinstance(result, float):\n assert abs(result - expected) < 0.001\n else:\n assert result == expected\n\nexprs = [\n t['amount'],\n t['amount'] == 100,\n t['amount'].truncate(150),\n t[t['name'] == 'Alice'],\n t[t['amount'] == 0],\n t[t['amount'] > 150],\n t['amount'] + t['id'],\n t['amount'] % t['id'],\n exp(t['amount']),\n by(t['name'], total=t['amount'].sum()),\n by(t['name'], total=(t['amount'] + 1).sum()),\n (t['amount'] * 1).label('foo'),\n t.map(lambda tup: tup[1] + tup[2], 'real'),\n t[t.name.like('Alice')],\n t['amount'].apply(identity, 'var * real', splittable=True),\n t['amount'].map(lambda x: x + 1, 'int')\n]\n\nexprs = list(zip(map(str, exprs), exprs))\n\n\ndef tuplify(x):\n return tuple(x) if isinstance(x, list) else x\n\n\[email protected](['string', 'expr'], exprs)\ndef test_basic(rdd, string, expr):\n result = set(map(tuplify, compute(expr, rdd).collect()))\n expected = set(map(tuplify, compute(expr, data)))\n assert result == expected\n\n\ntbig = symbol(\n 'tbig', 'var * {name: string, sex: string[1], amount: int, id: int}')\n\nbig_exprs = [\n by(tbig[['name', 'sex']], total=tbig['amount'].sum()),\n by(tbig[['name', 'sex']], total=(tbig['id'] + tbig['amount']).sum())]\n\n\[email protected]('expr', big_exprs)\ndef test_big_by(sc, expr):\n data = [['Alice', 'F', 100, 1],\n ['Alice', 'F', 100, 3],\n ['Drew', 'F', 100, 4],\n ['Drew', 'M', 100, 5],\n ['Drew', 'M', 200, 5]]\n rdd = sc.parallelize(data)\n result = set(map(tuplify, compute(expr, rdd).collect()))\n expected = set(map(tuplify, compute(expr, data)))\n assert result 
== expected\n\n\ndef test_head(rdd):\n assert list(compute(t.head(1), rdd)) == list(compute(t.head(1), data))\n\n\nsort_exprs = [\n t.sort('amount'),\n t.sort('amount', ascending=True),\n t.sort(t['amount'], ascending=True),\n t.sort(-t['amount'].label('foo') + 1, ascending=True),\n t.sort(['amount', 'id'])\n]\n\n\[email protected]('expr', sort_exprs)\ndef test_sort(rdd, expr):\n result = compute(expr, rdd).collect()\n expected = list(compute(expr, data))\n assert result == expected\n\n\ndef test_distinct(rdd):\n assert set(compute(t['name'].distinct(), rdd).collect()) == \\\n set(['Alice', 'Bob'])\n\n\[email protected](\n raises=NotImplementedError,\n reason='cannot specify columns to distinct on yet',\n)\ndef test_distinct_on(rdd):\n compute(t.distinct('name'), rdd)\n\n\ndef test_join(rdd, rdd2):\n\n joined = join(t, t2, 'name')\n expected = [('Alice', 100, 1, 'Austin'),\n ('Bob', 200, 2, 'Boston'),\n ('Alice', 50, 3, 'Austin')]\n result = compute(joined, {t: rdd, t2: rdd2}).collect()\n assert all(i in expected for i in result)\n\n\ndef test_multi_column_join(sc):\n left = [(1, 2, 3),\n (2, 3, 4),\n (1, 3, 5)]\n right = [(1, 2, 30),\n (1, 3, 50),\n (1, 3, 150)]\n rleft = sc.parallelize(left)\n rright = sc.parallelize(right)\n\n L = symbol('L', 'var * {x: int, y: int, z: int}')\n R = symbol('R', 'var * {x: int, y: int, w: int}')\n\n j = join(L, R, ['x', 'y'])\n\n result = compute(j, {L: rleft, R: rright})\n expected = [(1, 2, 3, 30),\n (1, 3, 5, 50),\n (1, 3, 5, 150)]\n\n assert set(result.collect()) == set(expected)\n\n\ndef test_groupby(sc):\n rddidx = sc.parallelize(data_idx)\n rddarc = sc.parallelize(data_arc)\n\n joined = join(t_arc, t_idx, \"node_id\")\n\n t = by(joined['name'], count=joined['node_id'].count())\n a = compute(t, {t_arc: rddarc, t_idx: rddidx})\n in_degree = dict(a.collect())\n assert in_degree == {'A': 1, 'C': 2}\n\n\ndef test_multi_level_rowfunc_works(rdd):\n expr = t['amount'].map(lambda x: x + 1, 'int')\n\n assert compute(expr, rdd).collect() == [x[1] + 1 for x in data]\n\n\ndef test_merge(rdd):\n col = (t['amount'] * 2).label('new')\n expr = merge(t['name'], col)\n\n assert compute(expr, rdd).collect() == [\n (row[0], row[1] * 2) for row in data]\n\n\ndef test_selection_out_of_order(rdd):\n expr = t['name'][t['amount'] < 100]\n\n assert compute(expr, rdd).collect() == ['Alice']\n\n\ndef test_recursive_rowfunc_is_used(rdd):\n expr = by(t['name'], total=(2 * (t['amount'] + t['id'])).sum())\n expected = [('Alice', 2 * (101 + 53)),\n ('Bob', 2 * (202))]\n assert set(compute(expr, rdd).collect()) == set(expected)\n\n\ndef test_outer_join(sc):\n left = [(1, 'Alice', 100),\n (2, 'Bob', 200),\n (4, 'Dennis', 400)]\n left = sc.parallelize(left)\n right = [('NYC', 1),\n ('Boston', 1),\n ('LA', 3),\n ('Moscow', 4)]\n right = sc.parallelize(right)\n\n L = symbol('L', 'var * {id: int, name: string, amount: real}')\n R = symbol('R', 'var * {city: string, id: int}')\n\n assert set(compute(join(L, R), {L: left, R: right}).collect()) == set(\n [(1, 'Alice', 100, 'NYC'),\n (1, 'Alice', 100, 'Boston'),\n (4, 'Dennis', 400, 'Moscow')])\n\n assert set(compute(join(L, R, how='left'), {L: left, R: right}).collect()) == set(\n [(1, 'Alice', 100, 'NYC'),\n (1, 'Alice', 100, 'Boston'),\n (2, 'Bob', 200, None),\n (4, 'Dennis', 400, 'Moscow')])\n\n assert set(compute(join(L, R, how='right'), {L: left, R: right}).collect()) == set(\n [(1, 'Alice', 100, 'NYC'),\n (1, 'Alice', 100, 'Boston'),\n (3, None, None, 'LA'),\n (4, 'Dennis', 400, 'Moscow')])\n\n # Full outer join not yet supported\n 
assert set(compute(join(L, R, how='outer'), {L: left, R: right}).collect()) == set(\n [(1, 'Alice', 100, 'NYC'),\n (1, 'Alice', 100, 'Boston'),\n (2, 'Bob', 200, None),\n (3, None, None, 'LA'),\n (4, 'Dennis', 400, 'Moscow')])\n"
] | [
[
"numpy.dot",
"numpy.asarray",
"numpy.cumsum",
"scipy.optimize.fmin",
"numpy.where",
"numpy.arange",
"numpy.lexsort",
"numpy.copy",
"numpy.zeros",
"scipy.stats.chi2.ppf",
"numpy.log",
"numpy.int_",
"numpy.cumprod",
"numpy.delete",
"numpy.sum",
"numpy.abs",
"numpy.shape",
"scipy.optimize.brentq",
"scipy.stats.chi2.sf"
],
[
"numpy.cos",
"numpy.sin"
],
[
"numpy.hstack",
"numpy.logical_not",
"numpy.abs",
"numpy.logical_and",
"numpy.asarray",
"numpy.atleast_1d",
"numpy.all",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"numpy.vstack"
],
[
"numpy.allclose",
"numpy.uint32",
"numpy.linspace",
"numpy.arange",
"numpy.vectorize"
],
[
"numpy.asscalar",
"numpy.asarray",
"numpy.isnan",
"numpy.copy",
"numpy.isscalar",
"numpy.ravel"
],
[
"numpy.array"
],
[
"scipy.optimize.fmin_cg",
"numpy.abs",
"scipy.optimize.fmin_l_bfgs_b",
"numpy.asarray",
"scipy.optimize.fmin_bfgs",
"numpy.linalg.inv",
"scipy.optimize.fmin_ncg",
"numpy.all",
"scipy.optimize.fmin",
"scipy.optimize.fmin_powell",
"numpy.diag_indices",
"scipy.optimize.basinhopping"
],
[
"numpy.random.rand",
"numpy.random.seed"
],
[
"numpy.arange",
"numpy.zeros",
"numpy.ones"
],
[
"numpy.abs",
"numpy.asarray",
"numpy.arange",
"numpy.rec.fromarrays",
"numpy.dtype",
"numpy.sort",
"numpy.rec.array",
"numpy.array",
"numpy.zeros",
"numpy.empty"
],
[
"numpy.nditer",
"numpy.sort",
"numpy.argpartition",
"numpy.iterable",
"numpy.array"
],
[
"numpy.hstack",
"numpy.linspace",
"numpy.cos",
"numpy.sin",
"numpy.delete"
],
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
mberaha/ProjectedWasserstein | [
"20d19fc49f20124762eb497031cba0918b5eaadb"
] | [
"pwass/regression/simplicial.py"
] | [
"import numpy as np\nfrom sklearn.base import BaseEstimator\n\nfrom pwass.spline import SplineBasis\nfrom pwass.distributions import Distribution\n\n\nclass SimpliciadDistribOnDistrib(BaseEstimator):\n def __init__(self, fit_intercept=True, nbasis=-1, spline_basis=None,\n compute_spline=True):\n self.fit_intercept = fit_intercept\n self.nbasis = nbasis\n self.spline_basis = spline_basis\n self.compute_spline = compute_spline\n\n def _initialize(self, X):\n if self.spline_basis is None:\n self.spline_basis = SplineBasis(\n 2, nbasis=self.nbasis, xgrid=X[0].pdf_grid)\n else:\n self.nbasis = self.spline_basis.nbasis\n\n self.spline_basis.eval_metric()\n\n def fit(self, X, Y):\n self._initialize(X)\n self.n_samples = len(X)\n self.X = X\n self.Y = Y\n\n if self.compute_spline:\n for x in self.X:\n x.xbasis = self.spline_basis\n x.compute_spline_expansions()\n\n for y in self.Y:\n y.xbasis = self.spline_basis\n y.compute_spline_expansions()\n\n self.Xmat = self.get_spline_mat(self.X)\n self.Ymat = self.get_spline_mat(self.Y)\n if self.fit_intercept:\n self.Xmat = np.hstack(\n [np.ones(self.n_samples).reshape(-1, 1), self.Xmat])\n\n self.beta = np.linalg.solve(\n np.matmul(self.Xmat.T, self.Xmat),\n np.matmul(self.Xmat.T, self.Ymat))\n\n def predict(self, Xnew):\n if self.compute_spline:\n for x in Xnew:\n x.xbasis = self.spline_basis\n x.compute_spline_expansions()\n\n Xmat = self.get_spline_mat(Xnew)\n Ypred = np.zeros_like(Xmat)\n\n if self.fit_intercept:\n Xmat = np.hstack(\n [np.ones(Xmat.shape[0]).reshape(-1, 1), Xmat])\n\n out = []\n for i in range(Xmat.shape[0]):\n y_ = np.matmul(Xmat[i, :], self.beta)\n curr = Distribution(smooth_sigma=Xnew[0].smooth_sigma)\n curr.init_from_clr(\n self.spline_basis.xgrid, self.spline_basis.eval_spline(y_))\n out.append(curr)\n\n return out\n\n def get_spline_mat(self, distribs):\n \"\"\"Stacks all the coefficient of the spline expansions by row\n \"\"\"\n out = np.zeros((len(distribs), self.nbasis))\n for i, d in enumerate(distribs):\n out[i, :] = d.clr_coeffs\n\n eps = np.ones(out.shape[1]) * 1e-6\n for i in range(out.shape[1]):\n out[:, i] += np.sum(eps[:i])\n\n return out\n"
] | [
[
"numpy.zeros_like",
"numpy.sum",
"numpy.matmul",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Ursinus-IDS301-S2020/Week9Class | [
"c8173f9f793fedb1cee6e71d272282766861a8eb",
"c8173f9f793fedb1cee6e71d272282766861a8eb"
] | [
"ClassMDS.py",
"Tempogram.py"
] | [
"\"\"\"\nPurpose: To show how to use \"Multidimensional Scaling\" (MDS)\nto find a set of coordinates in 2D that best respect a matrix.\n\nIn this particular example, students gave a distance matrix\nwhere they expressed how similar they thought different majors \nwere to each other. This code loops through all student\nsubmissions and plots the results of MDS so we can see\nspatially where the students place these majors in relation\nto each other\n\"\"\"\nfrom sklearn.manifold import MDS\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import pairwise_distances\nimport glob\n\ndef draw_distance_matrix(D, labels, vmax = None):\n \"\"\"\n Plot a distance matrix with labels in each element\n Parameters\n ----------\n D: narray(N, N)\n A distance matrix\n labels: list (N)\n A list of strings to label each row\n vmax: float\n The max distance to which to scale the plots\n (by default, just the max distance of the matrix,\n but this can be used to make sure colorbars are\n consistent across plots)\n \"\"\"\n if not vmax:\n vmax = np.max(D)\n N = D.shape[0]\n plt.imshow(D, interpolation='none', cmap='magma_r', vmin=0, vmax=vmax)\n plt.colorbar()\n for i in range(N):\n for j in range(N):\n plt.text(j-0.4, i, \"%.2g\"%D[i, j], c='white')\n plt.xticks(np.arange(N), labels, rotation=90)\n plt.yticks(np.arange(N), labels)\n plt.ylim([N, -1])\n\n\nlabels = [\"Art History\", \"English\", \"Math\", \"CS\", \"Physics\", \"Philosophy\", \"Politics\"]\n\n# The \"glob\" library can be used to list all of the files\n# in a directory that match a specified pattern. In this\n# case, we want all of the csv files in the ratings directory\nfiles = glob.glob(\"Ratings/*.csv\")\n\n# Loop through each student's ratings\nfor f in files:\n # Load in the rating that a particular student gave\n D = np.loadtxt(f, delimiter=',')\n # Use the filename to figure out the student's name\n student = f.split(\"/\")[-1][0:-4]\n # Just in case the student didn't make a symmetric matrix\n # (where Dij = Dji), make it symmetric now by averaging\n # all pairs Dij and Dji\n D = 0.5*(D+D.T)\n \n # Compute multidimensional scaling to find coordinates\n # in 2D that best respect the desired distances\n embedding = MDS(n_components=2, dissimilarity='precomputed')\n X = embedding.fit_transform(D)\n # Compute the distances of the points after MDS\n # so we can compare how close they are to the spec\n DMDS = pairwise_distances(X)\n\n # Plot the results\n plt.figure(figsize=(16, 5))\n plt.subplot(131)\n draw_distance_matrix(D, labels, vmax=1)\n plt.title(\"%s's Original Distances\"%student)\n\n plt.subplot(132)\n draw_distance_matrix(DMDS, labels, vmax=1)\n plt.title(\"MDS distances Distances\")\n\n plt.subplot(133)\n plt.scatter(X[:, 0], X[:, 1])\n for i, label in enumerate(labels):\n plt.text(X[i, 0], X[i, 1], label)\n plt.title(\"MDS Coordinates\")\n plt.tight_layout()\n plt.show()",
"\"\"\"\nPurpose: To use the librosa library to \"vectorize\" audio into features.\nThe audio is split up into little snippets that are arranged in order\nin time. Each snippet is summarized by 384 dimensions of a \"tempogram,\" \nwhich is a structure designed to pick up on timbral information \n(e.g. instrumentation / \"feel\") of the audio\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import pairwise_distances\nfrom sklearn.decomposition import PCA\nimport librosa\n\ny, sr = librosa.load(\"MJ.mp3\")\nhop_length = 512\n\noenv = librosa.onset.onset_strength(y=y, sr=sr,hop_length=hop_length, max_size=3)\nX = librosa.feature.tempogram(onset_envelope=oenv, sr=sr, hop_length=hop_length).T\nprint(X.shape)\n# Compute the times at which each snippet occurs in the audio\ntimes = np.arange(X.shape[0])*hop_length/sr\n\n# Compute the pairwise distances in 384 dimensional space\n# between all snippets\nD = pairwise_distances(X)\nplt.imshow(D, cmap='magma_r', extent=(times[0], times[-1], times[-1], times[0]))\nplt.xlabel(\"Time (Seconds)\")\nplt.ylabel(\"Time (Seconds)\")\nplt.colorbar()\nplt.title(\"Distance matrix for audio snippets\")\nplt.show()\n\n# Use PCA to reduce the dimension and flatten the \n# data to 2D for visualization. When we plot it, we\n# can see the verse and chorus reside in different\n# parts of the space, and it switches between the\n# two at around 13 seconds\npca = PCA(n_components=2)\nY = pca.fit_transform(X)\n\nplt.figure()\nplt.scatter(Y[:, 0], Y[:, 1], c=times, cmap='magma_r')\nplt.colorbar()\nplt.title(\"PCA for Audio Snippets (Colored by Time)\")\nplt.show()\n"
] | [
[
"sklearn.metrics.pairwise_distances",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylim",
"numpy.arange",
"sklearn.manifold.MDS",
"matplotlib.pyplot.colorbar",
"numpy.max",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.text",
"matplotlib.pyplot.show",
"numpy.loadtxt",
"matplotlib.pyplot.figure"
],
[
"sklearn.metrics.pairwise_distances",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.title",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
HiramHerrera/desisim | [
"3ae76e4c921f72b71ff7522462740e904136f428",
"3ae76e4c921f72b71ff7522462740e904136f428"
] | [
"etc/sim_quercus/mpi_newexp_random.py",
"py/desisim/test/test_quickcat.py"
] | [
"\nfrom mpi4py import MPI\n\nimport sys\nimport os\nimport argparse\nimport traceback\n\nimport numpy as np\n\nfrom desispec.util import option_list\n\nfrom desispec.parallel import stdouterr_redirected\n\nfrom desisim import obs\n\nimport desisim.scripts.newexp_random as newexp\n\n\nflavors = ['arc', 'arc', 'arc', \n 'flat', 'flat', 'flat', \n 'dark', 'gray', 'bright'\n ]\n\nnights = [\n '20191001',\n '20191002',\n '20191003',\n '20191004',\n '20191005'\n]\n\nntask = len(flavors) * len(nights)\n\ncomm = MPI.COMM_WORLD\nif comm.size < ntask:\n if comm.rank == 0:\n print(\"Communicator size ({}) too small for {} tasks\".format(comm.size, ntask), flush=True)\n comm.Abort()\n\nnp.random.seed(123456)\nseeds = np.random.randint(2**32, size=ntask)\n\nexpids = None\nif comm.rank == 0:\n expids = obs.get_next_expid(ntask)\nexpids = comm.bcast(expids, root=0)\n\ntileids = list()\nif comm.rank == 0:\n for nt in nights:\n for fl in flavors:\n flavor = fl.lower()\n t = obs.get_next_tileid(program=flavor)\n tileids.append(t)\n if flavor in ('arc', 'flat'):\n obs.update_obslog(obstype=flavor, program='calib', tileid=t)\n elif flavor in ('bright', 'bgs', 'mws'):\n obs.update_obslog(obstype='science', program='bright', tileid=t)\n elif flavor in ('gray', 'grey'):\n obs.update_obslog(obstype='science', program='gray', tileid=t)\n else:\n obs.update_obslog(obstype='science', program='dark', tileid=t)\n\ntileids = comm.bcast(tileids, root=0)\n\nif comm.rank == 0:\n simdir = os.path.join(os.environ['DESI_SPECTRO_SIM'], \n os.environ['PIXPROD'])\n etcdir = os.path.join(simdir, 'etc')\n if not os.path.isdir(etcdir):\n os.makedirs(etcdir)\n for nt in nights:\n ntdir = os.path.join(simdir, nt)\n if not os.path.isdir(ntdir):\n os.makedirs(ntdir)\n\ncomm.barrier()\n\ntaskproc = 1\n\ncomm_group = comm\ncomm_rank = None\ngroup = comm.rank\nngroup = comm.size\ngroup_rank = 0\nif comm is not None:\n if taskproc > 1:\n ngroup = int(nproc / taskproc)\n group = int(rank / taskproc)\n group_rank = rank % taskproc\n comm_group = comm.Split(color=group, key=group_rank)\n comm_rank = comm.Split(color=group_rank, key=group)\n else:\n comm_group = MPI.COMM_SELF\n comm_rank = comm\n\nlog_root = \"newexp_\"\n\ntask = 0\nfor nt in nights:\n for fl in flavors:\n tasklog = \"{}{}-{:08d}.log\".format(log_root, nt, expids[task])\n if task == group:\n with stdouterr_redirected(to=tasklog, comm=comm_group):\n try:\n options = {}\n options[\"program\"] = fl\n options[\"night\"] = nt\n options[\"expid\"] = expids[task]\n options[\"tileid\"] = tileids[task]\n options[\"seed\"] = seeds[task]\n #options[\"nproc\"] = 1\n optarray = option_list(options)\n args = newexp.parse(optarray)\n newexp.main(args)\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lines = traceback.format_exception(exc_type, exc_value, exc_traceback)\n print(\"\".join(lines), flush=True)\n task += 1\n\n",
"import os\nimport numpy as np\nimport unittest\nfrom astropy.table import Table, Column\nfrom astropy.io import fits\nfrom desisim.quickcat import quickcat\nfrom desitarget.targetmask import desi_mask, bgs_mask, mws_mask\nimport desimodel.io\n\nclass TestQuickCat(unittest.TestCase):\n \n @classmethod\n def setUpClass(cls):\n np.random.seed(50)\n cls.ntiles = 4\n tiles = desimodel.io.load_tiles()\n cls.tileids = tiles['TILEID'][0:cls.ntiles]\n cls.tilefiles = ['tile-{:05d}.fits'.format(i) for i in cls.tileids]\n cls.tilefiles_multiobs = ['multitile-{:05d}.fits'.format(i) for i in cls.tileids]\n\n cls.nspec = n = 5000\n targets = Table()\n targets['TARGETID'] = np.random.randint(0,2**60, size=n)\n targets['DESI_TARGET'] = 2**np.random.randint(0,3,size=n)\n targets['BGS_TARGET'] = np.zeros(n, dtype=int)\n targets['MWS_TARGET'] = np.zeros(n, dtype=int)\n isLRG = (targets['DESI_TARGET'] & desi_mask.LRG) != 0\n isELG = (targets['DESI_TARGET'] & desi_mask.ELG) != 0\n isQSO = (targets['DESI_TARGET'] & desi_mask.QSO) != 0\n cls.targets = targets\n\n #- Make a few of them BGS and MWS\n iibright = np.random.choice(np.arange(n), size=6, replace=False)\n isBGS = iibright[0:3]\n isMWS = iibright[3:6]\n targets['DESI_TARGET'][isBGS] = desi_mask.BGS_ANY\n targets['DESI_TARGET'][isMWS] = desi_mask.MWS_ANY\n targets['BGS_TARGET'][isBGS] = bgs_mask.BGS_BRIGHT\n try:\n #- desitarget >= 0.25.0\n targets['MWS_TARGET'][isMWS] = mws_mask.MWS_BROAD\n except AttributeError:\n #- desitarget <= 0.24.0\n targets['MWS_TARGET'][isMWS] = mws_mask.MWS_MAIN\n\n #- Add some fake photometry; no attempt to get colors right\n flux = np.zeros((n, 6)) #- ugrizY; DESI has grz\n flux[isLRG, 1] = np.random.uniform(0, 1.0, np.count_nonzero(isLRG))\n flux[isLRG, 2] = np.random.uniform(0, 5.0, np.count_nonzero(isLRG))\n flux[isLRG, 4] = np.random.uniform(0, 5.0, np.count_nonzero(isLRG))\n flux[isELG, 1] = np.random.uniform(0, 4.0, np.count_nonzero(isELG))\n flux[isELG, 2] = np.random.uniform(0, 4.0, np.count_nonzero(isELG))\n flux[isELG, 4] = np.random.uniform(0, 10.0, np.count_nonzero(isELG))\n flux[isQSO, 1] = np.random.uniform(0, 4.0, np.count_nonzero(isQSO))\n flux[isQSO, 2] = np.random.uniform(0, 4.0, np.count_nonzero(isQSO))\n flux[isQSO, 4] = np.random.uniform(0, 6.0, np.count_nonzero(isQSO))\n # isBGS and isMWS are arrays of indices, not arrays of booleans\n flux[isBGS, 1] = np.random.uniform(10, 600, isBGS.size)\n flux[isBGS, 2] = np.random.uniform(15, 1000, isBGS.size)\n flux[isBGS, 4] = np.random.uniform(10, 1400, isBGS.size)\n flux[isMWS, 1] = np.random.uniform(10, 150, isMWS.size)\n flux[isMWS, 2] = np.random.uniform(15, 350, isMWS.size)\n flux[isMWS, 4] = np.random.uniform(10, 1500, isMWS.size)\n targets['DECAM_FLUX'] = flux\n\n truth = Table()\n truth['TARGETID'] = targets['TARGETID'].copy()\n truth['TRUEZ'] = np.random.uniform(0, 1.5, size=n)\n truth['TRUESPECTYPE'] = np.zeros(n, dtype=(str, 10))\n truth['GMAG'] = np.random.uniform(18.0, 24.0, size=n)\n ii = (targets['DESI_TARGET'] & desi_mask.mask('LRG|ELG|BGS_ANY')) != 0\n truth['TRUESPECTYPE'][ii] = 'GALAXY'\n ii = (targets['DESI_TARGET'] == desi_mask.QSO)\n truth['TRUESPECTYPE'][ii] = 'QSO'\n starmask = desi_mask.mask('MWS_ANY|STD_FAINT|STD_WD|STD_BRIGHT') \n ii = ((targets['DESI_TARGET'] & starmask) != 0) \n truth['TRUESPECTYPE'][ii] = 'STAR'\n\n #- Add some fake [OII] fluxes for the ELGs; include some that will fail\n isELG = (targets['DESI_TARGET'] & desi_mask.ELG) != 0\n nELG = np.count_nonzero(isELG)\n truth['OIIFLUX'] = np.zeros(n, dtype=float)\n 
truth['OIIFLUX'][isELG] = np.random.normal(2e-17, 2e-17, size=nELG).clip(0)\n\n cls.truth = truth\n\n fiberassign = truth['TARGETID',]\n fiberassign['RA'] = np.random.uniform(0,5, size=n)\n fiberassign['DEC'] = np.random.uniform(0,5, size=n)\n fiberassign.meta['EXTNAME'] = 'FIBERASSIGN'\n nx = cls.nspec // cls.ntiles\n cls.targets_in_tile = dict()\n for i, filename in enumerate(cls.tilefiles):\n subset = fiberassign[i*nx:(i+1)*nx]\n subset.write(filename)\n cls.targets_in_tile[cls.tileids[i]] = subset['TARGETID']\n hdulist = fits.open(filename, mode='update')\n hdr = hdulist[1].header\n hdr.set('TILEID', cls.tileids[i])\n hdulist.close()\n\n #- Also create a test of tile files that have multiple observations\n nx = cls.nspec // cls.ntiles\n for i, filename in enumerate(cls.tilefiles_multiobs):\n subset = fiberassign[0:(i+1)*nx]\n subset.write(filename)\n hdulist = fits.open(filename, mode='update')\n hdr = hdulist[1].header\n hdr.set('TILEID', cls.tileids[i])\n hdulist.close()\n\n #- Cleanup test files if they exist\n @classmethod\n def tearDownClass(cls):\n for filename in cls.tilefiles + cls.tilefiles_multiobs:\n if os.path.exists(filename):\n os.remove(filename)\n \n def test_quickcat(self):\n #- First round of obs: perfect input z -> output z\n zcat1 = quickcat(self.tilefiles[0:2], self.targets, truth=self.truth, perfect=True)\n \n zcat1.sort(keys='TARGETID')\n nx = self.nspec // self.ntiles\n truth_01 = self.truth[0:2*nx].copy()\n truth_01.sort(keys='TARGETID')\n self.assertTrue(np.all(zcat1['TARGETID'] == truth_01['TARGETID']))\n self.assertTrue(np.all(zcat1['Z'] == truth_01['TRUEZ']))\n self.assertTrue(np.all(zcat1['ZWARN'] == 0))\n\n #- Now observe with random redshift errors\n zcat2 = quickcat(self.tilefiles[0:2], self.targets, truth=self.truth, perfect=False)\n zcat2_sorted = zcat2.copy()\n zcat2_sorted.sort(keys='TARGETID')\n self.assertTrue(np.all(zcat2_sorted['TARGETID'] == truth_01['TARGETID']))\n self.assertTrue(np.all(zcat2_sorted['Z'] != truth_01['TRUEZ']))\n self.assertTrue(np.any(zcat2_sorted['ZWARN'] != 0))\n\n #- And add a second round of observations\n zcat3 = quickcat(self.tilefiles[2:4], self.targets, truth=self.truth, zcat=zcat2, perfect=False)\n zcat3_sorted = zcat3.copy()\n zcat3_sorted.sort(keys='TARGETID')\n truth_sorted = self.truth.copy()\n truth_sorted.sort(keys='TARGETID')\n self.assertTrue(np.all(zcat3_sorted['TARGETID'] == truth_sorted['TARGETID']))\n self.assertTrue(np.all(zcat3_sorted['Z'] != truth_sorted['TRUEZ']))\n \n #- successful targets in the first round of observations shouldn't be updated\n ii2 = np.in1d(zcat2_sorted['TARGETID'], zcat3_sorted['TARGETID']) & (zcat2_sorted['ZWARN'] == 0)\n ii3 = np.in1d(zcat3_sorted['TARGETID'], zcat2_sorted['TARGETID'][ii2])\n ii = zcat2_sorted['Z'][ii2] == zcat3_sorted['Z'][ii3]\n self.assertTrue(np.all(zcat2_sorted['Z'][ii2] == zcat3_sorted['Z'][ii3]))\n \n #- Observe the last tile again\n zcat3copy = zcat3_sorted.copy()\n zcat4 = quickcat(self.tilefiles[3:4], self.targets, truth=self.truth, zcat=zcat3copy)\n zcat4_sorted = zcat4.copy()\n zcat4_sorted.sort(keys='TARGETID')\n self.assertTrue(np.all(zcat3copy == zcat3_sorted)) #- original unmodified\n self.assertTrue(np.all(zcat4_sorted['TARGETID'] == truth_sorted['TARGETID'])) #- all IDS observed\n self.assertTrue(np.all(zcat4_sorted['Z'] != truth_sorted['TRUEZ']))\n\n #- Check that NUMOBS was incremented\n i3 = np.in1d(zcat3_sorted['TARGETID'], self.targets_in_tile[self.tileids[3]]) # ids observed in the last tile\n i4 = np.in1d(zcat4_sorted['TARGETID'], 
self.targets_in_tile[self.tileids[3]]) # ids observed in the last tile\n self.assertTrue(np.all(zcat4_sorted['NUMOBS'][i4] == zcat3_sorted['NUMOBS'][i3]+1))\n\n #- ZWARN==0 targets should be preserved, while ZWARN!=0 updated\n z3 = zcat3_sorted[i3]\n z4 = zcat4_sorted[i4]\n ii = (z3['ZWARN'] != 0)\n self.assertTrue(np.all(z3['Z'][~ii] == z4['Z'][~ii]))\n self.assertTrue(np.all(z3['Z'][ii] != z4['Z'][ii]))\n\n\n def test_multiobs(self):\n # Targets with more observations should have a better efficiency\n zcat = quickcat(self.tilefiles_multiobs, self.targets, truth=self.truth, perfect=False)\n \n oneobs = (zcat['NUMOBS'] == 1)\n manyobs = (zcat['NUMOBS'] == np.max(zcat['NUMOBS']))\n goodz = (zcat['ZWARN'] == 0)\n \n p1 = np.count_nonzero(oneobs & goodz) / np.count_nonzero(oneobs)\n p2 = np.count_nonzero(manyobs & goodz) / np.count_nonzero(manyobs)\n self.assertGreater(p2, p1)\n \nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.random.seed",
"numpy.random.randint"
],
[
"numpy.random.seed",
"numpy.arange",
"numpy.in1d",
"numpy.all",
"numpy.max",
"numpy.random.normal",
"numpy.any",
"numpy.count_nonzero",
"numpy.random.uniform",
"numpy.zeros",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |